mirror of https://github.com/rclone/rclone.git synced 2026-01-28 15:23:26 +00:00

Compare commits


4 Commits

Author  SHA1  Message  Date
albertony  da307bcd9a  Go mod tidy jwt-go (golang-jwt) upgrade from v4 to v5  2025-01-15 16:45:02 +01:00
albertony  8c3ea2842c  lib/jwtutil: upgrade jwt-go (golang-jwt) from v4 to v5  2025-01-15 16:44:58 +01:00
albertony  f4d7df1511  lib/jwtutil: rename StandardClaims to LegacyStandardClaims  2025-01-15 16:44:54 +01:00
albertony  fa3a8161cf  Compatibility with jwt-go (golang-jwt) v5  2025-01-15 16:44:17 +01:00

    Includes the StandardClaims implementation from jwt-go v4, where it was marked as
    deprecated before being removed in v5. The box backend needs this.

    See #7115
394 changed files with 3669 additions and 9864 deletions
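The fa3a8161cf commit above keeps the box backend working by carrying the jwt-go v4 StandardClaims shape forward into lib/jwtutil as LegacyStandardClaims. This compare does not show lib/jwtutil itself, so the following is only a minimal sketch of what such a compatibility shim can look like: a struct with the v4 field set whose methods satisfy the jwt.Claims interface that jwt-go v5 requires. The numericDate helper and the exact field mapping are illustrative assumptions, not rclone's actual code.

package jwtutil

import (
    "time"

    "github.com/golang-jwt/jwt/v5"
)

// LegacyStandardClaims mirrors the field set of jwt-go v4's StandardClaims,
// which was deprecated in v4 and removed entirely in v5.
type LegacyStandardClaims struct {
    Audience  string `json:"aud,omitempty"`
    ExpiresAt int64  `json:"exp,omitempty"`
    ID        string `json:"jti,omitempty"`
    IssuedAt  int64  `json:"iat,omitempty"`
    Issuer    string `json:"iss,omitempty"`
    NotBefore int64  `json:"nbf,omitempty"`
    Subject   string `json:"sub,omitempty"`
}

// numericDate converts a unix timestamp to the *jwt.NumericDate type used by
// jwt-go v5, keeping nil for an unset (zero) value.
func numericDate(unix int64) *jwt.NumericDate {
    if unix == 0 {
        return nil
    }
    return jwt.NewNumericDate(time.Unix(unix, 0))
}

// The methods below satisfy jwt-go v5's jwt.Claims interface, so the legacy
// struct can still be embedded in custom claims the way the box backend
// embeds it in boxCustomClaims.
func (c LegacyStandardClaims) GetExpirationTime() (*jwt.NumericDate, error) {
    return numericDate(c.ExpiresAt), nil
}

func (c LegacyStandardClaims) GetIssuedAt() (*jwt.NumericDate, error) {
    return numericDate(c.IssuedAt), nil
}

func (c LegacyStandardClaims) GetNotBefore() (*jwt.NumericDate, error) {
    return numericDate(c.NotBefore), nil
}

func (c LegacyStandardClaims) GetIssuer() (string, error) {
    return c.Issuer, nil
}

func (c LegacyStandardClaims) GetSubject() (string, error) {
    return c.Subject, nil
}

func (c LegacyStandardClaims) GetAudience() (jwt.ClaimStrings, error) {
    if c.Audience == "" {
        return nil, nil
    }
    return jwt.ClaimStrings{c.Audience}, nil
}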

View File

@@ -26,7 +26,7 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.23']
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.22', 'go1.23']
include:
- job_name: linux
@@ -80,6 +80,12 @@ jobs:
compile_all: true
deploy: true
- job_name: go1.22
os: ubuntu-latest
go: '1.22'
quicktest: true
racequicktest: true
- job_name: go1.23
os: ubuntu-latest
go: '1.23'
@@ -226,8 +232,6 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install Go
id: setup-go
@@ -291,10 +295,6 @@ jobs:
- name: Scan for vulnerabilities
run: govulncheck ./...
- name: Scan edits of autogenerated files
run: bin/check_autogenerated_edits.py
if: github.event_name == 'pull_request'
android:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
timeout-minutes: 30

View File

@@ -0,0 +1,77 @@
name: Docker beta build
on:
push:
branches:
- master
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/${{ github.repository }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user who created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
# `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
# GitHub Actions at the start of a workflow run to identify the job.
# This is used to authenticate against GitHub Container Registry.
# See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
# for more detailed information.
password: ${{ secrets.GITHUB_TOKEN }}
- name: Show disk usage
shell: bash
run: |
df -h .
- name: Build and publish image
uses: docker/build-push-action@v6
with:
file: Dockerfile
context: .
push: true # push the image to ghcr
tags: |
ghcr.io/rclone/rclone:beta
rclone/rclone:beta
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
cache-from: type=gha, scope=${{ github.workflow }}
cache-to: type=gha, mode=max, scope=${{ github.workflow }}
provenance: false
# Eventually cache will need to be cleared if builds more frequent than once a week
# https://github.com/docker/build-push-action/issues/252
- name: Show disk usage
shell: bash
run: |
df -h .

View File

@@ -1,294 +0,0 @@
---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_image.yml" -*-
name: Build & Push Docker Images
# Trigger the workflow on push or pull request
on:
push:
branches:
- '**'
tags:
- '**'
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
default: true
jobs:
build-image:
if: inputs.manual || (github.repository == 'rclone/rclone' && github.event_name != 'pull_request')
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
include:
- platform: linux/amd64
runs-on: ubuntu-24.04
- platform: linux/386
runs-on: ubuntu-24.04
- platform: linux/arm64
runs-on: ubuntu-24.04-arm
- platform: linux/arm/v7
runs-on: ubuntu-24.04-arm
- platform: linux/arm/v6
runs-on: ubuntu-24.04-arm
name: Build Docker Image for ${{ matrix.platform }}
runs-on: ${{ matrix.runs-on }}
steps:
- name: Free Space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout Repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set REPO_NAME Variable
run: |
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
- name: Set PLATFORM Variable
run: |
platform=${{ matrix.platform }}
echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV
- name: Set CACHE_NAME Variable
shell: python
run: |
import os, re
def slugify(input_string, max_length=63):
slug = input_string.lower()
slug = re.sub(r'[^a-z0-9 -]', ' ', slug)
slug = slug.strip()
slug = re.sub(r'\s+', '-', slug)
slug = re.sub(r'-+', '-', slug)
slug = slug[:max_length]
slug = re.sub(r'[-]+$', '', slug)
return slug
ref_name_slug = "cache"
if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request":
ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])
with open(os.environ['GITHUB_ENV'], 'a') as env:
env.write(f"CACHE_NAME={ref_name_slug}\n")
- name: Get ImageOS
# There's no way around this, because "ImageOS" is only available to
# processes, but the setup-go action uses it in its key.
id: imageos
uses: actions/github-script@v7
with:
result-encoding: string
script: |
return process.env.ImageOS
- name: Extract Metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,manifest-descriptor # Important for digest annotation (used by Github packages)
with:
images: |
ghcr.io/${{ env.REPO_NAME }}
labels: |
org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
org.opencontainers.image.vendor=${{ github.repository_owner }}
org.opencontainers.image.authors=rclone <https://github.com/rclone>
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
org.opencontainers.image.revision=${{ github.sha }}
tags: |
type=sha
type=ref,event=pr
type=ref,event=branch
type=semver,pattern={{version}}
type=semver,pattern={{major}}
type=semver,pattern={{major}}.{{minor}}
type=raw,value=beta,enable={{is_default_branch}}
- name: Setup QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Load Go Build Cache for Docker
id: go-cache
uses: actions/cache@v4
with:
key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
# Cache only the go builds, the module download is cached via the docker layer caching
path: |
go-build-cache
- name: Inject Go Build Cache into Docker
uses: reproducible-containers/buildkit-cache-dance@v3
with:
cache-map: |
{
"go-build-cache": "/root/.cache/go-build"
}
skip-extraction: ${{ steps.go-cache.outputs.cache-hit }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user who created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and Publish Image Digest
id: build
uses: docker/build-push-action@v6
with:
file: Dockerfile
context: .
provenance: false
# don't specify 'tags' here (error "get can't push tagged ref by digest")
# tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
annotations: ${{ steps.meta.outputs.annotations }}
platforms: ${{ matrix.platform }}
outputs: |
type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
cache-from: |
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
cache-to: |
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd
- name: Export Image Digest
run: |
mkdir -p /tmp/digests
digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}"
- name: Upload Image Digest
uses: actions/upload-artifact@v4
with:
name: digests-${{ env.PLATFORM }}
path: /tmp/digests/*
retention-days: 1
if-no-files-found: error
merge-image:
name: Merge & Push Final Docker Image
runs-on: ubuntu-24.04
needs:
- build-image
steps:
- name: Download Image Digests
uses: actions/download-artifact@v4
with:
path: /tmp/digests
pattern: digests-*
merge-multiple: true
- name: Set REPO_NAME Variable
run: |
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
- name: Extract Metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: index
with:
images: |
${{ env.REPO_NAME }}
ghcr.io/${{ env.REPO_NAME }}
labels: |
org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
org.opencontainers.image.vendor=${{ github.repository_owner }}
org.opencontainers.image.authors=rclone <https://github.com/rclone>
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
org.opencontainers.image.revision=${{ github.sha }}
tags: |
type=sha
type=ref,event=pr
type=ref,event=branch
type=semver,pattern={{version}}
type=semver,pattern={{major}}
type=semver,pattern={{major}}.{{minor}}
type=raw,value=beta,enable={{is_default_branch}}
- name: Extract Tags
shell: python
run: |
import json, os
metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
metadata = json.loads(metadata_json)
tags = [f"--tag '{tag}'" for tag in metadata["tags"]]
tags_string = " ".join(tags)
with open(os.environ['GITHUB_ENV'], 'a') as env:
env.write(f"TAGS={tags_string}\n")
- name: Extract Annotations
shell: python
run: |
import json, os
metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
metadata = json.loads(metadata_json)
annotations = [f"--annotation '{annotation}'" for annotation in metadata["annotations"]]
annotations_string = " ".join(annotations)
with open(os.environ['GITHUB_ENV'], 'a') as env:
env.write(f"ANNOTATIONS={annotations_string}\n")
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user who created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Create & Push Manifest List
working-directory: /tmp/digests
run: |
docker buildx imagetools create \
${{ env.TAGS }} \
${{ env.ANNOTATIONS }} \
$(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *)
- name: Inspect and Run Multi-Platform Image
run: |
docker buildx imagetools inspect --raw ${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
docker buildx imagetools inspect --raw ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
docker run --rm ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} version

View File

@@ -1,49 +0,0 @@
---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*-
name: Release Build for Docker Plugin
on:
release:
types: [published]
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
default: true
jobs:
build_docker_volume_plugin:
if: inputs.manual || github.repository == 'rclone/rclone'
name: Build docker plugin job
runs-on: ubuntu-latest
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build and publish docker plugin
shell: bash
run: |
VER=${GITHUB_REF#refs/tags/}
PLUGIN_USER=rclone
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
export PLUGIN_USER PLUGIN_ARCH
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
done
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}

View File

@@ -0,0 +1,89 @@
name: Docker release build
on:
release:
types: [published]
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Get actual patch version
id: actual_patch_version
run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
- name: Get actual minor version
id: actual_minor_version
run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
- name: Get actual major version
id: actual_major_version
run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_HUB_USER }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: Build and publish image
uses: docker/build-push-action@v6
with:
file: Dockerfile
context: .
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
push: true
tags: |
rclone/rclone:latest
rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
build_docker_volume_plugin:
if: github.repository == 'rclone/rclone'
needs: build
runs-on: ubuntu-latest
name: Build docker plugin job
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build and publish docker plugin
shell: bash
run: |
VER=${GITHUB_REF#refs/tags/}
PLUGIN_USER=rclone
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
export PLUGIN_USER PLUGIN_ARCH
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
done
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}

View File

@@ -1,47 +1,19 @@
FROM golang:alpine AS builder
ARG CGO_ENABLED=0
COPY . /go/src/github.com/rclone/rclone/
WORKDIR /go/src/github.com/rclone/rclone/
RUN echo "**** Set Go Environment Variables ****" && \
go env -w GOCACHE=/root/.cache/go-build
RUN echo "**** Install Dependencies ****" && \
apk add --no-cache \
make \
bash \
gawk \
git
COPY go.mod .
COPY go.sum .
RUN echo "**** Download Go Dependencies ****" && \
go mod download -x
RUN echo "**** Verify Go Dependencies ****" && \
go mod verify
COPY . .
RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \
echo "**** Build Binary ****" && \
make
RUN echo "**** Print Version Binary ****" && \
./rclone version
RUN apk add --no-cache make bash gawk git
RUN \
CGO_ENABLED=0 \
make
RUN ./rclone version
# Begin final image
FROM alpine:latest
RUN echo "**** Install Dependencies ****" && \
apk add --no-cache \
ca-certificates \
fuse3 \
tzdata && \
echo "Enable user_allow_other in fuse" && \
echo "user_allow_other" >> /etc/fuse.conf
RUN apk --no-cache add ca-certificates fuse3 tzdata && \
echo "user_allow_other" >> /etc/fuse.conf
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/

View File

@@ -131,8 +131,8 @@ Now
* git co ${BASE_TAG}-stable
* git cherry-pick any fixes
* make startstable
* Do the steps as above
* make startstable
* git co master
* `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
* git checkout ${BASE_TAG}-stable docs/content/changelog.md

File diff suppressed because it is too large

View File

@@ -3,149 +3,16 @@
package azureblob
import (
"context"
"encoding/base64"
"strings"
"testing"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestBlockIDCreator(t *testing.T) {
// Check creation and random number
bic, err := newBlockIDCreator()
require.NoError(t, err)
bic2, err := newBlockIDCreator()
require.NoError(t, err)
assert.NotEqual(t, bic.random, bic2.random)
assert.NotEqual(t, bic.random, [8]byte{})
// Set random to known value for tests
bic.random = [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
chunkNumber := uint64(0xFEDCBA9876543210)
// Check creation of ID
want := base64.StdEncoding.EncodeToString([]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 1, 2, 3, 4, 5, 6, 7, 8})
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", want)
got := bic.newBlockID(chunkNumber)
assert.Equal(t, want, got)
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", got)
// Test checkID is working
assert.NoError(t, bic.checkID(chunkNumber, got))
assert.ErrorContains(t, bic.checkID(chunkNumber, "$"+got), "illegal base64")
assert.ErrorContains(t, bic.checkID(chunkNumber, "AAAA"+got), "bad block ID length")
assert.ErrorContains(t, bic.checkID(chunkNumber+1, got), "expecting decoded")
assert.ErrorContains(t, bic2.checkID(chunkNumber, got), "random bytes")
}
func (f *Fs) testFeatures(t *testing.T) {
// Check first feature flags are set on this remote
func (f *Fs) InternalTest(t *testing.T) {
// Check first feature flags are set on this
// remote
enabled := f.Features().SetTier
assert.True(t, enabled)
enabled = f.Features().GetTier
assert.True(t, enabled)
}
type ReadSeekCloser struct {
*strings.Reader
}
func (r *ReadSeekCloser) Close() error {
return nil
}
// Stage a block at remote but don't commit it
func (f *Fs) stageBlockWithoutCommit(ctx context.Context, t *testing.T, remote string) {
var (
containerName, blobPath = f.split(remote)
containerClient = f.cntSVC(containerName)
blobClient = containerClient.NewBlockBlobClient(blobPath)
data = "uncommitted data"
blockID = "1"
blockIDBase64 = base64.StdEncoding.EncodeToString([]byte(blockID))
)
r := &ReadSeekCloser{strings.NewReader(data)}
_, err := blobClient.StageBlock(ctx, blockIDBase64, r, nil)
require.NoError(t, err)
// Verify the block is staged but not committed
blockList, err := blobClient.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
require.NoError(t, err)
found := false
for _, block := range blockList.UncommittedBlocks {
if *block.Name == blockIDBase64 {
found = true
break
}
}
require.True(t, found, "Block ID not found in uncommitted blocks")
}
// This tests uploading a blob where it has uncommitted blocks with a different ID size.
//
// https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/
//
// TestIntegration/FsMkdir/FsPutFiles/Internal/WriteUncommittedBlocks
func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
var (
ctx = context.Background()
remote = "testBlob"
)
// Multipart copy the blob please
oldUseCopyBlob, oldCopyCutoff := f.opt.UseCopyBlob, f.opt.CopyCutoff
f.opt.UseCopyBlob = false
f.opt.CopyCutoff = f.opt.ChunkSize
defer func() {
f.opt.UseCopyBlob, f.opt.CopyCutoff = oldUseCopyBlob, oldCopyCutoff
}()
// Create a blob with uncommitted blocks
f.stageBlockWithoutCommit(ctx, t, remote)
// Now attempt to overwrite the block with a different sized block ID to provoke this error
// Check the object does not exist
_, err := f.NewObject(ctx, remote)
require.Equal(t, fs.ErrorObjectNotFound, err)
// Upload a multipart file over the block with uncommitted chunks of a different ID size
size := 4*int(f.opt.ChunkSize) - 1
contents := random.String(size)
item := fstest.NewItem(remote, contents, fstest.Time("2001-05-06T04:05:06.499Z"))
o := fstests.PutTestContents(ctx, t, f, &item, contents, true)
// Check size
assert.Equal(t, int64(size), o.Size())
// Create a new blob with uncommitted blocks
newRemote := "testBlob2"
f.stageBlockWithoutCommit(ctx, t, newRemote)
// Copy over that block
dst, err := f.Copy(ctx, o, newRemote)
require.NoError(t, err)
// Check basics
assert.Equal(t, int64(size), dst.Size())
assert.Equal(t, newRemote, dst.Remote())
// Check contents
gotContents := fstests.ReadObject(ctx, t, dst, -1)
assert.Equal(t, contents, gotContents)
// Remove the object
require.NoError(t, dst.Remove(ctx))
}
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Features", f.testFeatures)
t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
}

View File

@@ -15,17 +15,13 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
name := "TestAzureBlob"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool", "Cold"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "use_copy_blob", Value: "false"},
},
})
}
@@ -44,7 +40,6 @@ func TestIntegration2(t *testing.T) {
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
{Name: name, Key: "use_copy_blob", Value: "false"},
},
})
}
@@ -53,13 +48,8 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setCopyCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetCopyCutoffer = (*Fs)(nil)
)
func TestValidateAccessTier(t *testing.T) {

View File

@@ -237,30 +237,6 @@ msi_client_id, or msi_mi_res_id parameters.`,
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
Advanced: true,
Sensitive: true,
}, {
Name: "disable_instance_discovery",
Help: `Skip requesting Microsoft Entra instance metadata
This should be set true only by applications authenticating in
disconnected clouds, or private clouds such as Azure Stack.
It determines whether rclone requests Microsoft Entra instance
metadata from ` + "`https://login.microsoft.com/`" + ` before
authenticating.
Setting this to true will skip this request, making you responsible
for ensuring the configured authority is valid and trustworthy.
`,
Default: false,
Advanced: true,
}, {
Name: "use_az",
Help: `Use Azure CLI tool az for authentication
Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
as the sole means of authentication.
Setting this can be useful if you wish to use the az CLI on a host with
a System Managed Identity that you do not want to use.
Don't set env_auth at the same time.
`,
Default: false,
Advanced: true,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
@@ -343,12 +319,10 @@ type Options struct {
Username string `config:"username"`
Password string `config:"password"`
ServicePrincipalFile string `config:"service_principal_file"`
DisableInstanceDiscovery bool `config:"disable_instance_discovery"`
UseMSI bool `config:"use_msi"`
MSIObjectID string `config:"msi_object_id"`
MSIClientID string `config:"msi_client_id"`
MSIResourceID string `config:"msi_mi_res_id"`
UseAZ bool `config:"use_az"`
Endpoint string `config:"endpoint"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
MaxStreamSize fs.SizeSuffix `config:"max_stream_size"`
@@ -440,8 +414,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
}
// Read credentials from the environment
options := azidentity.DefaultAzureCredentialOptions{
ClientOptions: policyClientOptions,
DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
ClientOptions: policyClientOptions,
}
cred, err = azidentity.NewDefaultAzureCredential(&options)
if err != nil {
@@ -452,13 +425,6 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
if err != nil {
return nil, fmt.Errorf("create new shared key credential failed: %w", err)
}
case opt.UseAZ:
var options = azidentity.AzureCLICredentialOptions{}
cred, err = azidentity.NewAzureCLICredential(&options)
fmt.Println(cred)
if err != nil {
return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
}
case opt.SASURL != "":
client, err = service.NewClientWithNoCredential(opt.SASURL, &clientOpt)
if err != nil {
@@ -933,7 +899,7 @@ func (o *Object) getMetadata(ctx context.Context) error {
// Hash returns the MD5 of an object returning a lowercase hex string
//
// May make a network request because the [fs.List] method does not
// May make a network request becaue the [fs.List] method does not
// return MD5 hashes for DirEntry
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
if ty != hash.MD5 {

View File

@@ -61,7 +61,7 @@ const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX"
func randomString(charCount int) string {
strBldr := strings.Builder{}
for range charCount {
for i := 0; i < charCount; i++ {
randPos := rand.Int63n(52)
strBldr.WriteByte(chars[randPos])
}

View File

@@ -130,10 +130,10 @@ type AuthorizeAccountResponse struct {
AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
AccountID string `json:"accountId"` // The identifier for the account.
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
NamePrefix any `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
} `json:"allowed"`
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.

View File

@@ -16,7 +16,6 @@ import (
"io"
"net/http"
"path"
"slices"
"strconv"
"strings"
"sync"
@@ -31,8 +30,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/multipart"
@@ -301,13 +299,14 @@ type Fs struct {
// Object describes a b2 object
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
id string // b2 id of the file
modTime time.Time // The modified time of the object if known
sha1 string // SHA-1 hash if known
size int64 // Size of the object
mimeType string // Content-Type of the object
fs *Fs // what this object is part of
remote string // The remote path
id string // b2 id of the file
modTime time.Time // The modified time of the object if known
sha1 string // SHA-1 hash if known
size int64 // Size of the object
mimeType string // Content-Type of the object
meta map[string]string // The object metadata if known - may be nil - with lower case keys
}
// ------------------------------------------------------------
@@ -590,7 +589,12 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
// hasPermission returns if the current AuthorizationToken has the selected permission
func (f *Fs) hasPermission(permission string) bool {
return slices.Contains(f.info.Allowed.Capabilities, permission)
for _, capability := range f.info.Allowed.Capabilities {
if capability == permission {
return true
}
}
return false
}
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
@@ -918,7 +922,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
last := ""
return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
@@ -1271,7 +1275,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
toBeDeleted := make(chan *api.File, f.ci.Transfers)
var wg sync.WaitGroup
wg.Add(f.ci.Transfers)
for range f.ci.Transfers {
for i := 0; i < f.ci.Transfers; i++ {
go func() {
defer wg.Done()
for object := range toBeDeleted {
@@ -1314,22 +1318,16 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
// Check current version of the file
if deleteHidden && object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
if !operations.SkipDestructive(ctx, object.Name, "remove hide marker") {
toBeDeleted <- object
}
toBeDeleted <- object
} else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
if !operations.SkipDestructive(ctx, object.Name, "remove pending upload") {
toBeDeleted <- object
}
toBeDeleted <- object
} else {
fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp)))
}
} else {
fs.Debugf(remote, "Deleting (id %q)", object.ID)
if !operations.SkipDestructive(ctx, object.Name, "delete") {
toBeDeleted <- object
}
toBeDeleted <- object
}
last = remote
tr.Done(ctx, nil)
@@ -1600,6 +1598,9 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
if err != nil {
return err
}
// For now, just set "mtime" in metadata
o.meta = make(map[string]string, 1)
o.meta["mtime"] = o.modTime.Format(time.RFC3339Nano)
return nil
}
@@ -1879,6 +1880,13 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
Info: Info,
}
// Embryonic metadata support - just mtime
o.meta = make(map[string]string, 1)
modTime, err := parseTimeStringHelper(info.Info[timeKey])
if err == nil {
o.meta["mtime"] = modTime.Format(time.RFC3339Nano)
}
// When reading files from B2 via cloudflare using
// --b2-download-url cloudflare strips the Content-Length
// headers (presumably so it can inject stuff) so use the old
@@ -1935,7 +1943,7 @@ func init() {
// urlEncode encodes in with % encoding
func urlEncode(in string) string {
var out bytes.Buffer
for i := range len(in) {
for i := 0; i < len(in); i++ {
c := in[i]
if noNeedToEncode[c] {
_ = out.WriteByte(c)
@@ -2256,7 +2264,7 @@ See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
},
}
func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
var newRule api.LifecycleRule
if daysStr := opt["daysFromHidingToDeleting"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
@@ -2285,10 +2293,8 @@ func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, op
}
skip := operations.SkipDestructive(ctx, name, "update lifecycle rules")
var bucket *api.Bucket
if !skip && (newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil) {
if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil {
bucketID, err := f.getBucketID(ctx, bucketName)
if err != nil {
return nil, err
@@ -2345,7 +2351,7 @@ Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
},
}
func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
maxAge := defaultMaxAge
if opt["max-age"] != "" {
maxAge, err = fs.ParseDuration(opt["max-age"])
@@ -2368,7 +2374,7 @@ it would do.
`,
}
func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
return nil, f.cleanUp(ctx, true, false, 0)
}
@@ -2387,7 +2393,7 @@ var commandHelp = []fs.CommandHelp{
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "lifecycle":
return f.lifecycleCommand(ctx, name, arg, opt)

View File

@@ -5,7 +5,6 @@ import (
"crypto/sha1"
"fmt"
"path"
"sort"
"strings"
"testing"
"time"
@@ -14,7 +13,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/bucket"
@@ -258,6 +256,12 @@ func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string
assert.Equal(t, v, got, k)
}
// mtime
for k, v := range metadata {
got := o.meta[k]
assert.Equal(t, v, got, k)
}
assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
// Modification time from the x-bz-info-src_last_modified_millis header
@@ -459,161 +463,24 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
})
t.Run("Cleanup", func(t *testing.T) {
t.Run("DryRun", func(t *testing.T) {
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Listing should be unchanged after dry run
before := listAllFiles(ctx, t, f, dirName)
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
require.NoError(t, f.cleanUp(ctx, true, false, 0))
after := listAllFiles(ctx, t, f, dirName)
assert.Equal(t, before, after)
})
t.Run("RealThing", func(t *testing.T) {
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Listing should reflect current state after cleanup
require.NoError(t, f.cleanUp(ctx, true, false, 0))
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
fstest.CheckListing(t, f, items)
})
require.NoError(t, f.cleanUp(ctx, true, false, 0))
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
fstest.CheckListing(t, f, items)
// Set --b2-versions for this test
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
fstest.CheckListing(t, f, items)
})
// Purge gets tested later
}
func (f *Fs) InternalTestCleanupUnfinished(t *testing.T) {
ctx := context.Background()
// B2CleanupHidden tests cleaning up hidden files
t.Run("CleanupUnfinished", func(t *testing.T) {
dirName := "unfinished"
fileCount := 5
expectedFiles := []string{}
for i := 1; i < fileCount; i++ {
fileName := fmt.Sprintf("%s/unfinished-%d", dirName, i)
expectedFiles = append(expectedFiles, fileName)
obj := &Object{
fs: f,
remote: fileName,
}
objInfo := object.NewStaticObjectInfo(fileName, fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
_, err := f.newLargeUpload(ctx, obj, nil, objInfo, f.opt.ChunkSize, false, nil)
require.NoError(t, err)
}
checkListing(ctx, t, f, dirName, expectedFiles)
t.Run("DryRun", func(t *testing.T) {
// Listing should not change after dry run
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
require.NoError(t, f.cleanUp(ctx, false, true, 0))
checkListing(ctx, t, f, dirName, expectedFiles)
})
t.Run("RealThing", func(t *testing.T) {
// Listing should be empty after real cleanup
require.NoError(t, f.cleanUp(ctx, false, true, 0))
checkListing(ctx, t, f, dirName, []string{})
})
})
}
func listAllFiles(ctx context.Context, t *testing.T, f *Fs, dirName string) []string {
bucket, directory := f.split(dirName)
foundFiles := []string{}
require.NoError(t, f.list(ctx, bucket, directory, "", false, true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
if !isDirectory {
foundFiles = append(foundFiles, object.Name)
}
return nil
}))
sort.Strings(foundFiles)
return foundFiles
}
func checkListing(ctx context.Context, t *testing.T, f *Fs, dirName string, expectedFiles []string) {
foundFiles := listAllFiles(ctx, t, f, dirName)
sort.Strings(expectedFiles)
assert.Equal(t, expectedFiles, foundFiles)
}
func (f *Fs) InternalTestLifecycleRules(t *testing.T) {
ctx := context.Background()
opt := map[string]string{}
t.Run("InitState", func(t *testing.T) {
// There should be no lifecycle rules at the outset
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
})
t.Run("DryRun", func(t *testing.T) {
// There should still be no lifecycle rules after each dry run operation
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
delete(opt, "daysFromHidingToDeleting")
opt["daysFromUploadingToHiding"] = "40"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
})
t.Run("RealThing", func(t *testing.T) {
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
delete(opt, "daysFromHidingToDeleting")
opt["daysFromUploadingToHiding"] = "40"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
})
}
// -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Metadata", f.InternalTestMetadata)
t.Run("Versions", f.InternalTestVersions)
t.Run("CleanupUnfinished", f.InternalTestCleanupUnfinished)
t.Run("LifecycleRules", f.InternalTestLifecycleRules)
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -478,14 +478,17 @@ func (up *largeUpload) Copy(ctx context.Context) (err error) {
remaining = up.size
)
g.SetLimit(up.f.opt.UploadConcurrency)
for part := range up.parts {
for part := 0; part < up.parts; part++ {
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in copying all the other parts.
if gCtx.Err() != nil {
break
}
reqSize := min(remaining, up.chunkSize)
reqSize := remaining
if reqSize >= up.chunkSize {
reqSize = up.chunkSize
}
part := part // for the closure
g.Go(func() (err error) {

View File

@@ -27,7 +27,6 @@ import (
"sync/atomic"
"time"
"github.com/golang-jwt/jwt/v4"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -75,7 +74,7 @@ var (
)
type boxCustomClaims struct {
jwt.StandardClaims
jwtutil.LegacyStandardClaims
BoxSubType string `json:"box_sub_type,omitempty"`
}
@@ -223,10 +222,8 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomC
}
claims = &boxCustomClaims{
//lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely
//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019
StandardClaims: jwt.StandardClaims{
Id: val,
LegacyStandardClaims: jwtutil.LegacyStandardClaims{
ID: val,
Issuer: boxConfig.BoxAppSettings.ClientID,
Subject: boxConfig.EnterpriseID,
Audience: tokenURL,
@@ -237,8 +234,8 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomC
return claims, nil
}
func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]any {
signingHeaders := map[string]any{
func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
signingHeaders := map[string]interface{}{
"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
}
return signingHeaders
@@ -1343,8 +1340,12 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
nextStreamPosition = streamPosition
for {
limit := f.opt.ListChunk
// box only allows a max of 500 events
limit := min(f.opt.ListChunk, 500)
if limit > 500 {
limit = 500
}
opts := rest.Opts{
Method: "GET",

View File

@@ -105,7 +105,7 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
const defaultDelay = 10
var tries int
outer:
for tries = range maxTries {
for tries = 0; tries < maxTries; tries++ {
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
if err != nil {
@@ -203,7 +203,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
errs := make(chan error, 1)
var wg sync.WaitGroup
outer:
for part := range session.TotalParts {
for part := 0; part < session.TotalParts; part++ {
// Check any errors
select {
case err = <-errs:
@@ -211,7 +211,10 @@ outer:
default:
}
reqSize := min(remaining, chunkSize)
reqSize := remaining
if reqSize >= chunkSize {
reqSize = chunkSize
}
// Make a block of memory
buf := make([]byte, reqSize)

View File

@@ -29,7 +29,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
@@ -1087,13 +1086,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return cachedEntries, nil
}
func (f *Fs) recurse(ctx context.Context, dir string, list *list.Helper) error {
func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
entries, err := f.List(ctx, dir)
if err != nil {
return err
}
for i := range entries {
for i := 0; i < len(entries); i++ {
innerDir, ok := entries[i].(fs.Directory)
if ok {
err := f.recurse(ctx, innerDir.Remote(), list)
@@ -1139,7 +1138,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}
// if we're here, we're gonna do a standard recursive traversal and cache everything
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
err = f.recurse(ctx, dir, list)
if err != nil {
return err
@@ -1429,7 +1428,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
}()
// wait until both are done
for range 2 {
for c := 0; c < 2; c++ {
<-done
}
}
@@ -1754,7 +1753,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
}
// Stats returns stats about the cache storage
func (f *Fs) Stats() (map[string]map[string]any, error) {
func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
return f.cache.Stats()
}
@@ -1934,7 +1933,7 @@ var commandHelp = []fs.CommandHelp{
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
switch name {
case "stats":
return f.Stats()

View File

@@ -360,7 +360,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
require.NoError(t, err)
require.Equal(t, int64(len(checkSample)), o.Size())
for i := range checkSample {
for i := 0; i < len(checkSample); i++ {
require.Equal(t, testData[i], checkSample[i])
}
}
@@ -387,7 +387,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
readData, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
require.NoError(t, err)
for i := range readData {
for i := 0; i < len(readData); i++ {
require.Equalf(t, testData[i], readData[i], "at byte %v", i)
}
}
@@ -688,7 +688,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
co, ok := o.(*cache.Object)
require.True(t, ok)
for i := range 4 { // read first 4
for i := 0; i < 4; i++ { // read first 4
_ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
}
cfs.CleanUpCache(true)
@@ -971,7 +971,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
f, err := os.CreateTemp("", "rclonecache-tempfile")
require.NoError(t, err)
for range int(cnt) {
for i := 0; i < int(cnt); i++ {
data := randStringBytes(int(chunk))
_, _ = f.Write(data)
}
@@ -1085,9 +1085,9 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
return err
}
func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]any, error) {
func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
var err error
var l []any
var l []interface{}
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
for _, ll := range list {
@@ -1215,7 +1215,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
var err error
var state cache.BackgroundUploadState
for range 2 {
for i := 0; i < 2; i++ {
select {
case state = <-buCh:
// continue
@@ -1293,7 +1293,7 @@ func (r *run) completeAllBackgroundUploads(t *testing.T, f fs.Fs, lastRemote str
func (r *run) retryBlock(block func() error, maxRetries int, rate time.Duration) error {
var err error
for range maxRetries {
for i := 0; i < maxRetries; i++ {
err = block()
if err == nil {
return nil

View File

@@ -17,7 +17,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata", "ListP"},
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache

View File

@@ -162,7 +162,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
randInstance := rand.New(rand.NewSource(time.Now().Unix()))
lastFile := ""
for i := range totalFiles {
for i := 0; i < totalFiles; i++ {
size := int64(randInstance.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin"

View File

@@ -182,7 +182,7 @@ func (r *Handle) queueOffset(offset int64) {
}
}
for i := range r.workers {
for i := 0; i < r.workers; i++ {
o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
if o < 0 || o >= r.cachedObject.Size() {
continue
@@ -222,7 +222,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
if !found {
// we're gonna give the workers a chance to pickup the chunk
// and retry a couple of times
for i := range r.cacheFs().opt.ReadRetries * 8 {
for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
if err == nil {
found = true

View File

@@ -209,7 +209,7 @@ func (p *plexConnector) authenticate() error {
if err != nil {
return err
}
var data map[string]any
var data map[string]interface{}
err = json.NewDecoder(resp.Body).Decode(&data)
if err != nil {
return fmt.Errorf("failed to obtain token: %w", err)
@@ -273,11 +273,11 @@ func (p *plexConnector) isPlaying(co *Object) bool {
}
// adapted from: https://stackoverflow.com/a/28878037 (credit)
func get(m any, path ...any) (any, bool) {
func get(m interface{}, path ...interface{}) (interface{}, bool) {
for _, p := range path {
switch idx := p.(type) {
case string:
if mm, ok := m.(map[string]any); ok {
if mm, ok := m.(map[string]interface{}); ok {
if val, found := mm[idx]; found {
m = val
continue
@@ -285,7 +285,7 @@ func get(m any, path ...any) (any, bool) {
}
return nil, false
case int:
if mm, ok := m.([]any); ok {
if mm, ok := m.([]interface{}); ok {
if len(mm) > idx {
m = mm[idx]
continue

View File

@@ -18,7 +18,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
bolt "go.etcd.io/bbolt"
"go.etcd.io/bbolt/errors"
)
// Constants
@@ -598,7 +597,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
})
if err != nil {
if err == errors.ErrDatabaseNotOpen {
if err == bolt.ErrDatabaseNotOpen {
// we're likely a late janitor and we need to end quietly as there's no guarantee of what exists anymore
return
}
@@ -607,16 +606,16 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
}
// Stats returns a go map with the stats key values
func (b *Persistent) Stats() (map[string]map[string]any, error) {
r := make(map[string]map[string]any)
r["data"] = make(map[string]any)
func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
r := make(map[string]map[string]interface{})
r["data"] = make(map[string]interface{})
r["data"]["oldest-ts"] = time.Now()
r["data"]["oldest-file"] = ""
r["data"]["newest-ts"] = time.Now()
r["data"]["newest-file"] = ""
r["data"]["total-chunks"] = 0
r["data"]["total-size"] = int64(0)
r["files"] = make(map[string]any)
r["files"] = make(map[string]interface{})
r["files"]["oldest-ts"] = time.Now()
r["files"]["oldest-name"] = ""
r["files"]["newest-ts"] = time.Now()

View File

@@ -356,8 +356,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
DirModTimeUpdatesOnWrite: true,
}).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)
f.features.ListR = nil // Recursive listing may cause chunker skip files
f.features.ListP = nil // ListP not supported yet
f.features.Disable("ListR") // Recursive listing may cause chunker skip files
return f, err
}
@@ -633,7 +632,7 @@ func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ct
// forbidChunk prints error message or raises error if file is chunk.
// First argument sets log prefix, use `false` to suppress message.
func (f *Fs) forbidChunk(o any, filePath string) error {
func (f *Fs) forbidChunk(o interface{}, filePath string) error {
if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" {
if f.opt.FailHard {
return fmt.Errorf("chunk overlap with %q", parentPath)
@@ -681,7 +680,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
circleSec := unixSec % closestPrimeZzzzSeconds
first4chars := strconv.FormatInt(circleSec, 36)
for range maxTransactionProbes {
for tries := 0; tries < maxTransactionProbes; tries++ {
f.xactIDMutex.Lock()
randomness := f.xactIDRand.Int63n(maxTwoBase36Digits + 1)
f.xactIDMutex.Unlock()
@@ -1190,7 +1189,10 @@ func (f *Fs) put(
}
tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactID)
size := min(c.sizeLeft, c.chunkSize)
size := c.sizeLeft
if size > c.chunkSize {
size = c.chunkSize
}
savedReadCount := c.readCount
// If a single chunk is expected, avoid the extra rename operation
@@ -1475,7 +1477,10 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
const bufLen = 1048576 // 1 MiB
buf := make([]byte, bufLen)
for size > 0 {
n := min(size, bufLen)
n := size
if n > bufLen {
n = bufLen
}
if _, err := io.ReadFull(in, buf[0:n]); err != nil {
return err
}
@@ -2475,7 +2480,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
if len(data) > maxMetadataSizeWritten {
return nil, false, ErrMetaTooBig
}
if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
return nil, false, errors.New("invalid json")
}
var metadata metaSimpleJSON

View File

@@ -40,7 +40,7 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
})
}
type settings map[string]any
type settings map[string]interface{}
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
fsName := strings.Split(f.Name(), "{")[0] // strip off hash

View File

@@ -46,7 +46,6 @@ func TestIntegration(t *testing.T) {
"DirCacheFlush",
"UserInfo",
"Disconnect",
"ListP",
},
}
if *fstest.RemoteName == "" {

View File

@@ -18,7 +18,7 @@ type CloudinaryEncoder interface {
ToStandardPath(string) string
// ToStandardName takes name in this encoding and converts
// it in Standard encoding.
ToStandardName(string, string) string
ToStandardName(string) string
// Encoded root of the remote (as passed into NewFs)
FromStandardFullPath(string) string
}

View File

@@ -8,9 +8,7 @@ import (
"fmt"
"io"
"net/http"
"net/url"
"path"
"slices"
"strconv"
"strings"
"time"
@@ -105,39 +103,19 @@ func init() {
Advanced: true,
Help: "Wait N seconds for eventual consistency of the databases that support the backend operation",
},
{
Name: "adjust_media_files_extensions",
Default: true,
Advanced: true,
Help: "Cloudinary handles media formats as a file attribute and strips it from the name, which is unlike most other file systems",
},
{
Name: "media_extensions",
Default: []string{
"3ds", "3g2", "3gp", "ai", "arw", "avi", "avif", "bmp", "bw",
"cr2", "cr3", "djvu", "dng", "eps3", "fbx", "flif", "flv", "gif",
"glb", "gltf", "hdp", "heic", "heif", "ico", "indd", "jp2", "jpe",
"jpeg", "jpg", "jxl", "jxr", "m2ts", "mov", "mp4", "mpeg", "mts",
"mxf", "obj", "ogv", "pdf", "ply", "png", "psd", "svg", "tga",
"tif", "tiff", "ts", "u3ma", "usdz", "wdp", "webm", "webp", "wmv"},
Advanced: true,
Help: "Cloudinary supported media extensions",
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
CloudName string `config:"cloud_name"`
APIKey string `config:"api_key"`
APISecret string `config:"api_secret"`
UploadPrefix string `config:"upload_prefix"`
UploadPreset string `config:"upload_preset"`
Enc encoder.MultiEncoder `config:"encoding"`
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
MediaExtensions []string `config:"media_extensions"`
AdjustMediaFilesExtensions bool `config:"adjust_media_files_extensions"`
CloudName string `config:"cloud_name"`
APIKey string `config:"api_key"`
APISecret string `config:"api_secret"`
UploadPrefix string `config:"upload_prefix"`
UploadPreset string `config:"upload_preset"`
Enc encoder.MultiEncoder `config:"encoding"`
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
}
// Fs represents a remote cloudinary server
@@ -225,18 +203,6 @@ func (f *Fs) FromStandardPath(s string) string {
// FromStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardName(s string) string {
if f.opt.AdjustMediaFilesExtensions {
parsedURL, err := url.Parse(s)
ext := ""
if err != nil {
fs.Logf(nil, "Error parsing URL: %v", err)
} else {
ext = path.Ext(parsedURL.Path)
if slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
s = strings.TrimSuffix(parsedURL.Path, ext)
}
}
}
return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
}
@@ -246,20 +212,8 @@ func (f *Fs) ToStandardPath(s string) string {
}
// ToStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardName(s string, assetURL string) string {
ext := ""
if f.opt.AdjustMediaFilesExtensions {
parsedURL, err := url.Parse(assetURL)
if err != nil {
fs.Logf(nil, "Error parsing URL: %v", err)
} else {
ext = path.Ext(parsedURL.Path)
if !slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
ext = ""
}
}
}
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&") + ext
func (f *Fs) ToStandardName(s string) string {
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&")
}
// FromStandardFullPath encodes a full path to Cloudinary standard
@@ -377,7 +331,10 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
}
for _, asset := range results.Assets {
remote := path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName, asset.SecureURL))
remote := api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName)
if dir != "" {
remote = path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName))
}
o := &Object{
fs: f,
remote: remote,


@@ -20,7 +20,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"golang.org/x/sync/errgroup"
@@ -266,9 +265,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
}
}
// Enable ListP always
features.ListP = f.ListP
// Enable Purge when any upstreams support it
if features.Purge == nil {
for _, u := range f.upstreams {
@@ -813,52 +809,24 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
if f.root == "" && dir == "" {
entries := make(fs.DirEntries, 0, len(f.upstreams))
entries = make(fs.DirEntries, 0, len(f.upstreams))
for combineDir := range f.upstreams {
d := fs.NewLimitedDirWrapper(combineDir, fs.NewDir(combineDir, f.when))
entries = append(entries, d)
}
return callback(entries)
return entries, nil
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return err
return nil, err
}
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := u.wrapEntries(ctx, entries)
if err != nil {
return err
}
return callback(entries)
entries, err = u.f.List(ctx, uRemote)
if err != nil {
return nil, err
}
listP := u.f.Features().ListP
if listP == nil {
entries, err := u.f.List(ctx, uRemote)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, dir, wrappedCallback)
return u.wrapEntries(ctx, entries)
}
// ListR lists the objects and directories of the Fs starting
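
The List/ListP hunks above replace a listing that returns one complete slice with a paged listing that streams entries to a callback. A minimal sketch of the idea only; Entry, pageSize and the function names are made up for illustration and are not the rclone fs API:

package main

import "fmt"

type Entry string

// list returns everything at once, so memory grows with the directory size.
func list(all []Entry) []Entry {
	return all
}

// listP pushes entries to the callback one page at a time; the caller can
// stop early by returning an error from the callback.
func listP(all []Entry, pageSize int, callback func([]Entry) error) error {
	for len(all) > 0 {
		n := min(pageSize, len(all)) // min builtin, Go 1.21+
		if err := callback(all[:n]); err != nil {
			return err
		}
		all = all[n:]
	}
	return nil
}

func main() {
	entries := []Entry{"a", "b", "c", "d", "e"}
	fmt.Println(list(entries))
	_ = listP(entries, 2, func(page []Entry) error {
		fmt.Println(page) // [a b], [c d], [e]
		return nil
	})
}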


@@ -29,7 +29,6 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
@@ -209,8 +208,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
if !operations.CanServerSideMove(wrappedFs) {
f.features.Disable("PutStream")
}
// Enable ListP always
f.features.ListP = f.ListP
return f, err
}
@@ -355,39 +352,11 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
// found.
// List entries and process them
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := f.processEntries(entries)
if err != nil {
return err
}
return callback(entries)
entries, err = f.Fs.List(ctx, dir)
if err != nil {
return nil, err
}
listP := f.Fs.Features().ListP
if listP == nil {
entries, err := f.Fs.List(ctx, dir)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, dir, wrappedCallback)
return f.processEntries(entries)
}
// ListR lists the objects and directories of the Fs starting


@@ -192,7 +192,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
dirNameEncrypt: dirNameEncrypt,
encryptedSuffix: ".bin",
}
c.buffers.New = func() any {
c.buffers.New = func() interface{} {
return new([blockSize]byte)
}
err := c.Key(password, salt)
@@ -336,7 +336,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
_, _ = result.WriteString(strconv.Itoa(dir) + ".")
// but we'll augment it with the nameKey for real calculation
for i := range len(c.nameKey) {
for i := 0; i < len(c.nameKey); i++ {
dir += int(c.nameKey[i])
}
@@ -418,7 +418,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
}
// add the nameKey to get the real rotate distance
for i := range len(c.nameKey) {
for i := 0; i < len(c.nameKey); i++ {
dir += int(c.nameKey[i])
}
@@ -664,7 +664,7 @@ func (n *nonce) increment() {
// add a uint64 to the nonce
func (n *nonce) add(x uint64) {
carry := uint16(0)
for i := range 8 {
for i := 0; i < 8; i++ {
digit := (*n)[i]
xDigit := byte(x)
x >>= 8
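
The cipher hunks above trade the classic counted loop for ranging over an integer, which needs Go 1.22 or later. A standalone sketch showing the two loops are equivalent; the key value is illustrative:

package main

import "fmt"

func main() {
	key := []byte("nameKey")

	sumOld := 0
	for i := 0; i < len(key); i++ {
		sumOld += int(key[i])
	}

	sumNew := 0
	for i := range len(key) { // range over an int, Go 1.22+
		sumNew += int(key[i])
	}

	fmt.Println(sumOld == sumNew) // true
}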


@@ -1307,7 +1307,10 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
end := len(ciphertext)
if underlyingLimit >= 0 {
end = min(int(underlyingOffset+underlyingLimit), len(ciphertext))
end = int(underlyingOffset + underlyingLimit)
if end > len(ciphertext) {
end = len(ciphertext)
}
}
reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
return reader, nil
@@ -1487,7 +1490,7 @@ func TestDecrypterRead(t *testing.T) {
assert.NoError(t, err)
// Test truncating the file at each possible point
for i := range len(file16) - 1 {
for i := 0; i < len(file16)-1; i++ {
what := fmt.Sprintf("truncating to %d/%d", i, len(file16))
cd := newCloseDetector(bytes.NewBuffer(file16[:i]))
fh, err := c.newDecrypter(cd)


@@ -18,7 +18,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
)
// Globals
@@ -294,9 +293,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// Enable ListP always
f.features.ListP = f.ListP
return f, err
}
@@ -420,40 +416,11 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := f.encryptEntries(ctx, entries)
if err != nil {
return err
}
return callback(entries)
entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
if err != nil {
return nil, err
}
listP := f.Fs.Features().ListP
encryptedDir := f.cipher.EncryptDirName(dir)
if listP == nil {
entries, err := f.Fs.List(ctx, encryptedDir)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, encryptedDir, wrappedCallback)
return f.encryptEntries(ctx, entries)
}
// ListR lists the objects and directories of the Fs starting
@@ -957,7 +924,7 @@ Usage Example:
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "decode":
out := make([]string, 0, len(arg))


@@ -25,7 +25,7 @@ func Pad(n int, buf []byte) []byte {
}
length := len(buf)
padding := n - (length % n)
for range padding {
for i := 0; i < padding; i++ {
buf = append(buf, byte(padding))
}
if (len(buf) % n) != 0 {
@@ -54,7 +54,7 @@ func Unpad(n int, buf []byte) ([]byte, error) {
if padding == 0 {
return nil, ErrorPaddingTooShort
}
for i := range padding {
for i := 0; i < padding; i++ {
if buf[length-1-i] != byte(padding) {
return nil, ErrorPaddingNotAllTheSame
}


@@ -18,7 +18,6 @@ import (
"net/http"
"os"
"path"
"slices"
"sort"
"strconv"
"strings"
@@ -38,8 +37,8 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
@@ -200,7 +199,13 @@ func driveScopes(scopesString string) (scopes []string) {
// Returns true if one of the scopes was "drive.appfolder"
func driveScopesContainsAppFolder(scopes []string) bool {
return slices.Contains(scopes, scopePrefix+"drive.appfolder")
for _, scope := range scopes {
if scope == scopePrefix+"drive.appfolder" {
return true
}
}
return false
}
func driveOAuthOptions() []fs.Option {
@@ -954,7 +959,12 @@ func parseDrivePath(path string) (root string, err error) {
type listFn func(*drive.File) bool
func containsString(slice []string, s string) bool {
return slices.Contains(slice, s)
for _, e := range slice {
if e == s {
return true
}
}
return false
}
// getFile returns drive.File for the ID passed and fields passed in
@@ -1143,7 +1153,13 @@ OUTER:
// Check the case of items is correct since
// the `=` operator is case insensitive.
if title != "" && title != item.Name {
found := slices.Contains(stems, item.Name)
found := false
for _, stem := range stems {
if stem == item.Name {
found = true
break
}
}
if !found {
continue
}
@@ -1196,7 +1212,6 @@ func fixMimeType(mimeTypeIn string) string {
}
return mimeTypeOut
}
func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
out = make(map[string][]string, len(in))
for k, v := range in {
@@ -1207,11 +1222,9 @@ func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
}
return out
}
func isInternalMimeType(mimeType string) bool {
return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
}
func isLinkMimeType(mimeType string) bool {
return strings.HasPrefix(mimeType, "application/x-link-")
}
@@ -1546,10 +1559,13 @@ func (f *Fs) getFileFields(ctx context.Context) (fields googleapi.Field) {
func (f *Fs) newRegularObject(ctx context.Context, remote string, info *drive.File) (obj fs.Object, err error) {
// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
if f.opt.SkipChecksumGphotos {
if slices.Contains(info.Spaces, "photos") {
info.Md5Checksum = ""
info.Sha1Checksum = ""
info.Sha256Checksum = ""
for _, space := range info.Spaces {
if space == "photos" {
info.Md5Checksum = ""
info.Sha1Checksum = ""
info.Sha256Checksum = ""
break
}
}
}
o := &Object{
@@ -1641,8 +1657,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.F
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithExportInfo(
ctx context.Context, remote string, info *drive.File,
extension, exportName, exportMimeType string, isDocument bool,
) (o fs.Object, err error) {
extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
// Note that resolveShortcut will have been called already if
// we are being called from a listing. However the drive.Item
// will have been resolved so this will do nothing.
@@ -1833,7 +1848,6 @@ func linkTemplate(mt string) *template.Template {
})
return _linkTemplates[mt]
}
func (f *Fs) fetchFormats(ctx context.Context) {
fetchFormatsOnce.Do(func() {
var about *drive.About
@@ -1879,8 +1893,7 @@ func (f *Fs) importFormats(ctx context.Context) map[string][]string {
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", false)
func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string) (
extension, mimeType string, isDocument bool,
) {
extension, mimeType string, isDocument bool) {
exportMimeTypes, isDocument := f.exportFormats(ctx)[itemMimeType]
if isDocument {
for _, _extension := range f.exportExtensions {
@@ -2189,7 +2202,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
wg := sync.WaitGroup{}
in := make(chan listREntry, listRInputBuffer)
out := make(chan error, f.ci.Checkers)
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
overflow := []listREntry{}
listed := 0
@@ -2227,7 +2240,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
wg.Add(1)
in <- listREntry{directoryID, dir}
for range f.ci.Checkers {
for i := 0; i < f.ci.Checkers; i++ {
go f.listRRunner(ctx, &wg, in, out, cb, sendJob)
}
go func() {
@@ -2236,8 +2249,11 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// if the input channel overflowed add the collected entries to the channel now
for len(overflow) > 0 {
mu.Lock()
l := len(overflow)
// only fill half of the channel to prevent entries being put into overflow again
l := min(len(overflow), listRInputBuffer/2)
if l > listRInputBuffer/2 {
l = listRInputBuffer / 2
}
wg.Add(l)
for _, d := range overflow[:l] {
in <- d
@@ -2257,7 +2273,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
mu.Unlock()
}()
// wait until all the workers finish
for range f.ci.Checkers {
for i := 0; i < f.ci.Checkers; i++ {
e := <-out
mu.Lock()
// if one worker returns an error early, close the input so all other workers exit
@@ -2673,7 +2689,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
if shortcutID != "" {
return f.delete(ctx, shortcutID, f.opt.UseTrash)
}
trashedFiles := false
var trashedFiles = false
if check {
found, err := f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, true, func(item *drive.File) bool {
if !item.Trashed {
@@ -2910,6 +2926,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
err := f.svc.Files.EmptyTrash().Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return err
}
@@ -3170,7 +3187,6 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
}
}()
}
func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (pageToken string, err error) {
var startPageToken *drive.StartPageToken
err = f.pacer.Call(func() (bool, error) {
@@ -3893,7 +3909,7 @@ Third delete all orphaned files to the trash
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "get":
out := make(map[string]string)
@@ -4002,13 +4018,14 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
case "query":
if len(arg) == 1 {
query := arg[0]
results, err := f.query(ctx, query)
var results, err = f.query(ctx, query)
if err != nil {
return nil, fmt.Errorf("failed to execute query: %q, error: %w", query, err)
}
return results, nil
} else {
return nil, errors.New("need a query argument")
}
return nil, errors.New("need a query argument")
case "rescue":
dirID := ""
_, delete := opt["delete"]
@@ -4068,7 +4085,6 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
}
return "", hash.ErrUnsupported
}
func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 && t != hash.SHA1 && t != hash.SHA256 {
return "", hash.ErrUnsupported
@@ -4083,8 +4099,7 @@ func (o *baseObject) Size() int64 {
// getRemoteInfoWithExport returns a drive.File and the export settings for the remote
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error,
) {
info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
@@ -4297,13 +4312,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
return o.baseObject.open(ctx, o.url, options...)
}
func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Update the size with what we are reading as it can change from
// the HEAD in the listing to this GET. This stops rclone marking
// the transfer as corrupted.
var offset, end int64 = 0, -1
newOptions := options[:0]
var newOptions = options[:0]
for _, o := range options {
// Note that Range requests don't work on Google docs:
// https://developers.google.com/drive/v3/web/manage-downloads#partial_download
@@ -4330,10 +4344,9 @@ func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in
}
return
}
func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
data := o.content
var data = o.content
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
@@ -4358,8 +4371,7 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
}
func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
src fs.ObjectInfo,
) (info *drive.File, err error) {
src fs.ObjectInfo) (info *drive.File, err error) {
// Make the API request to upload metadata and file data.
size := src.Size()
if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
@@ -4437,7 +4449,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return nil
}
func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
srcMimeType := fs.MimeType(ctx, src)
importMimeType := ""
@@ -4533,7 +4544,6 @@ func (o *baseObject) Metadata(ctx context.Context) (metadata fs.Metadata, err er
func (o *documentObject) ext() string {
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}
func (o *linkObject) ext() string {
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}
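
Several drive hunks above replace hand-rolled membership loops with slices.Contains from the standard library (Go 1.21+). A standalone sketch with illustrative data:

package main

import (
	"fmt"
	"slices"
)

// containsOld is the hand-rolled form the older code used.
func containsOld(ss []string, s string) bool {
	for _, e := range ss {
		if e == s {
			return true
		}
	}
	return false
}

func main() {
	spaces := []string{"drive", "photos"}
	fmt.Println(containsOld(spaces, "photos"))     // true
	fmt.Println(slices.Contains(spaces, "photos")) // true, Go 1.21+
}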


@@ -4,7 +4,6 @@ import (
"context"
"encoding/json"
"fmt"
"maps"
"strconv"
"strings"
"sync"
@@ -325,7 +324,9 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e
metadata := make(fs.Metadata, 16)
// Dump user metadata first as it overrides system metadata
maps.Copy(metadata, info.Properties)
for k, v := range info.Properties {
metadata[k] = v
}
// System metadata
metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission)
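
The metadata hunk above swaps a key-by-key copy loop for maps.Copy (Go 1.21+). A standalone sketch with illustrative data; in both forms keys from the source overwrite existing keys in the destination:

package main

import (
	"fmt"
	"maps"
)

func main() {
	metadata := map[string]string{"mtime": "2025-01-15"}
	properties := map[string]string{"owner": "alice"}

	// Pre-Go 1.21 form.
	for k, v := range properties {
		metadata[k] = v
	}

	// Equivalent one-liner from the maps package (Go 1.21+).
	maps.Copy(metadata, properties)

	fmt.Println(metadata) // map[mtime:2025-01-15 owner:alice]
}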


@@ -177,7 +177,10 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
if start >= rx.ContentLength {
break
}
reqSize = min(rx.ContentLength-start, int64(rx.f.opt.ChunkSize))
reqSize = rx.ContentLength - start
if reqSize >= int64(rx.f.opt.ChunkSize) {
reqSize = int64(rx.f.opt.ChunkSize)
}
chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
} else {
// If size unknown read into buffer


@@ -55,7 +55,10 @@ func (d *digest) Write(p []byte) (n int, err error) {
n = len(p)
for len(p) > 0 {
d.writtenMore = true
toWrite := min(bytesPerBlock-d.n, len(p))
toWrite := bytesPerBlock - d.n
if toWrite > len(p) {
toWrite = len(p)
}
_, err = d.blockHash.Write(p[:toWrite])
if err != nil {
panic(hashReturnedError)


@@ -11,7 +11,7 @@ import (
func testChunk(t *testing.T, chunk int) {
data := make([]byte, chunk)
for i := range chunk {
for i := 0; i < chunk; i++ {
data[i] = 'A'
}
for _, test := range []struct {


@@ -92,9 +92,6 @@ const (
maxFileNameLength = 255
)
type exportAPIFormat string
type exportExtension string // dotless
var (
// Description of how to auth for this app
dropboxConfig = &oauthutil.Config{
@@ -135,16 +132,6 @@ var (
DefaultTimeoutAsync: 10 * time.Second,
DefaultBatchSizeAsync: 100,
}
exportKnownAPIFormats = map[exportAPIFormat]exportExtension{
"markdown": "md",
"html": "html",
}
// Populated based on exportKnownAPIFormats
exportKnownExtensions = map[exportExtension]exportAPIFormat{}
paperExtension = ".paper"
paperTemplateExtension = ".papert"
)
// Gets an oauth config with the right scopes
@@ -260,61 +247,23 @@ folders.`,
Help: "Specify a different Dropbox namespace ID to use as the root for all paths.",
Default: "",
Advanced: true,
}, {
Name: "export_formats",
Help: `Comma separated list of preferred formats for exporting files
Certain Dropbox files can only be accessed by exporting them to another format.
These include Dropbox Paper documents.
For each such file, rclone will choose the first format on this list that Dropbox
considers valid. If none is valid, it will choose Dropbox's default format.
Known formats include: "html", "md" (markdown)`,
Default: fs.CommaSepList{"html", "md"},
Advanced: true,
}, {
Name: "skip_exports",
Help: "Skip exportable files in all listings.\n\nIf given, exportable files practically become invisible to rclone.",
Default: false,
Advanced: true,
}, {
Name: "show_all_exports",
Default: false,
Help: `Show all exportable files in listings.
Adding this flag will allow all exportable files to be server side copied.
Note that rclone doesn't add extensions to the exportable file names in this mode.
Do **not** use this flag when trying to download exportable files - rclone
will fail to download them.
`,
Advanced: true,
},
}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
}}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
})
for apiFormat, ext := range exportKnownAPIFormats {
exportKnownExtensions[ext] = apiFormat
}
}
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
AsyncBatch bool `config:"async_batch"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"`
RootNsid string `config:"root_namespace"`
ExportFormats fs.CommaSepList `config:"export_formats"`
SkipExports bool `config:"skip_exports"`
ShowAllExports bool `config:"show_all_exports"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
AsyncBatch bool `config:"async_batch"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"`
RootNsid string `config:"root_namespace"`
}
// Fs represents a remote dropbox server
@@ -334,18 +283,8 @@ type Fs struct {
pacer *fs.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none
batcher *batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata]
exportExts []exportExtension
}
type exportType int
const (
notExport exportType = iota // a regular file
exportHide // should be hidden
exportListOnly // listable, but can't export
exportExportable // can export
)
// Object describes a dropbox object
//
// Dropbox Objects always have full metadata
@@ -357,9 +296,6 @@ type Object struct {
bytes int64 // size of the object
modTime time.Time // time it was last modified
hash string // content_hash of the object
exportType exportType
exportAPIFormat exportAPIFormat
}
// Name of the remote (as passed into NewFs)
@@ -500,14 +436,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
HeaderGenerator: f.headerGenerator,
}
for _, e := range opt.ExportFormats {
ext := exportExtension(e)
if exportKnownExtensions[ext] == "" {
return nil, fmt.Errorf("dropbox: unknown export format '%s'", e)
}
f.exportExts = append(f.exportExts, ext)
}
// unauthorized config for endpoints that fail with auth
ucfg := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
@@ -660,126 +588,38 @@ func (f *Fs) setRoot(root string) {
}
}
type getMetadataResult struct {
entry files.IsMetadata
notFound bool
err error
}
// getMetadata gets the metadata for a file or directory
func (f *Fs) getMetadata(ctx context.Context, objPath string) (res getMetadataResult) {
res.err = f.pacer.Call(func() (bool, error) {
res.entry, res.err = f.srv.GetMetadata(&files.GetMetadataArg{
func (f *Fs) getMetadata(ctx context.Context, objPath string) (entry files.IsMetadata, notFound bool, err error) {
err = f.pacer.Call(func() (bool, error) {
entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
Path: f.opt.Enc.FromStandardPath(objPath),
})
return shouldRetry(ctx, res.err)
return shouldRetry(ctx, err)
})
if res.err != nil {
switch e := res.err.(type) {
if err != nil {
switch e := err.(type) {
case files.GetMetadataAPIError:
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
res.notFound = true
res.err = nil
notFound = true
err = nil
}
}
}
return
}
// Get metadata such that the result would be exported with the given extension
// Return a channel that will eventually receive the metadata
func (f *Fs) getMetadataForExt(ctx context.Context, filePath string, wantExportExtension exportExtension) chan getMetadataResult {
ch := make(chan getMetadataResult, 1)
wantDownloadable := (wantExportExtension == "")
go func() {
defer close(ch)
res := f.getMetadata(ctx, filePath)
info, ok := res.entry.(*files.FileMetadata)
if !ok { // Can't check anything about file, just return what we have
ch <- res
return
}
// Return notFound if downloadability or extension doesn't match
if wantDownloadable != info.IsDownloadable {
ch <- getMetadataResult{notFound: true}
return
}
if !info.IsDownloadable {
_, ext := f.chooseExportFormat(info)
if ext != wantExportExtension {
ch <- getMetadataResult{notFound: true}
return
}
}
// Return our real result or error
ch <- res
}()
return ch
}
// For a given rclone-path, figure out what the Dropbox-path may be, in order of preference.
// Multiple paths might be plausible, due to export path munging.
func (f *Fs) possibleMetadatas(ctx context.Context, filePath string) (ret []<-chan getMetadataResult) {
ret = []<-chan getMetadataResult{}
// Prefer an exact match
ret = append(ret, f.getMetadataForExt(ctx, filePath, ""))
// Check if we're plausibly an export path, otherwise we're done
if f.opt.SkipExports || f.opt.ShowAllExports {
return
}
dotted := path.Ext(filePath)
if dotted == "" {
return
}
ext := exportExtension(dotted[1:])
if exportKnownExtensions[ext] == "" {
return
}
// We might be an export path! Try all possibilities
base := strings.TrimSuffix(filePath, dotted)
// `foo.papert.md` will only come from `foo.papert`. Never check something like `foo.papert.paper`
if strings.HasSuffix(base, paperTemplateExtension) {
ret = append(ret, f.getMetadataForExt(ctx, base, ext))
return
}
// Otherwise, try both `foo.md` coming from `foo`, or from `foo.paper`
ret = append(ret, f.getMetadataForExt(ctx, base, ext))
ret = append(ret, f.getMetadataForExt(ctx, base+paperExtension, ext))
return
}
// getFileMetadata gets the metadata for a file
func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (*files.FileMetadata, error) {
var res getMetadataResult
// Try all possible metadatas
possibleMetadatas := f.possibleMetadatas(ctx, filePath)
for _, ch := range possibleMetadatas {
res = <-ch
if res.err != nil {
return nil, res.err
}
if !res.notFound {
break
}
func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *files.FileMetadata, err error) {
entry, notFound, err := f.getMetadata(ctx, filePath)
if err != nil {
return nil, err
}
if res.notFound {
if notFound {
return nil, fs.ErrorObjectNotFound
}
fileInfo, ok := res.entry.(*files.FileMetadata)
fileInfo, ok := entry.(*files.FileMetadata)
if !ok {
if _, ok = res.entry.(*files.FolderMetadata); ok {
if _, ok = entry.(*files.FolderMetadata); ok {
return nil, fs.ErrorIsDir
}
return nil, fs.ErrorNotAFile
@@ -788,15 +628,15 @@ func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (*files.FileM
}
// getDirMetadata gets the metadata for a directory
func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (*files.FolderMetadata, error) {
res := f.getMetadata(ctx, dirPath)
if res.err != nil {
return nil, res.err
func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (dirInfo *files.FolderMetadata, err error) {
entry, notFound, err := f.getMetadata(ctx, dirPath)
if err != nil {
return nil, err
}
if res.notFound {
if notFound {
return nil, fs.ErrorDirNotFound
}
dirInfo, ok := res.entry.(*files.FolderMetadata)
dirInfo, ok := entry.(*files.FolderMetadata)
if !ok {
return nil, fs.ErrorIsFile
}
@@ -996,15 +836,16 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
var res *files.ListFolderResult
for {
if !started {
arg := files.NewListFolderArg(f.opt.Enc.FromStandardPath(root))
arg.Recursive = false
arg.Limit = 1000
arg := files.ListFolderArg{
Path: f.opt.Enc.FromStandardPath(root),
Recursive: false,
Limit: 1000,
}
if root == "/" {
arg.Path = "" // Specify root folder as empty string
}
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(arg)
res, err = f.srv.ListFolder(&arg)
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1057,9 +898,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if err != nil {
return nil, err
}
if o.(*Object).exportType.listable() {
entries = append(entries, o)
}
entries = append(entries, o)
}
}
if !res.HasMore {
@@ -1145,14 +984,16 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
}
// check directory empty
arg := files.NewListFolderArg(encRoot)
arg.Recursive = false
arg := files.ListFolderArg{
Path: encRoot,
Recursive: false,
}
if root == "/" {
arg.Path = "" // Specify root folder as empty string
}
var res *files.ListFolderResult
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(arg)
res, err = f.srv.ListFolder(&arg)
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1333,16 +1174,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return shouldRetry(ctx, err)
})
if err != nil && createArg.Settings.Expires != nil && strings.Contains(err.Error(), sharing.SharedLinkSettingsErrorNotAuthorized) {
// Some plans can't create links with expiry
fs.Debugf(absPath, "can't create link with expiry, trying without")
createArg.Settings.Expires = nil
err = f.pacer.Call(func() (bool, error) {
linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
return shouldRetry(ctx, err)
})
}
if err != nil && strings.Contains(err.Error(),
sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
@@ -1507,14 +1338,16 @@ func (f *Fs) changeNotifyCursor(ctx context.Context) (cursor string, err error)
var startCursor *files.ListFolderGetLatestCursorResult
err = f.pacer.Call(func() (bool, error) {
arg := files.NewListFolderArg(f.opt.Enc.FromStandardPath(f.slashRoot))
arg.Recursive = true
arg := files.ListFolderArg{
Path: f.opt.Enc.FromStandardPath(f.slashRoot),
Recursive: true,
}
if arg.Path == "/" {
arg.Path = ""
}
startCursor, err = f.srv.ListFolderGetLatestCursor(arg)
startCursor, err = f.srv.ListFolderGetLatestCursor(&arg)
return shouldRetry(ctx, err)
})
@@ -1618,50 +1451,8 @@ func (f *Fs) Shutdown(ctx context.Context) error {
return nil
}
func (f *Fs) chooseExportFormat(info *files.FileMetadata) (exportAPIFormat, exportExtension) {
// Find API export formats Dropbox supports for this file
// Sometimes Dropbox lists a format in ExportAs but not ExportOptions, so check both
ei := info.ExportInfo
dropboxFormatStrings := append([]string{ei.ExportAs}, ei.ExportOptions...)
// Find which extensions these correspond to
exportExtensions := map[exportExtension]exportAPIFormat{}
var dropboxPreferredAPIFormat exportAPIFormat
var dropboxPreferredExtension exportExtension
for _, format := range dropboxFormatStrings {
apiFormat := exportAPIFormat(format)
// Only consider formats we know about
if ext, ok := exportKnownAPIFormats[apiFormat]; ok {
if dropboxPreferredAPIFormat == "" {
dropboxPreferredAPIFormat = apiFormat
dropboxPreferredExtension = ext
}
exportExtensions[ext] = apiFormat
}
}
// See if the user picked a valid extension
for _, ext := range f.exportExts {
if apiFormat, ok := exportExtensions[ext]; ok {
return apiFormat, ext
}
}
// If no matches, prefer the first valid format Dropbox lists
return dropboxPreferredAPIFormat, dropboxPreferredExtension
}
// ------------------------------------------------------------
func (et exportType) listable() bool {
return et != exportHide
}
// something we should _try_ to export
func (et exportType) exportable() bool {
return et == exportExportable || et == exportListOnly
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
@@ -1705,32 +1496,6 @@ func (o *Object) Size() int64 {
return o.bytes
}
func (o *Object) setMetadataForExport(info *files.FileMetadata) {
o.bytes = -1
o.hash = ""
if o.fs.opt.SkipExports {
o.exportType = exportHide
return
}
if o.fs.opt.ShowAllExports {
o.exportType = exportListOnly
return
}
var exportExt exportExtension
o.exportAPIFormat, exportExt = o.fs.chooseExportFormat(info)
if o.exportAPIFormat == "" {
o.exportType = exportHide
} else {
o.exportType = exportExportable
// get rid of any paper extension, if present
o.remote = strings.TrimSuffix(o.remote, paperExtension)
// add the export extension
o.remote += "." + string(exportExt)
}
}
// setMetadataFromEntry sets the fs data from a files.FileMetadata
//
// This isn't a complete set of metadata and has an inaccurate date
@@ -1739,10 +1504,6 @@ func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
o.bytes = int64(info.Size)
o.modTime = info.ClientModified
o.hash = info.ContentHash
if !info.IsDownloadable {
o.setMetadataForExport(info)
}
return nil
}
@@ -1806,27 +1567,6 @@ func (o *Object) Storable() bool {
return true
}
func (o *Object) export(ctx context.Context) (in io.ReadCloser, err error) {
if o.exportType == exportListOnly || o.exportAPIFormat == "" {
fs.Debugf(o.remote, "No export format found")
return nil, fs.ErrorObjectNotFound
}
arg := files.ExportArg{Path: o.id, ExportFormat: string(o.exportAPIFormat)}
var exportResult *files.ExportResult
err = o.fs.pacer.Call(func() (bool, error) {
exportResult, in, err = o.fs.srv.Export(&arg)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
o.bytes = int64(exportResult.ExportMetadata.Size)
o.hash = exportResult.ExportMetadata.ExportHash
return
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.fs.opt.SharedFiles {
@@ -1846,10 +1586,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return
}
if o.exportType.exportable() {
return o.export(ctx)
}
fs.FixRangeOption(options, o.bytes)
headers := fs.OpenOptionHeaders(options)
arg := files.DownloadArg{


@@ -1,16 +1,9 @@
package dropbox
import (
"context"
"io"
"strings"
"testing"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestInternalCheckPathLength(t *testing.T) {
@@ -49,54 +42,3 @@ func TestInternalCheckPathLength(t *testing.T) {
assert.Equal(t, test.ok, err == nil, test.in)
}
}
func (f *Fs) importPaperForTest(t *testing.T) {
content := `# test doc
Lorem ipsum __dolor__ sit amet
[link](http://google.com)
`
arg := files.PaperCreateArg{
Path: f.slashRootSlash + "export.paper",
ImportFormat: &files.ImportFormat{Tagged: dropbox.Tagged{Tag: files.ImportFormatMarkdown}},
}
var err error
err = f.pacer.Call(func() (bool, error) {
reader := strings.NewReader(content)
_, err = f.srv.PaperCreate(&arg, reader)
return shouldRetry(context.Background(), err)
})
require.NoError(t, err)
}
func (f *Fs) InternalTestPaperExport(t *testing.T) {
ctx := context.Background()
f.importPaperForTest(t)
f.exportExts = []exportExtension{"html"}
obj, err := f.NewObject(ctx, "export.html")
require.NoError(t, err)
rc, err := obj.Open(ctx)
require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }()
buf, err := io.ReadAll(rc)
require.NoError(t, err)
text := string(buf)
for _, excerpt := range []string{
"Lorem ipsum",
"<b>dolor</b>",
`href="http://google.com"`,
} {
require.Contains(t, text, excerpt)
}
}
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PaperExport", f.InternalTestPaperExport)
}
var _ fstests.InternalTester = (*Fs)(nil)
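
The export-format hunks in the dropbox backend above pick the first user-preferred extension that the server also offers, falling back to the server's first format we know about. A simplified, illustrative sketch of that matching idea, not the backend code itself:

package main

import "fmt"

// chooseFormat returns the first preferred extension the server offers,
// otherwise the server's first format that we know how to handle.
func chooseFormat(offered []string, known map[string]string, prefs []string) (format, ext string) {
	available := map[string]string{} // extension -> server format
	for _, f := range offered {
		if e, ok := known[f]; ok {
			if format == "" {
				format, ext = f, e // remember the server's first valid format
			}
			available[e] = f
		}
	}
	for _, p := range prefs {
		if f, ok := available[p]; ok {
			return f, p
		}
	}
	return format, ext
}

func main() {
	known := map[string]string{"markdown": "md", "html": "html"}
	fmt.Println(chooseFormat([]string{"markdown", "html"}, known, []string{"html", "md"})) // html html
}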


@@ -216,11 +216,11 @@ var ItemFields = mustFields(Item{})
// fields returns the JSON fields in use by opt as a | separated
// string.
func fields(opt any) (pipeTags string, err error) {
func fields(opt interface{}) (pipeTags string, err error) {
var tags []string
def := reflect.ValueOf(opt)
defType := def.Type()
for i := range def.NumField() {
for i := 0; i < def.NumField(); i++ {
field := defType.Field(i)
tag, ok := field.Tag.Lookup("json")
if !ok {
@@ -239,7 +239,7 @@ func fields(opt any) (pipeTags string, err error) {
// mustFields returns the JSON fields in use by opt as a | separated
// string. It panics on failure.
func mustFields(opt any) string {
func mustFields(opt interface{}) string {
tags, err := fields(opt)
if err != nil {
panic(err)
@@ -351,12 +351,12 @@ type SpaceInfo struct {
// DeleteResponse is returned from doDeleteFile
type DeleteResponse struct {
Status
Deleted []string `json:"deleted"`
Errors []any `json:"errors"`
ID string `json:"fi_id"`
BackgroundTask int `json:"backgroundtask"`
UsSize string `json:"us_size"`
PaSize string `json:"pa_size"`
Deleted []string `json:"deleted"`
Errors []interface{} `json:"errors"`
ID string `json:"fi_id"`
BackgroundTask int `json:"backgroundtask"`
UsSize string `json:"us_size"`
PaSize string `json:"pa_size"`
//SpaceInfo SpaceInfo `json:"spaceinfo"`
}


@@ -371,7 +371,7 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
}
// params for rpc
type params map[string]any
type params map[string]interface{}
// rpc calls the rpc.php method of the SME file fabric
//


@@ -10,7 +10,6 @@ import (
"net/http"
"net/url"
"path"
"slices"
"strings"
"time"
@@ -170,9 +169,11 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
}
if apiErr, ok := err.(files_sdk.ResponseError); ok {
if slices.Contains(retryErrorCodes, apiErr.HttpCode) {
fs.Debugf(nil, "Retrying API error %v", err)
return true, err
for _, e := range retryErrorCodes {
if apiErr.HttpCode == e {
fs.Debugf(nil, "Retrying API error %v", err)
return true, err
}
}
}


@@ -17,7 +17,7 @@ import (
"github.com/stretchr/testify/require"
)
type settings map[string]any
type settings map[string]interface{}
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
fsName := strings.Split(f.Name(), "{")[0] // strip off hash


@@ -25,7 +25,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
@@ -734,7 +734,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
// implementation of ListR
func (f *Fs) listR(ctx context.Context, dir string, list *list.Helper) (err error) {
func (f *Fs) listR(ctx context.Context, dir string, list *walk.ListRHelper) (err error) {
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
@@ -820,7 +820,7 @@ func (f *Fs) listR(ctx context.Context, dir string, list *list.Helper) (err erro
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
err = f.listR(ctx, dir, list)
if err != nil {
return err


@@ -35,7 +35,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
@@ -845,7 +845,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)


@@ -4,7 +4,6 @@ package googlephotos
import (
"path"
"slices"
"strings"
"sync"
@@ -120,7 +119,7 @@ func (as *albums) _del(album *api.Album) {
dirs := as.path[dir]
for i, dir := range dirs {
if dir == leaf {
dirs = slices.Delete(dirs, i, i+1)
dirs = append(dirs[:i], dirs[i+1:]...)
break
}
}
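
The albums hunk above replaces the classic append splice with slices.Delete (Go 1.21+). A standalone sketch on copies so both results are visible; the data is illustrative:

package main

import (
	"fmt"
	"slices"
)

func main() {
	a := []string{"2024", "2025", "trash"}
	b := slices.Clone(a)
	i := 2

	a = append(a[:i], a[i+1:]...) // classic in-place splice
	b = slices.Delete(b, i, i+1)  // equivalent, Go 1.21+

	fmt.Println(a, b) // [2024 2025] [2024 2025]
}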


@@ -167,7 +167,7 @@ listings and won't be transferred.`,
The Google API will deliver images and video which aren't full
resolution, and/or have EXIF data missing.
However if you use the gphotosdl proxy then you can download original,
However if you ue the gphotosdl proxy tnen you can download original,
unchanged images.
This runs a headless browser in the background.
@@ -388,7 +388,7 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
Method: "GET",
RootURL: "https://accounts.google.com/.well-known/openid-configuration",
}
var openIDconfig map[string]any
var openIDconfig map[string]interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig)
return shouldRetry(ctx, resp, err)
@@ -448,7 +448,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
"token_type_hint": []string{"access_token"},
},
}
var res any
var res interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &res)
return shouldRetry(ctx, resp, err)


@@ -2,7 +2,6 @@ package googlephotos
import (
"context"
"errors"
"fmt"
"io"
"net/http"
@@ -36,7 +35,7 @@ func TestIntegration(t *testing.T) {
*fstest.RemoteName = "TestGooglePhotos:"
}
f, err := fs.NewFs(ctx, *fstest.RemoteName)
if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
if err == fs.ErrorNotFoundInConfigFile {
t.Skipf("Couldn't create google photos backend - skipping tests: %v", err)
}
require.NoError(t, err)


@@ -24,7 +24,7 @@ import (
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "drop":
return nil, f.db.Stop(true)


@@ -18,7 +18,6 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/kv"
)
@@ -183,9 +182,6 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
}
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
// Enable ListP always
f.features.ListP = f.ListP
cache.PinUntilFinalized(f.Fs, f)
return f, err
}
@@ -241,39 +237,10 @@ func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries,
// List the objects and directories in dir into entries.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := f.wrapEntries(entries)
if err != nil {
return err
}
return callback(entries)
if entries, err = f.Fs.List(ctx, dir); err != nil {
return nil, err
}
listP := f.Fs.Features().ListP
if listP == nil {
entries, err := f.Fs.List(ctx, dir)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, dir, wrappedCallback)
return f.wrapEntries(entries)
}
// ListR lists the objects and directories recursively into out.


@@ -6,7 +6,6 @@ import (
"encoding/gob"
"errors"
"fmt"
"maps"
"strings"
"time"
@@ -196,7 +195,9 @@ func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) {
r.Fp = op.fp
}
maps.Copy(r.Hashes, op.hashes)
for hashType, hashVal := range op.hashes {
r.Hashes[hashType] = hashVal
}
if data, err = r.encode(op.key); err != nil {
return fmt.Errorf("marshal failed: %w", err)
}


@@ -52,7 +52,10 @@ func writeByBlock(p []byte, writer io.Writer, blockSize uint32, bytesInBlock *ui
total := len(p)
nullBytes := make([]byte, blockSize)
for len(p) > 0 {
toWrite := min(int(blockSize-*bytesInBlock), len(p))
toWrite := int(blockSize - *bytesInBlock)
if toWrite > len(p) {
toWrite = len(p)
}
c, err := writer.Write(p[:toWrite])
*bytesInBlock += uint32(c)
*onlyNullBytesInBlock = *onlyNullBytesInBlock && bytes.Equal(nullBytes[:toWrite], p[:toWrite])
@@ -273,7 +276,7 @@ func (h *hidriveHash) Sum(b []byte) []byte {
}
checksum := zeroSum
for i := range h.levels {
for i := 0; i < len(h.levels); i++ {
level := h.levels[i]
if i < len(h.levels)-1 {
// Aggregate non-empty non-final levels.


@@ -216,7 +216,7 @@ func TestLevelWrite(t *testing.T) {
func TestLevelIsFull(t *testing.T) {
content := [hidrivehash.Size]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
l := hidrivehash.NewLevel()
for range 256 {
for i := 0; i < 256; i++ {
assert.False(t, l.(internal.LevelHash).IsFull())
written, err := l.Write(content[:])
assert.Equal(t, len(content), written)


@@ -180,6 +180,7 @@ func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Op
}
addHeaders(req, opt)
res, err := noRedir.Do(req)
if err != nil {
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be sent: %v", err)
return createFileResult()
@@ -248,14 +249,6 @@ func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err
f.httpClient = client
f.endpoint = u
f.endpointURL = u.String()
if isFile {
// Correct root if definitely pointing to a file
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
return isFile, nil
}
@@ -512,7 +505,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
entries = append(entries, entry)
entriesMu.Unlock()
}
for range checkers {
for i := 0; i < checkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
@@ -747,7 +740,7 @@ It doesn't return anything.
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "set":
newOpt := f.opt


@@ -76,7 +76,7 @@ func (c *Client) DriveService() (*DriveService, error) {
// This function is the main entry point for making requests to the iCloud
// API. If the initial request returns a 401 (Unauthorized), it will try to
// reauthenticate and retry the request.
func (c *Client) Request(ctx context.Context, opts rest.Opts, request any, response any) (resp *http.Response, err error) {
func (c *Client) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
resp, err = c.Session.Request(ctx, opts, request, response)
if err != nil && resp != nil {
// try to reauth
@@ -100,7 +100,7 @@ func (c *Client) Request(ctx context.Context, opts rest.Opts, request any, respo
// This function is useful when you have a session that is already
// authenticated, but you need to make a request without triggering
// a re-authentication.
func (c *Client) RequestNoReAuth(ctx context.Context, opts rest.Opts, request any, response any) (resp *http.Response, err error) {
func (c *Client) RequestNoReAuth(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
// Make the request without re-authenticating
resp, err = c.Session.Request(ctx, opts, request, response)
return resp, err
@@ -161,6 +161,6 @@ func newRequestError(Status string, Text string) *RequestError {
}
// newRequestErrorf makes a new error from sprintf parameters.
func newRequestErrorf(Status string, Text string, Parameters ...any) *RequestError {
func newRequestErrorf(Status string, Text string, Parameters ...interface{}) *RequestError {
return newRequestError(strings.ToLower(Status), fmt.Sprintf(Text, Parameters...))
}
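
Many hunks in this comparison only switch between any and interface{}. Since Go 1.18 any is an alias for interface{}, so the two signatures below are identical to the compiler. A standalone sketch:

package main

import "fmt"

func describeOld(v interface{}) string { return fmt.Sprintf("%T", v) }

func describeNew(v any) string { return fmt.Sprintf("%T", v) } // any == interface{} since Go 1.18

func main() {
	fmt.Println(describeOld(42), describeNew("hello")) // int string
}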


@@ -476,7 +476,7 @@ func (d *DriveService) MoveItemByDriveID(ctx context.Context, id, etag, dstID st
// CopyDocByItemID copies a document by its item ID.
func (d *DriveService) CopyDocByItemID(ctx context.Context, itemID string) (*DriveItemRaw, *http.Response, error) {
// putting name in info doesn't work. extension does work so assume this is a bug in the endpoint
// putting name in info doesnt work. extension does work so assume this is a bug in the endpoint
values := map[string]any{
"info_to_update": map[string]any{},
}
@@ -631,7 +631,7 @@ func NewUpdateFileInfo() UpdateFileInfo {
FileFlags: FileFlags{
IsExecutable: true,
IsHidden: false,
IsWritable: true,
IsWritable: false,
},
}
}
@@ -733,8 +733,8 @@ type DocumentUpdateResponse struct {
StatusCode int `json:"status_code"`
ErrorMessage string `json:"error_message"`
} `json:"status"`
OperationID any `json:"operation_id"`
Document *Document `json:"document"`
OperationID interface{} `json:"operation_id"`
Document *Document `json:"document"`
} `json:"results"`
}
@@ -765,9 +765,9 @@ type Document struct {
IsWritable bool `json:"is_writable"`
IsHidden bool `json:"is_hidden"`
} `json:"file_flags"`
LastOpenedTime int64 `json:"lastOpenedTime"`
RestorePath any `json:"restorePath"`
HasChainedParent bool `json:"hasChainedParent"`
LastOpenedTime int64 `json:"lastOpenedTime"`
RestorePath interface{} `json:"restorePath"`
HasChainedParent bool `json:"hasChainedParent"`
}
// DriveID returns the drive ID of the Document.


@@ -3,13 +3,13 @@ package api
import (
"context"
"fmt"
"maps"
"net/http"
"net/url"
"slices"
"strings"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/rest"
)
@@ -35,7 +35,7 @@ type Session struct {
// }
// Request makes a request
func (s *Session) Request(ctx context.Context, opts rest.Opts, request any, response any) (*http.Response, error) {
func (s *Session) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (*http.Response, error) {
resp, err := s.srv.CallJSON(ctx, &opts, &request, &response)
if err != nil {
@@ -129,7 +129,7 @@ func (s *Session) AuthWithToken(ctx context.Context) error {
// Validate2FACode validates the 2FA code
func (s *Session) Validate2FACode(ctx context.Context, code string) error {
values := map[string]any{"securityCode": map[string]string{"code": code}}
values := map[string]interface{}{"securityCode": map[string]string{"code": code}}
body, err := IntoReader(values)
if err != nil {
return err
@@ -220,7 +220,9 @@ func (s *Session) GetAuthHeaders(overwrite map[string]string) map[string]string
"Referer": fmt.Sprintf("%s/", homeEndpoint),
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
}
maps.Copy(headers, overwrite)
for k, v := range overwrite {
headers[k] = v
}
return headers
}
@@ -228,7 +230,9 @@ func (s *Session) GetAuthHeaders(overwrite map[string]string) map[string]string
func (s *Session) GetHeaders(overwrite map[string]string) map[string]string {
headers := GetCommonHeaders(map[string]string{})
headers["Cookie"] = s.GetCookieString()
maps.Copy(headers, overwrite)
for k, v := range overwrite {
headers[k] = v
}
return headers
}
@@ -250,7 +254,9 @@ func GetCommonHeaders(overwrite map[string]string) map[string]string {
"Referer": fmt.Sprintf("%s/", baseEndpoint),
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
}
maps.Copy(headers, overwrite)
for k, v := range overwrite {
headers[k] = v
}
return headers
}
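The header-merging hunks above differ only in whether the override step uses the stdlib maps.Copy (Go 1.21+) or a hand-written loop; both overwrite existing keys in headers with the values from overwrite. A minimal sketch of that pattern, with placeholder header names rather than the backend's real ones:

package main

import (
	"fmt"
	"maps"
)

// getCommonHeaders returns a base header set and applies any overrides.
// maps.Copy (Go 1.21+) copies every key from overwrite into headers,
// replacing existing values - the same behaviour as the manual loop.
func getCommonHeaders(overwrite map[string]string) map[string]string {
	headers := map[string]string{
		"Content-Type": "application/json",
		"Accept":       "*/*",
	}
	maps.Copy(headers, overwrite)
	return headers
}

func main() {
	h := getCommonHeaders(map[string]string{"Accept": "text/plain", "X-Extra": "1"})
	fmt.Println(h["Accept"], h["X-Extra"]) // text/plain 1
}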
@@ -332,33 +338,33 @@ type AccountInfo struct {
// ValidateDataDsInfo represents a validation info
type ValidateDataDsInfo struct {
HsaVersion int `json:"hsaVersion"`
LastName string `json:"lastName"`
ICDPEnabled bool `json:"iCDPEnabled"`
TantorMigrated bool `json:"tantorMigrated"`
Dsid string `json:"dsid"`
HsaEnabled bool `json:"hsaEnabled"`
IsHideMyEmailSubscriptionActive bool `json:"isHideMyEmailSubscriptionActive"`
IroncadeMigrated bool `json:"ironcadeMigrated"`
Locale string `json:"locale"`
BrZoneConsolidated bool `json:"brZoneConsolidated"`
ICDRSCapableDeviceList string `json:"ICDRSCapableDeviceList"`
IsManagedAppleID bool `json:"isManagedAppleID"`
IsCustomDomainsFeatureAvailable bool `json:"isCustomDomainsFeatureAvailable"`
IsHideMyEmailFeatureAvailable bool `json:"isHideMyEmailFeatureAvailable"`
ContinueOnDeviceEligibleDeviceInfo []string `json:"ContinueOnDeviceEligibleDeviceInfo"`
Gilligvited bool `json:"gilligvited"`
AppleIDAliases []any `json:"appleIdAliases"`
UbiquityEOLEnabled bool `json:"ubiquityEOLEnabled"`
IsPaidDeveloper bool `json:"isPaidDeveloper"`
CountryCode string `json:"countryCode"`
NotificationID string `json:"notificationId"`
PrimaryEmailVerified bool `json:"primaryEmailVerified"`
ADsID string `json:"aDsID"`
Locked bool `json:"locked"`
ICDRSCapableDeviceCount int `json:"ICDRSCapableDeviceCount"`
HasICloudQualifyingDevice bool `json:"hasICloudQualifyingDevice"`
PrimaryEmail string `json:"primaryEmail"`
HsaVersion int `json:"hsaVersion"`
LastName string `json:"lastName"`
ICDPEnabled bool `json:"iCDPEnabled"`
TantorMigrated bool `json:"tantorMigrated"`
Dsid string `json:"dsid"`
HsaEnabled bool `json:"hsaEnabled"`
IsHideMyEmailSubscriptionActive bool `json:"isHideMyEmailSubscriptionActive"`
IroncadeMigrated bool `json:"ironcadeMigrated"`
Locale string `json:"locale"`
BrZoneConsolidated bool `json:"brZoneConsolidated"`
ICDRSCapableDeviceList string `json:"ICDRSCapableDeviceList"`
IsManagedAppleID bool `json:"isManagedAppleID"`
IsCustomDomainsFeatureAvailable bool `json:"isCustomDomainsFeatureAvailable"`
IsHideMyEmailFeatureAvailable bool `json:"isHideMyEmailFeatureAvailable"`
ContinueOnDeviceEligibleDeviceInfo []string `json:"ContinueOnDeviceEligibleDeviceInfo"`
Gilligvited bool `json:"gilligvited"`
AppleIDAliases []interface{} `json:"appleIdAliases"`
UbiquityEOLEnabled bool `json:"ubiquityEOLEnabled"`
IsPaidDeveloper bool `json:"isPaidDeveloper"`
CountryCode string `json:"countryCode"`
NotificationID string `json:"notificationId"`
PrimaryEmailVerified bool `json:"primaryEmailVerified"`
ADsID string `json:"aDsID"`
Locked bool `json:"locked"`
ICDRSCapableDeviceCount int `json:"ICDRSCapableDeviceCount"`
HasICloudQualifyingDevice bool `json:"hasICloudQualifyingDevice"`
PrimaryEmail string `json:"primaryEmail"`
AppleIDEntries []struct {
IsPrimary bool `json:"isPrimary"`
Type string `json:"type"`

View File

@@ -445,7 +445,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// build request
// can't use normal rename as file needs to be "activated" first
// cant use normal rename as file needs to be "activated" first
r := api.NewUpdateFileInfo()
r.DocumentID = doc.DocumentID

View File

@@ -75,7 +75,7 @@ type MoveFolderParam struct {
DestinationPath string `validate:"nonzero" json:"destinationPath"`
}
// JobIDResponse represents response struct with JobID for folder operations
// JobIDResponse respresents response struct with JobID for folder operations
type JobIDResponse struct {
JobID string `json:"jobId"`
}

View File

@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"net/http"
"slices"
"strconv"
"time"
@@ -143,7 +142,12 @@ func shouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool {
if resp == nil {
return false
}
return slices.Contains(retryErrorCodes, resp.StatusCode)
for _, e := range retryErrorCodes {
if resp.StatusCode == e {
return true
}
}
return false
}
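Both versions of shouldRetryHTTP above answer the same membership question; slices.Contains (Go 1.21+) collapses the loop into a single call. A standalone sketch using an illustrative retry list, not the backend's actual codes:

package main

import (
	"fmt"
	"net/http"
	"slices"
)

// retryErrorCodes is an illustrative list, not the backend's actual one.
var retryErrorCodes = []int{429, 500, 502, 503, 504}

// shouldRetryHTTP reports whether the response status is in the retry list.
func shouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool {
	if resp == nil {
		return false
	}
	return slices.Contains(retryErrorCodes, resp.StatusCode)
}

func main() {
	fmt.Println(shouldRetryHTTP(&http.Response{StatusCode: 503}, retryErrorCodes)) // true
	fmt.Println(shouldRetryHTTP(&http.Response{StatusCode: 404}, retryErrorCodes)) // false
}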
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {

View File

@@ -13,7 +13,6 @@ import (
"net/url"
"path"
"regexp"
"slices"
"strconv"
"strings"
"time"
@@ -152,19 +151,6 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
Help: "Host of InternetArchive Frontend.\n\nLeave blank for default value.",
Default: "https://archive.org",
Advanced: true,
}, {
Name: "item_metadata",
Help: `Metadata to be set on the IA item, this is different from file-level metadata that can be set using --metadata-set.
Format is key=value and the 'x-archive-meta-' prefix is automatically added.`,
Default: []string{},
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "item_derive",
Help: `Whether to trigger derive on the IA item or not. If set to false, the item will not be derived by IA upon upload.
The derive process produces a number of secondary files from an upload to make an upload more usable on the web.
Setting this to false is useful for uploading files that are already in a format that IA can display or reduce burden on IA's infrastructure.`,
Default: true,
}, {
Name: "disable_checksum",
Help: `Don't ask the server to test against MD5 checksum calculated by rclone.
@@ -201,7 +187,7 @@ Only enable if you need to be guaranteed to be reflected after write operations.
const iaItemMaxSize int64 = 1099511627776
// metadata keys that are not writeable
var roMetadataKey = map[string]any{
var roMetadataKey = map[string]interface{}{
// do not add mtime here, it's a documented exception
"name": nil, "source": nil, "size": nil, "md5": nil,
"crc32": nil, "sha1": nil, "format": nil, "old_version": nil,
@@ -215,8 +201,6 @@ type Options struct {
Endpoint string `config:"endpoint"`
FrontEndpoint string `config:"front_endpoint"`
DisableChecksum bool `config:"disable_checksum"`
ItemMetadata []string `config:"item_metadata"`
ItemDerive bool `config:"item_derive"`
WaitArchive fs.Duration `config:"wait_archive"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -806,23 +790,17 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
"x-amz-filemeta-rclone-update-track": updateTracker,
// we add some more headers for intuitive actions
"x-amz-auto-make-bucket": "1", // create an item if does not exist, do nothing if already
"x-archive-auto-make-bucket": "1", // same as above in IAS3 original way
"x-archive-keep-old-version": "0", // do not keep old versions (a.k.a. trashes in other clouds)
"x-archive-cascade-delete": "1", // enable "cascate delete" (delete all derived files in addition to the file itself)
"x-amz-auto-make-bucket": "1", // create an item if does not exist, do nothing if already
"x-archive-auto-make-bucket": "1", // same as above in IAS3 original way
"x-archive-keep-old-version": "0", // do not keep old versions (a.k.a. trashes in other clouds)
"x-archive-meta-mediatype": "data", // mark media type of the uploading file as "data"
"x-archive-queue-derive": "0", // skip derivation process (e.g. encoding to smaller files, OCR on PDFs)
"x-archive-cascade-delete": "1", // enable "cascate delete" (delete all derived files in addition to the file itself)
}
if size >= 0 {
headers["Content-Length"] = fmt.Sprintf("%d", size)
headers["x-archive-size-hint"] = fmt.Sprintf("%d", size)
}
// This is IA's ITEM metadata, not file metadata
headers, err = o.appendItemMetadataHeaders(headers, o.fs.opt)
if err != nil {
return err
}
var mdata fs.Metadata
mdata, err = fs.GetMetadataOptions(ctx, o.fs, src, options)
if err == nil && mdata != nil {
@@ -885,51 +863,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
func (o *Object) appendItemMetadataHeaders(headers map[string]string, options Options) (newHeaders map[string]string, err error) {
metadataCounter := make(map[string]int)
metadataValues := make(map[string][]string)
// First pass: count occurrences and collect values
for _, v := range options.ItemMetadata {
parts := strings.SplitN(v, "=", 2)
if len(parts) != 2 {
return newHeaders, errors.New("item metadata key=value should be in the form key=value")
}
key, value := parts[0], parts[1]
metadataCounter[key]++
metadataValues[key] = append(metadataValues[key], value)
}
// Second pass: add headers with appropriate prefixes
for key, count := range metadataCounter {
if count == 1 {
// Only one occurrence, use x-archive-meta-
headers[fmt.Sprintf("x-archive-meta-%s", key)] = metadataValues[key][0]
} else {
// Multiple occurrences, use x-archive-meta01-, x-archive-meta02-, etc.
for i, value := range metadataValues[key] {
headers[fmt.Sprintf("x-archive-meta%02d-%s", i+1, key)] = value
}
}
}
if o.fs.opt.ItemDerive {
headers["x-archive-queue-derive"] = "1"
} else {
headers["x-archive-queue-derive"] = "0"
}
fs.Debugf(o, "Setting IA item derive: %t", o.fs.opt.ItemDerive)
for k, v := range headers {
if strings.HasPrefix(k, "x-archive-meta") {
fs.Debugf(o, "Setting IA item metadata: %s=%s", k, v)
}
}
return headers, nil
}
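The appendItemMetadataHeaders function above encodes the Internet Archive convention for item metadata: a key that appears once becomes an x-archive-meta-<key> header, while a key that repeats gets numbered prefixes (x-archive-meta01-, x-archive-meta02-, ...). A trimmed-down sketch of just that mapping, leaving out the derive flag and debug logging; the helper name and error text are illustrative:

package main

import (
	"fmt"
	"strings"
)

// itemMetadataHeaders converts key=value pairs into IA item metadata headers,
// numbering the prefix when the same key appears more than once.
func itemMetadataHeaders(pairs []string) (map[string]string, error) {
	values := map[string][]string{}
	var order []string
	for _, p := range pairs {
		k, v, ok := strings.Cut(p, "=")
		if !ok {
			return nil, fmt.Errorf("item metadata %q is not in key=value form", p)
		}
		if _, seen := values[k]; !seen {
			order = append(order, k)
		}
		values[k] = append(values[k], v)
	}
	headers := map[string]string{}
	for _, k := range order {
		vs := values[k]
		if len(vs) == 1 {
			headers["x-archive-meta-"+k] = vs[0]
			continue
		}
		for i, v := range vs {
			headers[fmt.Sprintf("x-archive-meta%02d-%s", i+1, k)] = v
		}
	}
	return headers, nil
}

func main() {
	h, _ := itemMetadataHeaders([]string{"title=My Item", "subject=go", "subject=backup"})
	fmt.Println(h)
	// map[x-archive-meta-title:My Item x-archive-meta01-subject:go x-archive-meta02-subject:backup]
}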
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()
@@ -992,8 +925,10 @@ func (o *Object) Metadata(ctx context.Context) (m fs.Metadata, err error) {
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
if resp != nil {
if slices.Contains(retryErrorCodes, resp.StatusCode) {
return true, err
for _, e := range retryErrorCodes {
if resp.StatusCode == e {
return true, err
}
}
}
// Ok, not an awserr, check for generic failure conditions
@@ -1146,7 +1081,13 @@ func (f *Fs) waitFileUpload(ctx context.Context, reqPath, tracker string, newSiz
}
fileTrackers, _ := listOrString(iaFile.UpdateTrack)
trackerMatch := slices.Contains(fileTrackers, tracker)
trackerMatch := false
for _, v := range fileTrackers {
if v == tracker {
trackerMatch = true
break
}
}
if !trackerMatch {
continue
}

View File

@@ -70,7 +70,7 @@ func (t *Rfc3339Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
// MarshalJSON turns a Rfc3339Time into JSON
func (t *Rfc3339Time) MarshalJSON() ([]byte, error) {
return fmt.Appendf(nil, "\"%s\"", t.String()), nil
return []byte(fmt.Sprintf("\"%s\"", t.String())), nil
}
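The MarshalJSON hunk above swaps []byte(fmt.Sprintf(...)) for fmt.Appendf (Go 1.19+), which formats straight into a byte slice without the intermediate string. A small sketch with a stand-in time type:

package main

import (
	"fmt"
	"time"
)

// Rfc3339Time is a stand-in for the backend's custom time type.
type Rfc3339Time time.Time

func (t Rfc3339Time) String() string {
	return time.Time(t).UTC().Format(time.RFC3339)
}

// MarshalJSON renders the time as a quoted RFC 3339 string.
// fmt.Appendf(nil, ...) allocates and returns the formatted bytes directly.
func (t Rfc3339Time) MarshalJSON() ([]byte, error) {
	return fmt.Appendf(nil, "%q", t.String()), nil
}

func main() {
	b, _ := Rfc3339Time(time.Date(2025, 1, 15, 16, 44, 0, 0, time.UTC)).MarshalJSON()
	fmt.Println(string(b)) // "2025-01-15T16:44:00Z"
}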
// LoginToken is struct representing the login token generated in the WebUI
@@ -165,25 +165,25 @@ type DeviceRegistrationResponse struct {
// CustomerInfo provides general information about the account. Required for finding the correct internal username.
type CustomerInfo struct {
Username string `json:"username"`
Email string `json:"email"`
Name string `json:"name"`
CountryCode string `json:"country_code"`
LanguageCode string `json:"language_code"`
CustomerGroupCode string `json:"customer_group_code"`
BrandCode string `json:"brand_code"`
AccountType string `json:"account_type"`
SubscriptionType string `json:"subscription_type"`
Usage int64 `json:"usage"`
Quota int64 `json:"quota"`
BusinessUsage int64 `json:"business_usage"`
BusinessQuota int64 `json:"business_quota"`
WriteLocked bool `json:"write_locked"`
ReadLocked bool `json:"read_locked"`
LockedCause any `json:"locked_cause"`
WebHash string `json:"web_hash"`
AndroidHash string `json:"android_hash"`
IOSHash string `json:"ios_hash"`
Username string `json:"username"`
Email string `json:"email"`
Name string `json:"name"`
CountryCode string `json:"country_code"`
LanguageCode string `json:"language_code"`
CustomerGroupCode string `json:"customer_group_code"`
BrandCode string `json:"brand_code"`
AccountType string `json:"account_type"`
SubscriptionType string `json:"subscription_type"`
Usage int64 `json:"usage"`
Quota int64 `json:"quota"`
BusinessUsage int64 `json:"business_usage"`
BusinessQuota int64 `json:"business_quota"`
WriteLocked bool `json:"write_locked"`
ReadLocked bool `json:"read_locked"`
LockedCause interface{} `json:"locked_cause"`
WebHash string `json:"web_hash"`
AndroidHash string `json:"android_hash"`
IOSHash string `json:"ios_hash"`
}
// TrashResponse is returned when emptying the Trash

View File

@@ -31,7 +31,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
@@ -1264,7 +1264,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
Parameters: url.Values{},
}
opts.Parameters.Set("mode", "liststream")
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {

View File

@@ -193,7 +193,7 @@ func (o *Object) set(e *entity) {
// Call linkbox with the query in opts and return result
//
// This will be checked for error and an error will be returned if Status != 1
func getUnmarshaledResponse(ctx context.Context, f *Fs, opts *rest.Opts, result any) error {
func getUnmarshaledResponse(ctx context.Context, f *Fs, opts *rest.Opts, result interface{}) error {
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, opts, nil, &result)
return f.shouldRetry(ctx, resp, err)

View File

@@ -1046,7 +1046,7 @@ you can try to change the output.`,
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
switch name {
case "noop":
if txt, ok := opt["error"]; ok {
@@ -1056,7 +1056,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
return nil, errors.New(txt)
}
if _, ok := opt["echo"]; ok {
out := map[string]any{}
out := map[string]interface{}{}
out["name"] = name
out["arg"] = arg
out["opt"] = opt

View File

@@ -86,7 +86,7 @@ func TestVerifyCopy(t *testing.T) {
require.NoError(t, err)
src.(*Object).fs.opt.NoCheckUpdated = true
for i := range 100 {
for i := 0; i < 100; i++ {
go r.WriteFile(src.Remote(), fmt.Sprintf("some new content %d", i), src.ModTime(context.Background()))
}
_, err = operations.Copy(context.Background(), r.Fremote, nil, filePath+"2", src)

View File

@@ -63,8 +63,8 @@ type UserInfoResponse struct {
Prolong bool `json:"prolong"`
Promocodes struct {
} `json:"promocodes"`
Subscription []any `json:"subscription"`
Version string `json:"version"`
Subscription []interface{} `json:"subscription"`
Version string `json:"version"`
} `json:"billing"`
Bonuses struct {
CameraUpload bool `json:"camera_upload"`

View File

@@ -901,7 +901,7 @@ func (t *treeState) NextRecord() (fs.DirEntry, error) {
return nil, nil
case api.ListParseUnknown15:
skip := int(r.ReadPu32())
for range skip {
for i := 0; i < skip; i++ {
r.ReadPu32()
r.ReadPu32()
}
@@ -1768,7 +1768,7 @@ func (f *Fs) eligibleForSpeedup(remote string, size int64, options ...fs.OpenOpt
func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
f.speedupGlobs = nil
f.speedupAny = false
uniqueValidPatterns := make(map[string]any)
uniqueValidPatterns := make(map[string]interface{})
for _, pattern := range strings.Split(patternString, ",") {
pattern = strings.ToLower(strings.TrimSpace(pattern))
@@ -2131,7 +2131,10 @@ func getTransferRange(size int64, options ...fs.OpenOption) (start int64, end in
if limit < 0 {
limit = size - offset
}
end = min(offset+limit, size)
end = offset + limit
if end > size {
end = size
}
partial = !(offset == 0 && end == size)
return offset, end, partial
}
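getTransferRange clamps the computed end offset to the object size; the min builtin (Go 1.21+) expresses that clamp in one line, a change this diff makes in both directions across several backends. A self-contained sketch of the range calculation under that reading:

package main

import "fmt"

// transferRange clamps an offset/limit pair to the object size and reports
// whether the resulting range is partial. min is a builtin since Go 1.21.
func transferRange(size, offset, limit int64) (start, end int64, partial bool) {
	if limit < 0 {
		limit = size - offset
	}
	end = min(offset+limit, size)
	return offset, end, !(offset == 0 && end == size)
}

func main() {
	fmt.Println(transferRange(1000, 0, -1))    // 0 1000 false
	fmt.Println(transferRange(1000, 200, 900)) // 200 1000 true
}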

View File

@@ -11,7 +11,7 @@ import (
func testChunk(t *testing.T, chunk int) {
data := make([]byte, chunk)
for i := range chunk {
for i := 0; i < chunk; i++ {
data[i] = 'A'
}
for _, test := range []struct {

View File

@@ -21,7 +21,6 @@ import (
"fmt"
"io"
"path"
"slices"
"strings"
"sync"
"time"
@@ -219,11 +218,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
srv = mega.New().SetClient(fshttp.NewClient(ctx))
srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
srv.SetHTTPS(opt.UseHTTPS)
srv.SetLogger(func(format string, v ...any) {
srv.SetLogger(func(format string, v ...interface{}) {
fs.Infof("*go-mega*", format, v...)
})
if opt.Debug {
srv.SetDebugger(func(format string, v ...any) {
srv.SetDebugger(func(format string, v ...interface{}) {
fs.Debugf("*go-mega*", format, v...)
})
}
@@ -499,8 +498,11 @@ func (f *Fs) list(ctx context.Context, dir *mega.Node, fn listFn) (found bool, e
if err != nil {
return false, fmt.Errorf("list failed: %w", err)
}
if slices.ContainsFunc(nodes, fn) {
found = true
for _, item := range nodes {
if fn(item) {
found = true
break
}
}
return
}
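The list hunk above replaces a break-on-first-match loop with slices.ContainsFunc (Go 1.21+), which runs a predicate over the slice and stops at the first hit. A generic sketch with a toy node type rather than mega.Node:

package main

import (
	"fmt"
	"slices"
)

type node struct{ name string }

// anyNode reports whether fn matches any node, mirroring the
// slices.ContainsFunc(nodes, fn) form on one side of the diff.
func anyNode(nodes []node, fn func(node) bool) bool {
	return slices.ContainsFunc(nodes, fn)
}

func main() {
	nodes := []node{{"a"}, {"b"}, {"c"}}
	fmt.Println(anyNode(nodes, func(n node) bool { return n.name == "b" })) // true
	fmt.Println(anyNode(nodes, func(n node) bool { return n.name == "z" })) // false
}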
@@ -1154,7 +1156,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Upload the chunks
// FIXME do this in parallel
for id := range u.Chunks() {
for id := 0; id < u.Chunks(); id++ {
_, chunkSize, err := u.ChunkLocation(id)
if err != nil {
return fmt.Errorf("upload failed to read chunk location: %w", err)

View File

@@ -17,7 +17,7 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
)
@@ -383,7 +383,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
entries := fs.DirEntries{}
listR := func(bucket, directory, prefix string, addBucket bool) error {
err = f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, entry fs.DirEntry, isDirectory bool) error {

View File

@@ -29,7 +29,7 @@ func testPurgeListDeadlock(t *testing.T) {
r.Fremote.Features().Disable("Purge") // force fallback-purge
// make a lot of files to prevent it from finishing too quickly
for i := range 100 {
for i := 0; i < 100; i++ {
dst := "file" + fmt.Sprint(i) + ".txt"
r.WriteObject(ctx, dst, "hello", t1)
}
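Several test hunks in this diff toggle between the classic three-clause loop and ranging over an int, added in Go 1.22; when the index is unused it shortens further to for range n. A minimal sketch of both forms:

package main

import "fmt"

func main() {
	// Index needed: range over an int yields 0..n-1 (Go 1.22+).
	names := make([]string, 0, 3)
	for i := range 3 {
		names = append(names, fmt.Sprintf("file%d.txt", i))
	}
	fmt.Println(names) // [file0.txt file1.txt file2.txt]

	// Index unused: just repeat the body n times.
	count := 0
	for range 5 {
		count++
	}
	fmt.Println(count) // 5
}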

View File

@@ -28,7 +28,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
@@ -274,7 +274,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
// Command the backend to run a named commands: du and symlink
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "du":
// No arg parsing needed, the path is passed in the fs
@@ -516,7 +516,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return fs.ErrorDirNotFound
}
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
for resumeStart := u.Path; resumeStart != ""; {
var files []File
files, resumeStart, err = f.netStorageListRequest(ctx, URL, u.Path)
@@ -858,7 +858,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
// callBackend calls NetStorage API using either rest.Call or rest.CallXML function,
// depending on whether the response is required
func (f *Fs) callBackend(ctx context.Context, URL, method, actionHeader string, noResponse bool, response any, options []fs.OpenOption) (io.ReadCloser, error) {
func (f *Fs) callBackend(ctx context.Context, URL, method, actionHeader string, noResponse bool, response interface{}, options []fs.OpenOption) (io.ReadCloser, error) {
opts := rest.Opts{
Method: method,
RootURL: URL,
@@ -1080,7 +1080,7 @@ func (o *Object) netStorageDownloadRequest(ctx context.Context, options []fs.Ope
}
// netStorageDuRequest performs a NetStorage du request
func (f *Fs) netStorageDuRequest(ctx context.Context) (any, error) {
func (f *Fs) netStorageDuRequest(ctx context.Context) (interface{}, error) {
URL := f.url("")
const actionHeader = "version=1&action=du&format=xml&encoding=utf-8"
duResp := &Du{}
@@ -1100,7 +1100,7 @@ func (f *Fs) netStorageDuRequest(ctx context.Context) (any, error) {
}
// netStorageSymlinkRequest performs a NetStorage symlink request
func (f *Fs) netStorageSymlinkRequest(ctx context.Context, URL string, dst string, modTime *int64) (any, error) {
func (f *Fs) netStorageSymlinkRequest(ctx context.Context, URL string, dst string, modTime *int64) (interface{}, error) {
target := url.QueryEscape(strings.TrimSuffix(dst, "/"))
actionHeader := "version=1&action=symlink&target=" + target
if modTime != nil {

View File

@@ -396,57 +396,10 @@ func (m *Metadata) WritePermissions(ctx context.Context) (err error) {
return nil
}
// Order the permissions so that any with users come first.
//
// This is to work around a quirk with Graph:
//
// 1. You are adding permissions for both a group and a user.
// 2. The user is a member of the group.
// 3. The permissions for the group and user are the same.
// 4. You are adding the group permission before the user permission.
//
// When all of the above are true, Graph indicates it has added the
// user permission, but it immediately drops it
//
// See: https://github.com/rclone/rclone/issues/8465
func (m *Metadata) orderPermissions(xs []*api.PermissionsType) {
// Return true if identity has any user permissions
hasUserIdentity := func(identity *api.IdentitySet) bool {
if identity == nil {
return false
}
return identity.User.ID != "" || identity.User.DisplayName != "" || identity.User.Email != "" || identity.User.LoginName != ""
}
// Return true if p has any user permissions
hasUser := func(p *api.PermissionsType) bool {
if hasUserIdentity(p.GetGrantedTo(m.fs.driveType)) {
return true
}
for _, identity := range p.GetGrantedToIdentities(m.fs.driveType) {
if hasUserIdentity(identity) {
return true
}
}
return false
}
// Put Permissions with a user first, leaving unsorted otherwise
slices.SortStableFunc(xs, func(a, b *api.PermissionsType) int {
aHasUser := hasUser(a)
bHasUser := hasUser(b)
if aHasUser && !bHasUser {
return -1
} else if !aHasUser && bHasUser {
return 1
}
return 0
})
}
// sortPermissions sorts the permissions (to be written) into add, update, and remove queues
func (m *Metadata) sortPermissions() (add, update, remove []*api.PermissionsType) {
new, old := m.queuedPermissions, m.permissions
if len(old) == 0 || m.permsAddOnly {
m.orderPermissions(new)
return new, nil, nil // they must all be "add"
}
@@ -494,9 +447,6 @@ func (m *Metadata) sortPermissions() (add, update, remove []*api.PermissionsType
remove = append(remove, o)
}
}
m.orderPermissions(add)
m.orderPermissions(update)
m.orderPermissions(remove)
return add, update, remove
}
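The removed orderPermissions works around the Graph quirk documented in its comment by stably moving permissions that mention a user ahead of the rest; slices.SortStableFunc preserves the original order inside each group. A stripped-down sketch of that stable partition with a toy permission type instead of api.PermissionsType:

package main

import (
	"fmt"
	"slices"
)

type perm struct {
	id      string
	hasUser bool
}

// orderUserFirst stably sorts permissions so that entries granted to a
// user precede the rest, leaving the relative order inside each group alone.
func orderUserFirst(xs []perm) {
	slices.SortStableFunc(xs, func(a, b perm) int {
		switch {
		case a.hasUser && !b.hasUser:
			return -1
		case !a.hasUser && b.hasUser:
			return 1
		default:
			return 0
		}
	})
}

func main() {
	xs := []perm{{"group1", false}, {"alice", true}, {"none", false}, {"bob", true}}
	orderUserFirst(xs)
	for _, p := range xs {
		fmt.Print(p.id, " ")
	}
	fmt.Println() // alice bob group1 none
}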

View File

@@ -1,125 +0,0 @@
package onedrive
import (
"encoding/json"
"testing"
"github.com/rclone/rclone/backend/onedrive/api"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestOrderPermissions(t *testing.T) {
tests := []struct {
name string
input []*api.PermissionsType
expected []string
}{
{
name: "empty",
input: []*api.PermissionsType{},
expected: []string(nil),
},
{
name: "users first, then group, then none",
input: []*api.PermissionsType{
{ID: "1", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group1"}}},
{ID: "2", GrantedToIdentities: []*api.IdentitySet{{User: api.Identity{DisplayName: "Alice"}}}},
{ID: "3", GrantedTo: &api.IdentitySet{User: api.Identity{DisplayName: "Alice"}}},
{ID: "4"},
},
expected: []string{"2", "3", "1", "4"},
},
{
name: "same type unsorted",
input: []*api.PermissionsType{
{ID: "b", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group B"}}},
{ID: "a", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group A"}}},
{ID: "c", GrantedToIdentities: []*api.IdentitySet{{Group: api.Identity{DisplayName: "Group A"}}, {User: api.Identity{DisplayName: "Alice"}}}},
},
expected: []string{"c", "b", "a"},
},
{
name: "all user identities",
input: []*api.PermissionsType{
{ID: "c", GrantedTo: &api.IdentitySet{User: api.Identity{DisplayName: "Bob"}}},
{ID: "a", GrantedTo: &api.IdentitySet{User: api.Identity{Email: "alice@example.com"}}},
{ID: "b", GrantedToIdentities: []*api.IdentitySet{{User: api.Identity{LoginName: "user3"}}}},
},
expected: []string{"c", "a", "b"},
},
{
name: "no user or group info",
input: []*api.PermissionsType{
{ID: "z"},
{ID: "x"},
{ID: "y"},
},
expected: []string{"z", "x", "y"},
},
}
for _, driveType := range []string{driveTypePersonal, driveTypeBusiness} {
t.Run(driveType, func(t *testing.T) {
for _, tt := range tests {
m := &Metadata{fs: &Fs{driveType: driveType}}
t.Run(tt.name, func(t *testing.T) {
if driveType == driveTypeBusiness {
for i := range tt.input {
tt.input[i].GrantedToV2 = tt.input[i].GrantedTo
tt.input[i].GrantedTo = nil
tt.input[i].GrantedToIdentitiesV2 = tt.input[i].GrantedToIdentities
tt.input[i].GrantedToIdentities = nil
}
}
m.orderPermissions(tt.input)
var gotIDs []string
for _, p := range tt.input {
gotIDs = append(gotIDs, p.ID)
}
assert.Equal(t, tt.expected, gotIDs)
})
}
})
}
}
func TestOrderPermissionsJSON(t *testing.T) {
testJSON := `[
{
"id": "1",
"grantedToV2": {
"group": {
"id": "group@example.com"
}
},
"roles": [
"write"
]
},
{
"id": "2",
"grantedToV2": {
"user": {
"id": "user@example.com"
}
},
"roles": [
"write"
]
}
]`
var testPerms []*api.PermissionsType
err := json.Unmarshal([]byte(testJSON), &testPerms)
require.NoError(t, err)
m := &Metadata{fs: &Fs{driveType: driveTypeBusiness}}
m.orderPermissions(testPerms)
var gotIDs []string
for _, p := range testPerms {
gotIDs = append(gotIDs, p.ID)
}
assert.Equal(t, []string{"2", "1"}, gotIDs)
}

View File

@@ -30,7 +30,6 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
@@ -132,7 +131,7 @@ func init() {
Help: "Microsoft Cloud for US Government",
}, {
Value: regionDE,
Help: "Microsoft Cloud Germany (deprecated - try " + regionGlobal + " region first).",
Help: "Microsoft Cloud Germany",
}, {
Value: regionCN,
Help: "Azure and Office 365 operated by Vnet Group in China",
@@ -1397,7 +1396,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// So we have to filter things outside of the root which is
// inefficient.
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
// list a folder conventionally - used for shared folders
var listFolder func(dir string) error
@@ -2533,7 +2532,10 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.Objec
remaining := size
position := int64(0)
for remaining > 0 {
n := min(remaining, int64(o.fs.opt.ChunkSize))
n := int64(o.fs.opt.ChunkSize)
if remaining < n {
n = remaining
}
seg := readers.NewRepeatableReader(io.LimitReader(in, n))
fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n, options...)

View File

@@ -86,7 +86,7 @@ func (q *quickXorHash) Write(p []byte) (n int, err error) {
// Calculate the current checksum
func (q *quickXorHash) checkSum() (h [Size + 1]byte) {
for i := range dataSize {
for i := 0; i < dataSize; i++ {
shift := (i * 11) % 160
shiftBytes := shift / 8
shiftBits := shift % 8

View File

@@ -130,7 +130,10 @@ func TestQuickXorHashByBlock(t *testing.T) {
require.NoError(t, err, what)
h := New()
for i := 0; i < len(in); i += blockSize {
end := min(i+blockSize, len(in))
end := i + blockSize
if end > len(in) {
end = len(in)
}
n, err := h.Write(in[i:end])
require.Equal(t, end-i, n, what)
require.NoError(t, err, what)

View File

@@ -92,21 +92,6 @@ Note that these chunks are buffered in memory so increasing them will
increase memory use.`,
Default: 10 * fs.Mebi,
Advanced: true,
}, {
Name: "access",
Help: "Files and folders will be uploaded with this access permission (default private)",
Default: "private",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "private",
Help: "The file or folder access can be granted in a way that will allow select users to view, read or write what is absolutely essential for them.",
}, {
Value: "public",
Help: "The file or folder can be downloaded by anyone from a web browser. The link can be shared in any way,",
}, {
Value: "hidden",
Help: "The file or folder can be accessed has the same restrictions as Public if the user knows the URL of the file or folder link in order to access the contents",
}},
}},
})
}
@@ -117,7 +102,6 @@ type Options struct {
Password string `config:"password"`
Enc encoder.MultiEncoder `config:"encoding"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Access string `config:"access"`
}
// Fs represents a remote server
@@ -491,7 +475,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
Method: "POST",
Path: "/file/move_copy.json",
}
var request any = moveCopyFileData
var request interface{} = moveCopyFileData
// use /file/rename.json if moving within the same directory
_, srcDirID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
@@ -564,7 +548,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
Method: "POST",
Path: "/folder/move_copy.json",
}
var request any = moveFolderData
var request interface{} = moveFolderData
// use /folder/rename.json if moving within the same parent directory
if srcDirectoryID == dstDirectoryID {
@@ -751,23 +735,6 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// getAccessLevel is a helper function to determine access level integer
func getAccessLevel(access string) int64 {
var accessLevel int64
switch access {
case "private":
accessLevel = 0
case "public":
accessLevel = 1
case "hidden":
accessLevel = 2
default:
accessLevel = 0
fs.Errorf(nil, "Invalid access: %s, defaulting to private", access)
}
return accessLevel
}
// DirCacher methods
// CreateDir makes a directory with pathID as parent and name leaf
@@ -780,7 +747,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
SessionID: f.session.SessionID,
FolderName: f.opt.Enc.FromStandardName(leaf),
FolderSubParent: pathID,
FolderIsPublic: getAccessLevel(f.opt.Access),
FolderIsPublic: 0,
FolderPublicUpl: 0,
FolderPublicDisplay: 0,
FolderPublicDnl: 0,
@@ -1042,7 +1009,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
chunkCounter := 0
for remainingBytes > 0 {
currentChunkSize := min(int64(o.fs.opt.ChunkSize), remainingBytes)
currentChunkSize := int64(o.fs.opt.ChunkSize)
if currentChunkSize > remainingBytes {
currentChunkSize = remainingBytes
}
remainingBytes -= currentChunkSize
fs.Debugf(o, "Uploading chunk %d, size=%d, remain=%d", chunkCounter, currentChunkSize, remainingBytes)
@@ -1110,7 +1080,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Set permissions
err = o.fs.pacer.Call(func() (bool, error) {
update := permissions{SessionID: o.fs.session.SessionID, FileID: o.id, FileIsPublic: getAccessLevel(o.fs.opt.Access)}
update := permissions{SessionID: o.fs.session.SessionID, FileID: o.id, FileIsPublic: 0}
// fs.Debugf(nil, "Permissions : %#v", update)
opts := rest.Opts{
Method: "POST",

View File

@@ -131,7 +131,7 @@ If it is a string or a []string it will be shown to the user
otherwise it will be JSON encoded and shown to the user like that
*/
func (f *Fs) Command(ctx context.Context, commandName string, args []string,
opt map[string]string) (result any, err error) {
opt map[string]string) (result interface{}, err error) {
// fs.Debugf(f, "command %v, args: %v, opts:%v", commandName, args, opt)
switch commandName {
case operationRename:
@@ -159,7 +159,7 @@ func (f *Fs) Command(ctx context.Context, commandName string, args []string,
}
}
func (f *Fs) rename(ctx context.Context, remote, newName string) (any, error) {
func (f *Fs) rename(ctx context.Context, remote, newName string) (interface{}, error) {
if remote == "" {
return nil, fmt.Errorf("path to object file cannot be empty")
}
@@ -332,7 +332,7 @@ func (f *Fs) listMultipartUploadParts(ctx context.Context, bucketName, bucketPat
return uploadedParts, nil
}
func (f *Fs) restore(ctx context.Context, opt map[string]string) (any, error) {
func (f *Fs) restore(ctx context.Context, opt map[string]string) (interface{}, error) {
req := objectstorage.RestoreObjectsRequest{
NamespaceName: common.String(f.opt.Namespace),
RestoreObjectsDetails: objectstorage.RestoreObjectsDetails{},

View File

@@ -112,7 +112,7 @@ func copyObjectWaitForWorkRequest(ctx context.Context, wID *string, entityType s
string(objectstorage.WorkRequestSummaryStatusCanceled),
string(objectstorage.WorkRequestStatusFailed),
},
Refresh: func() (any, string, error) {
Refresh: func() (interface{}, string, error) {
getWorkRequestRequest := objectstorage.GetWorkRequestRequest{}
getWorkRequestRequest.WorkRequestId = wID
workRequestResponse, err := client.GetWorkRequest(context.Background(), getWorkRequestRequest)

View File

@@ -131,7 +131,7 @@ func (o *Object) setMetaData(
contentMd5 *string,
contentType *string,
lastModified *common.SDKTime,
storageTier any,
storageTier interface{},
meta map[string]string) error {
if contentLength != nil {

View File

@@ -18,8 +18,8 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/pacer"
)
@@ -649,7 +649,7 @@ of listing recursively that doing a directory traversal.
*/
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucketName, directory := f.split(dir)
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)

View File

@@ -5,7 +5,6 @@ package oracleobjectstorage
import (
"context"
"fmt"
"slices"
"strings"
"time"
@@ -24,7 +23,7 @@ var refreshGracePeriod = 30 * time.Second
//
// `state` is the latest state of that object. And `err` is any error that
// may have happened while refreshing the state.
type StateRefreshFunc func() (result any, state string, err error)
type StateRefreshFunc func() (result interface{}, state string, err error)
// StateChangeConf is the configuration struct used for `WaitForState`.
type StateChangeConf struct {
@@ -57,7 +56,7 @@ type StateChangeConf struct {
// reach the target state.
//
// Cancellation from the passed in context will cancel the refresh loop
func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (any, error) {
func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (interface{}, error) {
// fs.Debugf(entityType, "Waiting for state to become: %s", conf.Target)
notfoundTick := 0
@@ -73,7 +72,7 @@ func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType
}
type Result struct {
Result any
Result interface{}
State string
Error error
Done bool
@@ -166,9 +165,12 @@ func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType
}
}
if slices.Contains(conf.Pending, currentState) {
found = true
targetOccurrence = 0
for _, allowed := range conf.Pending {
if currentState == allowed {
found = true
targetOccurrence = 0
break
}
}
if !found && len(conf.Pending) > 0 {
@@ -276,8 +278,8 @@ func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType
// NotFoundError resource not found error
type NotFoundError struct {
LastError error
LastRequest any
LastResponse any
LastRequest interface{}
LastResponse interface{}
Message string
Retries int
}
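The StateChangeConf hunks above poll a StateRefreshFunc and classify each returned state as pending or target, again swapping manual membership loops for slices.Contains. A compact sketch of the same polling idea; the state names, interval and error text are illustrative, not the backend's:

package main

import (
	"context"
	"errors"
	"fmt"
	"slices"
	"time"
)

// refreshFunc returns the latest state of some remote operation.
type refreshFunc func() (state string, err error)

// waitForState polls refresh until it reports a target state, keeps waiting
// while it reports a pending state, and fails on anything else.
func waitForState(ctx context.Context, refresh refreshFunc, pending, target []string, interval time.Duration) (string, error) {
	for {
		state, err := refresh()
		if err != nil {
			return "", err
		}
		if slices.Contains(target, state) {
			return state, nil
		}
		if !slices.Contains(pending, state) {
			return state, errors.New("unexpected state: " + state)
		}
		select {
		case <-ctx.Done():
			return state, ctx.Err()
		case <-time.After(interval):
		}
	}
}

func main() {
	states := []string{"ACCEPTED", "IN_PROGRESS", "COMPLETED"}
	i := 0
	refresh := func() (string, error) { s := states[i]; i++; return s, nil }
	got, err := waitForState(context.Background(), refresh,
		[]string{"ACCEPTED", "IN_PROGRESS"}, []string{"COMPLETED"}, 10*time.Millisecond)
	fmt.Println(got, err) // COMPLETED <nil>
}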

View File

@@ -27,7 +27,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
@@ -424,7 +424,7 @@ func (f *Fs) newSingleConnClient(ctx context.Context) (*rest.Client, error) {
})
// Set our own http client in the context
ctx = oauthutil.Context(ctx, baseClient)
// create a new oauth client, reuse the token source
// create a new oauth client, re-use the token source
oAuthClient := oauth2.NewClient(ctx, f.ts)
return rest.NewClient(oAuthClient).SetRoot("https://" + f.opt.Hostname), nil
}
@@ -631,7 +631,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
err = f.listHelper(ctx, dir, true, func(o fs.DirEntry) error {
return list.Add(o)
})
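Several backends in this diff move between walk.NewListRHelper and the newer list.NewHelper when implementing ListR; both wrap the callback so entries can be Add()ed one at a time and sent in batches, with a final Flush. A sketch of that pattern against the older walk helper shown on one side of the diff; listAll and its wiring are placeholders, not rclone API:

package sketch

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/walk"
)

// listAll is a placeholder for a backend-specific lister that walks the
// remote and hands every discovered entry to fn.
type listAll func(ctx context.Context, dir string, fn func(fs.DirEntry) error) error

// listR shows the helper pattern used throughout this diff: wrap the
// callback, Add entries as they are discovered, then Flush the remainder.
func listR(ctx context.Context, dir string, callback fs.ListRCallback, lister listAll) error {
	list := walk.NewListRHelper(callback)
	err := lister(ctx, dir, func(entry fs.DirEntry) error {
		return list.Add(entry)
	})
	if err != nil {
		return err
	}
	return list.Flush()
}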
@@ -990,7 +990,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
if err != nil {
return nil, err
}
free := max(q.Quota-q.UsedQuota, 0)
free := q.Quota - q.UsedQuota
if free < 0 {
free = 0
}
usage = &fs.Usage{
Total: fs.NewUsageValue(q.Quota), // quota of bytes that can be used
Used: fs.NewUsageValue(q.UsedQuota), // bytes in use
@@ -1321,7 +1324,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
// sometimes pcloud leaves a half complete file on
// error, so delete it if it exists, trying a few times
for range 5 {
for i := 0; i < 5; i++ {
delObj, delErr := o.fs.NewObject(ctx, o.remote)
if delErr == nil && delObj != nil {
_ = delObj.Remove(ctx)

View File

@@ -37,7 +37,7 @@ func (c *writerAt) Close() error {
}
sizeOk := false
sizeLastSeen := int64(0)
for retry := range 5 {
for retry := 0; retry < 5; retry++ {
fs.Debugf(c.remote, "checking file size: try %d/5", retry)
obj, err := c.fs.NewObject(c.ctx, c.remote)
if err != nil {

View File

@@ -71,14 +71,14 @@ type Error struct {
// ErrorDetails contains further details of api error
type ErrorDetails struct {
Type string `json:"@type,omitempty"`
Reason string `json:"reason,omitempty"`
Domain string `json:"domain,omitempty"`
Metadata struct{} `json:"metadata,omitempty"` // TODO: undiscovered yet
Locale string `json:"locale,omitempty"` // e.g. "en"
Message string `json:"message,omitempty"`
StackEntries []any `json:"stack_entries,omitempty"` // TODO: undiscovered yet
Detail string `json:"detail,omitempty"`
Type string `json:"@type,omitempty"`
Reason string `json:"reason,omitempty"`
Domain string `json:"domain,omitempty"`
Metadata struct{} `json:"metadata,omitempty"` // TODO: undiscovered yet
Locale string `json:"locale,omitempty"` // e.g. "en"
Message string `json:"message,omitempty"`
StackEntries []interface{} `json:"stack_entries,omitempty"` // TODO: undiscovered yet
Detail string `json:"detail,omitempty"`
}
// Error returns a string for the error and satisfies the error interface
@@ -168,44 +168,44 @@ type FileList struct {
// for a single file, i.e. supports for higher `--multi-thread-streams=N`.
// However, it is not generally applicable as it is only for media.
type File struct {
Apps []*FileApp `json:"apps,omitempty"`
Audit *FileAudit `json:"audit,omitempty"`
Collection string `json:"collection,omitempty"` // TODO
CreatedTime Time `json:"created_time,omitempty"`
DeleteTime Time `json:"delete_time,omitempty"`
FileCategory string `json:"file_category,omitempty"` // "AUDIO", "VIDEO"
FileExtension string `json:"file_extension,omitempty"`
FolderType string `json:"folder_type,omitempty"`
Hash string `json:"hash,omitempty"` // custom hash with a form of sha1sum
IconLink string `json:"icon_link,omitempty"`
ID string `json:"id,omitempty"`
Kind string `json:"kind,omitempty"` // "drive#file"
Links *FileLinks `json:"links,omitempty"`
Md5Checksum string `json:"md5_checksum,omitempty"`
Medias []*Media `json:"medias,omitempty"`
MimeType string `json:"mime_type,omitempty"`
ModifiedTime Time `json:"modified_time,omitempty"` // updated when renamed or moved
Name string `json:"name,omitempty"`
OriginalFileIndex int `json:"original_file_index,omitempty"` // TODO
OriginalURL string `json:"original_url,omitempty"`
Params *FileParams `json:"params,omitempty"`
ParentID string `json:"parent_id,omitempty"`
Phase string `json:"phase,omitempty"`
Revision int `json:"revision,omitempty,string"`
ReferenceEvents []any `json:"reference_events"`
ReferenceResource any `json:"reference_resource"`
Size int64 `json:"size,omitempty,string"`
SortName string `json:"sort_name,omitempty"`
Space string `json:"space,omitempty"`
SpellName []any `json:"spell_name,omitempty"` // TODO maybe list of something?
Starred bool `json:"starred,omitempty"`
Tags []any `json:"tags"`
ThumbnailLink string `json:"thumbnail_link,omitempty"`
Trashed bool `json:"trashed,omitempty"`
UserID string `json:"user_id,omitempty"`
UserModifiedTime Time `json:"user_modified_time,omitempty"`
WebContentLink string `json:"web_content_link,omitempty"`
Writable bool `json:"writable,omitempty"`
Apps []*FileApp `json:"apps,omitempty"`
Audit *FileAudit `json:"audit,omitempty"`
Collection string `json:"collection,omitempty"` // TODO
CreatedTime Time `json:"created_time,omitempty"`
DeleteTime Time `json:"delete_time,omitempty"`
FileCategory string `json:"file_category,omitempty"` // "AUDIO", "VIDEO"
FileExtension string `json:"file_extension,omitempty"`
FolderType string `json:"folder_type,omitempty"`
Hash string `json:"hash,omitempty"` // custom hash with a form of sha1sum
IconLink string `json:"icon_link,omitempty"`
ID string `json:"id,omitempty"`
Kind string `json:"kind,omitempty"` // "drive#file"
Links *FileLinks `json:"links,omitempty"`
Md5Checksum string `json:"md5_checksum,omitempty"`
Medias []*Media `json:"medias,omitempty"`
MimeType string `json:"mime_type,omitempty"`
ModifiedTime Time `json:"modified_time,omitempty"` // updated when renamed or moved
Name string `json:"name,omitempty"`
OriginalFileIndex int `json:"original_file_index,omitempty"` // TODO
OriginalURL string `json:"original_url,omitempty"`
Params *FileParams `json:"params,omitempty"`
ParentID string `json:"parent_id,omitempty"`
Phase string `json:"phase,omitempty"`
Revision int `json:"revision,omitempty,string"`
ReferenceEvents []interface{} `json:"reference_events"`
ReferenceResource interface{} `json:"reference_resource"`
Size int64 `json:"size,omitempty,string"`
SortName string `json:"sort_name,omitempty"`
Space string `json:"space,omitempty"`
SpellName []interface{} `json:"spell_name,omitempty"` // TODO maybe list of something?
Starred bool `json:"starred,omitempty"`
Tags []interface{} `json:"tags"`
ThumbnailLink string `json:"thumbnail_link,omitempty"`
Trashed bool `json:"trashed,omitempty"`
UserID string `json:"user_id,omitempty"`
UserModifiedTime Time `json:"user_modified_time,omitempty"`
WebContentLink string `json:"web_content_link,omitempty"`
Writable bool `json:"writable,omitempty"`
}
// FileLinks includes links to file at backend
@@ -235,18 +235,18 @@ type Media struct {
VideoType string `json:"video_type,omitempty"` // "mpegts"
HdrType string `json:"hdr_type,omitempty"`
} `json:"video,omitempty"`
Link *Link `json:"link,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
VipTypes []any `json:"vip_types,omitempty"` // TODO maybe list of something?
RedirectLink string `json:"redirect_link,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Priority int `json:"priority,omitempty"`
IsOrigin bool `json:"is_origin,omitempty"`
ResolutionName string `json:"resolution_name,omitempty"`
IsVisible bool `json:"is_visible,omitempty"`
Category string `json:"category,omitempty"` // "category_origin"
Audio any `json:"audio"` // TODO: undiscovered yet
Link *Link `json:"link,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
VipTypes []interface{} `json:"vip_types,omitempty"` // TODO maybe list of something?
RedirectLink string `json:"redirect_link,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Priority int `json:"priority,omitempty"`
IsOrigin bool `json:"is_origin,omitempty"`
ResolutionName string `json:"resolution_name,omitempty"`
IsVisible bool `json:"is_visible,omitempty"`
Category string `json:"category,omitempty"` // "category_origin"
Audio interface{} `json:"audio"` // TODO: undiscovered yet
}
// FileParams includes parameters for instant open
@@ -263,20 +263,20 @@ type FileParams struct {
// FileApp includes parameters for instant open
type FileApp struct {
ID string `json:"id,omitempty"` // "decompress" for rar files
Name string `json:"name,omitempty"` // decompress" for rar files
Access []any `json:"access,omitempty"`
Link string `json:"link,omitempty"` // "https://mypikpak.com/drive/decompression/{File.Id}?gcid={File.Hash}\u0026wv-style=topbar%3Ahide"
RedirectLink string `json:"redirect_link,omitempty"`
VipTypes []any `json:"vip_types,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Params struct{} `json:"params,omitempty"` // TODO
CategoryIDs []any `json:"category_ids,omitempty"`
AdSceneType int `json:"ad_scene_type,omitempty"`
Space string `json:"space,omitempty"`
Links struct{} `json:"links,omitempty"` // TODO
ID string `json:"id,omitempty"` // "decompress" for rar files
Name string `json:"name,omitempty"` // decompress" for rar files
Access []interface{} `json:"access,omitempty"`
Link string `json:"link,omitempty"` // "https://mypikpak.com/drive/decompression/{File.Id}?gcid={File.Hash}\u0026wv-style=topbar%3Ahide"
RedirectLink string `json:"redirect_link,omitempty"`
VipTypes []interface{} `json:"vip_types,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Params struct{} `json:"params,omitempty"` // TODO
CategoryIDs []interface{} `json:"category_ids,omitempty"`
AdSceneType int `json:"ad_scene_type,omitempty"`
Space string `json:"space,omitempty"`
Links struct{} `json:"links,omitempty"` // TODO
}
// ------------------------------------------------------------
@@ -290,27 +290,27 @@ type TaskList struct {
// Task is a basic element representing a single task such as offline download and upload
type Task struct {
Kind string `json:"kind,omitempty"` // "drive#task"
ID string `json:"id,omitempty"` // task id?
Name string `json:"name,omitempty"` // torrent name?
Type string `json:"type,omitempty"` // "offline"
UserID string `json:"user_id,omitempty"`
Statuses []any `json:"statuses,omitempty"` // TODO
StatusSize int `json:"status_size,omitempty"` // TODO
Params *TaskParams `json:"params,omitempty"` // TODO
FileID string `json:"file_id,omitempty"`
FileName string `json:"file_name,omitempty"`
FileSize string `json:"file_size,omitempty"`
Message string `json:"message,omitempty"` // e.g. "Saving"
CreatedTime Time `json:"created_time,omitempty"`
UpdatedTime Time `json:"updated_time,omitempty"`
ThirdTaskID string `json:"third_task_id,omitempty"` // TODO
Phase string `json:"phase,omitempty"` // e.g. "PHASE_TYPE_RUNNING"
Progress int `json:"progress,omitempty"`
IconLink string `json:"icon_link,omitempty"`
Callback string `json:"callback,omitempty"`
ReferenceResource any `json:"reference_resource,omitempty"` // TODO
Space string `json:"space,omitempty"`
Kind string `json:"kind,omitempty"` // "drive#task"
ID string `json:"id,omitempty"` // task id?
Name string `json:"name,omitempty"` // torrent name?
Type string `json:"type,omitempty"` // "offline"
UserID string `json:"user_id,omitempty"`
Statuses []interface{} `json:"statuses,omitempty"` // TODO
StatusSize int `json:"status_size,omitempty"` // TODO
Params *TaskParams `json:"params,omitempty"` // TODO
FileID string `json:"file_id,omitempty"`
FileName string `json:"file_name,omitempty"`
FileSize string `json:"file_size,omitempty"`
Message string `json:"message,omitempty"` // e.g. "Saving"
CreatedTime Time `json:"created_time,omitempty"`
UpdatedTime Time `json:"updated_time,omitempty"`
ThirdTaskID string `json:"third_task_id,omitempty"` // TODO
Phase string `json:"phase,omitempty"` // e.g. "PHASE_TYPE_RUNNING"
Progress int `json:"progress,omitempty"`
IconLink string `json:"icon_link,omitempty"`
Callback string `json:"callback,omitempty"`
ReferenceResource interface{} `json:"reference_resource,omitempty"` // TODO
Space string `json:"space,omitempty"`
}
// TaskParams includes parameters informing status of Task

View File

@@ -638,7 +638,7 @@ func (c *pikpakClient) SetCaptchaTokener(ctx context.Context, m configmap.Mapper
return c
}
func (c *pikpakClient) CallJSON(ctx context.Context, opts *rest.Opts, request any, response any) (resp *http.Response, err error) {
func (c *pikpakClient) CallJSON(ctx context.Context, opts *rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
if c.captcha != nil {
token, err := c.captcha.Token(opts)
if err != nil || token == "" {

View File

@@ -1232,7 +1232,7 @@ func (f *Fs) uploadByForm(ctx context.Context, in io.Reader, name string, size i
params := url.Values{}
iVal := reflect.ValueOf(&form.MultiParts).Elem()
iTyp := iVal.Type()
for i := range iVal.NumField() {
for i := 0; i < iVal.NumField(); i++ {
params.Set(iTyp.Field(i).Tag.Get("json"), iVal.Field(i).String())
}
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name)
@@ -1520,7 +1520,7 @@ Result:
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "addurl":
if len(arg) != 1 {

View File

@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"net/http"
"slices"
"strconv"
"time"
@@ -14,8 +13,10 @@ import (
)
func checkStatusCode(resp *http.Response, expected ...int) error {
if slices.Contains(expected, resp.StatusCode) {
return nil
for _, code := range expected {
if resp.StatusCode == code {
return nil
}
}
return &statusCodeError{response: resp}
}

View File

@@ -332,7 +332,10 @@ func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.
var offsetMismatch bool
buf := make([]byte, defaultChunkSize)
for clientOffset < size {
chunkSize := min(size-clientOffset, int64(defaultChunkSize))
chunkSize := size - clientOffset
if chunkSize >= int64(defaultChunkSize) {
chunkSize = int64(defaultChunkSize)
}
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
chunkStart := clientOffset
reqSize := chunkSize

View File

@@ -22,7 +22,7 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
qsConfig "github.com/yunify/qingstor-sdk-go/v3/config"
@@ -704,7 +704,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)

View File

@@ -358,7 +358,7 @@ func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) (err error) {
})()
ch := make(chan chunk, mu.cfg.concurrency)
for range mu.cfg.concurrency {
for i := 0; i < mu.cfg.concurrency; i++ {
mu.wg.Add(1)
go mu.readChunk(ch)
}

View File

@@ -15,7 +15,6 @@ import (
"net/http"
"net/url"
"path"
"slices"
"strconv"
"strings"
"time"
@@ -644,8 +643,10 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
return err
}
if slices.Contains(result.IDs, id) {
return nil
for _, removedID := range result.IDs {
if removedID == id {
return nil
}
}
return fmt.Errorf("file %s was not deleted successfully", id)

View File

@@ -59,7 +59,11 @@ func (u *UploadMemoryManager) Consume(fileID string, neededMemory int64, speed f
defer func() { u.fileUsage[fileID] = borrowed }()
effectiveChunkSize := max(int64(speed*u.effectiveTime.Seconds()), u.reserved)
effectiveChunkSize := int64(speed * u.effectiveTime.Seconds())
if effectiveChunkSize < u.reserved {
effectiveChunkSize = u.reserved
}
if neededMemory < effectiveChunkSize {
effectiveChunkSize = neededMemory

View File

@@ -1,59 +0,0 @@
package s3
import (
"context"
"net/http"
"time"
"github.com/IBM/go-sdk-core/v5/core"
"github.com/aws/aws-sdk-go-v2/aws"
v4signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)
// Authenticator defines an interface for obtaining an IAM token.
type Authenticator interface {
GetToken() (string, error)
}
// IbmIamSigner is a structure for signing requests using IBM IAM.
// Requires APIKey and Resource InstanceID
type IbmIamSigner struct {
APIKey string
InstanceID string
Auth Authenticator
}
// SignHTTP signs requests using IBM IAM token.
func (signer *IbmIamSigner) SignHTTP(ctx context.Context, credentials aws.Credentials, req *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4signer.SignerOptions)) error {
var authenticator Authenticator
if signer.Auth != nil {
authenticator = signer.Auth
} else {
authenticator = &core.IamAuthenticator{ApiKey: signer.APIKey}
}
token, err := authenticator.GetToken()
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+token)
req.Header.Set("ibm-service-instance-id", signer.InstanceID)
return nil
}
// NoOpCredentialsProvider is needed since S3 SDK requires having credentials, even though authentication is happening via IBM IAM.
type NoOpCredentialsProvider struct{}
// Retrieve returns mock credentials for the NoOpCredentialsProvider.
func (n *NoOpCredentialsProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
return aws.Credentials{
AccessKeyID: "NoOpAccessKey",
SecretAccessKey: "NoOpSecretKey",
SessionToken: "",
Source: "NoOpCredentialsProvider",
}, nil
}
// IsExpired always returns false
func (n *NoOpCredentialsProvider) IsExpired() bool {
return false
}

Some files were not shown because too many files have changed in this diff.