mirror of https://github.com/rclone/rclone.git synced 2025-12-06 00:03:32 +00:00

Compare commits


1 commit

Author: Anagh Kumar Baranwal
Commit: f26e41d1c5
Message: fix: mount parsing for linux under the WSL layer which may contain mounts with spaces
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
Date: 2023-07-18 17:39:25 +05:30
1708 changed files with 78481 additions and 267106 deletions

.gitattributes (4 changed lines)

@@ -1,7 +1,3 @@
# Go writes go.mod and go.sum with lf even on windows
go.mod text eol=lf
go.sum text eol=lf
# Ignore generated files in GitHub language statistics and diffs
/MANUAL.* linguist-generated=true
/rclone.1 linguist-generated=true


@@ -17,21 +17,22 @@ on:
manual:
description: Manual run (bypass default conditions)
type: boolean
required: true
default: true
jobs:
build:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.23']
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.19', 'go1.20']
include:
- job_name: linux
os: ubuntu-latest
go: '>=1.24.0-rc.1'
go: '1.21.0-rc.3'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -42,14 +43,14 @@ jobs:
- job_name: linux_386
os: ubuntu-latest
go: '>=1.24.0-rc.1'
go: '1.21.0-rc.3'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macos-latest
go: '>=1.24.0-rc.1'
os: macos-11
go: '1.21.0-rc.3'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -57,15 +58,15 @@ jobs:
deploy: true
- job_name: mac_arm64
os: macos-latest
go: '>=1.24.0-rc.1'
os: macos-11
go: '1.21.0-rc.3'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows
os: windows-latest
go: '>=1.24.0-rc.1'
go: '1.21.0-rc.3'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
@@ -75,14 +76,20 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '>=1.24.0-rc.1'
go: '1.21.0-rc.3'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.23
- job_name: go1.19
os: ubuntu-latest
go: '1.23'
go: '1.19'
quicktest: true
racequicktest: true
- job_name: go1.20
os: ubuntu-latest
go: '1.20'
quicktest: true
racequicktest: true
@@ -92,12 +99,12 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@v5
uses: actions/setup-go@v4
with:
go-version: ${{ matrix.go }}
check-latest: true
@@ -117,8 +124,7 @@ jobs:
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get update
sudo apt-get install -y fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
sudo apt-get install fuse3 libfuse-dev rpm pkg-config
if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS
@@ -131,8 +137,7 @@ jobs:
brew untap --force homebrew/cask
brew update
brew install --cask macfuse
brew install git-annex git-annex-remote-rclone
if: matrix.os == 'macos-latest'
if: matrix.os == 'macos-11'
- name: Install Libraries on Windows
shell: powershell
@@ -162,6 +167,14 @@ jobs:
printf "\n\nSystem environment:\n\n"
env
- name: Go module cache
uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Build rclone
shell: bash
run: |
@@ -203,6 +216,7 @@ jobs:
shell: bash
run: |
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
if [[ "${{ matrix.os }}" == "windows-latest" ]]; then make release_dep_windows ; fi
make ci_beta
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
@@ -211,79 +225,27 @@ jobs:
if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
lint:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
timeout-minutes: 30
name: "lint"
runs-on: ubuntu-latest
steps:
- name: Get runner parameters
id: get-runner-parameters
shell: bash
run: |
echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
uses: actions/checkout@v3
- name: Code quality test
uses: golangci/golangci-lint-action@v3
with:
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
version: latest
# Run govulncheck on the latest go version, the one we build binaries with
- name: Install Go
id: setup-go
uses: actions/setup-go@v5
uses: actions/setup-go@v4
with:
go-version: '>=1.23.0-rc.1'
go-version: '1.21.0-rc.3'
check-latest: true
cache: false
- name: Cache
uses: actions/cache@v4
with:
path: |
~/go/pkg/mod
~/.cache/go-build
~/.cache/golangci-lint
key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-
- name: Code quality test (Linux)
uses: golangci/golangci-lint-action@v6
with:
version: latest
skip-cache: true
- name: Code quality test (Windows)
uses: golangci/golangci-lint-action@v6
env:
GOOS: "windows"
with:
version: latest
skip-cache: true
- name: Code quality test (macOS)
uses: golangci/golangci-lint-action@v6
env:
GOOS: "darwin"
with:
version: latest
skip-cache: true
- name: Code quality test (FreeBSD)
uses: golangci/golangci-lint-action@v6
env:
GOOS: "freebsd"
with:
version: latest
skip-cache: true
- name: Code quality test (OpenBSD)
uses: golangci/golangci-lint-action@v6
env:
GOOS: "openbsd"
with:
version: latest
skip-cache: true
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
@@ -291,27 +253,31 @@ jobs:
- name: Scan for vulnerabilities
run: govulncheck ./...
- name: Scan edits of autogenerated files
run: bin/check_autogenerated_edits.py
if: github.event_name == 'pull_request'
android:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
timeout-minutes: 30
name: "android-all"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0
# Upgrade together with NDK version
- name: Set up Go
uses: actions/setup-go@v5
uses: actions/setup-go@v4
with:
go-version: '>=1.24.0-rc.1'
go-version: '1.21.0-rc.3'
- name: Go module cache
uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Set global environment variables
shell: bash


@@ -0,0 +1,61 @@
name: Docker beta build
on:
push:
branches:
- master
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v4
with:
images: ghcr.io/${{ github.repository }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user who created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
# `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
# GitHub Actions at the start of a workflow run to identify the job.
# This is used to authenticate against GitHub Container Registry.
# See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
# for more detailed information.
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and publish image
uses: docker/build-push-action@v4
with:
file: Dockerfile
context: .
push: true # push the image to ghcr
tags: |
ghcr.io/rclone/rclone:beta
rclone/rclone:beta
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
cache-from: type=gha
cache-to: type=gha,mode=max
provenance: false
# Eventually cache will need to be cleared if builds more frequent than once a week
# https://github.com/docker/build-push-action/issues/252
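
A rough local equivalent of the image build this workflow performs (the builder and tag names below are illustrative, and a local `--load` build is limited to one platform, whereas CI pushes all five listed platforms):

```bash
# Local sketch of the beta image build; CI builds
# linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 and pushes the
# result with GHA layer caching, while a local --load build targets one platform.
docker buildx create --use --name rclone-builder 2>/dev/null || true
docker buildx build \
  --file Dockerfile \
  --platform linux/amd64 \
  --tag rclone-local:beta \
  --load \
  .
```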


@@ -1,294 +0,0 @@
---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_image.yml" -*-
name: Build & Push Docker Images
# Trigger the workflow on push or pull request
on:
push:
branches:
- '**'
tags:
- '**'
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
default: true
jobs:
build-image:
if: inputs.manual || (github.repository == 'rclone/rclone' && github.event_name != 'pull_request')
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
include:
- platform: linux/amd64
runs-on: ubuntu-24.04
- platform: linux/386
runs-on: ubuntu-24.04
- platform: linux/arm64
runs-on: ubuntu-24.04-arm
- platform: linux/arm/v7
runs-on: ubuntu-24.04-arm
- platform: linux/arm/v6
runs-on: ubuntu-24.04-arm
name: Build Docker Image for ${{ matrix.platform }}
runs-on: ${{ matrix.runs-on }}
steps:
- name: Free Space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout Repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set REPO_NAME Variable
run: |
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
- name: Set PLATFORM Variable
run: |
platform=${{ matrix.platform }}
echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV
- name: Set CACHE_NAME Variable
shell: python
run: |
import os, re
def slugify(input_string, max_length=63):
slug = input_string.lower()
slug = re.sub(r'[^a-z0-9 -]', ' ', slug)
slug = slug.strip()
slug = re.sub(r'\s+', '-', slug)
slug = re.sub(r'-+', '-', slug)
slug = slug[:max_length]
slug = re.sub(r'[-]+$', '', slug)
return slug
ref_name_slug = "cache"
if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request":
ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])
with open(os.environ['GITHUB_ENV'], 'a') as env:
env.write(f"CACHE_NAME={ref_name_slug}\n")
- name: Get ImageOS
# There's no way around this, because "ImageOS" is only available to
# processes, but the setup-go action uses it in its key.
id: imageos
uses: actions/github-script@v7
with:
result-encoding: string
script: |
return process.env.ImageOS
- name: Extract Metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,manifest-descriptor # Important for digest annotation (used by Github packages)
with:
images: |
ghcr.io/${{ env.REPO_NAME }}
labels: |
org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
org.opencontainers.image.vendor=${{ github.repository_owner }}
org.opencontainers.image.authors=rclone <https://github.com/rclone>
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
org.opencontainers.image.revision=${{ github.sha }}
tags: |
type=sha
type=ref,event=pr
type=ref,event=branch
type=semver,pattern={{version}}
type=semver,pattern={{major}}
type=semver,pattern={{major}}.{{minor}}
type=raw,value=beta,enable={{is_default_branch}}
- name: Setup QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Load Go Build Cache for Docker
id: go-cache
uses: actions/cache@v4
with:
key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
# Cache only the go builds, the module download is cached via the docker layer caching
path: |
go-build-cache
- name: Inject Go Build Cache into Docker
uses: reproducible-containers/buildkit-cache-dance@v3
with:
cache-map: |
{
"go-build-cache": "/root/.cache/go-build"
}
skip-extraction: ${{ steps.go-cache.outputs.cache-hit }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user who created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and Publish Image Digest
id: build
uses: docker/build-push-action@v6
with:
file: Dockerfile
context: .
provenance: false
# don't specify 'tags' here (error "get can't push tagged ref by digest")
# tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
annotations: ${{ steps.meta.outputs.annotations }}
platforms: ${{ matrix.platform }}
outputs: |
type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
cache-from: |
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
cache-to: |
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd
- name: Export Image Digest
run: |
mkdir -p /tmp/digests
digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}"
- name: Upload Image Digest
uses: actions/upload-artifact@v4
with:
name: digests-${{ env.PLATFORM }}
path: /tmp/digests/*
retention-days: 1
if-no-files-found: error
merge-image:
name: Merge & Push Final Docker Image
runs-on: ubuntu-24.04
needs:
- build-image
steps:
- name: Download Image Digests
uses: actions/download-artifact@v4
with:
path: /tmp/digests
pattern: digests-*
merge-multiple: true
- name: Set REPO_NAME Variable
run: |
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
- name: Extract Metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: index
with:
images: |
${{ env.REPO_NAME }}
ghcr.io/${{ env.REPO_NAME }}
labels: |
org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
org.opencontainers.image.vendor=${{ github.repository_owner }}
org.opencontainers.image.authors=rclone <https://github.com/rclone>
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
org.opencontainers.image.revision=${{ github.sha }}
tags: |
type=sha
type=ref,event=pr
type=ref,event=branch
type=semver,pattern={{version}}
type=semver,pattern={{major}}
type=semver,pattern={{major}}.{{minor}}
type=raw,value=beta,enable={{is_default_branch}}
- name: Extract Tags
shell: python
run: |
import json, os
metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
metadata = json.loads(metadata_json)
tags = [f"--tag '{tag}'" for tag in metadata["tags"]]
tags_string = " ".join(tags)
with open(os.environ['GITHUB_ENV'], 'a') as env:
env.write(f"TAGS={tags_string}\n")
- name: Extract Annotations
shell: python
run: |
import json, os
metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
metadata = json.loads(metadata_json)
annotations = [f"--annotation '{annotation}'" for annotation in metadata["annotations"]]
annotations_string = " ".join(annotations)
with open(os.environ['GITHUB_ENV'], 'a') as env:
env.write(f"ANNOTATIONS={annotations_string}\n")
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user who created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Create & Push Manifest List
working-directory: /tmp/digests
run: |
docker buildx imagetools create \
${{ env.TAGS }} \
${{ env.ANNOTATIONS }} \
$(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *)
- name: Inspect and Run Multi-Platform Image
run: |
docker buildx imagetools inspect --raw ${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
docker buildx imagetools inspect --raw ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
docker run --rm ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} version
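
The "Create & Push Manifest List" step above can be reproduced by hand roughly as follows; the digests are placeholders, since the real job collects them from the digests-* artifacts:

```bash
# Sketch: combine per-platform image digests into one multi-arch tag.
REPO=ghcr.io/rclone/rclone
docker buildx imagetools create \
  --tag "$REPO:beta" \
  "$REPO@sha256:<digest-amd64>" \
  "$REPO@sha256:<digest-arm64>"
# Check the merged manifest list:
docker buildx imagetools inspect "$REPO:beta"
```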


@@ -1,49 +0,0 @@
---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*-
name: Release Build for Docker Plugin
on:
release:
types: [published]
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
default: true
jobs:
build_docker_volume_plugin:
if: inputs.manual || github.repository == 'rclone/rclone'
name: Build docker plugin job
runs-on: ubuntu-latest
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build and publish docker plugin
shell: bash
run: |
VER=${GITHUB_REF#refs/tags/}
PLUGIN_USER=rclone
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
export PLUGIN_USER PLUGIN_ARCH
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
done
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}


@@ -0,0 +1,59 @@
name: Docker release build
on:
release:
types: [published]
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Get actual patch version
id: actual_patch_version
run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
- name: Get actual minor version
id: actual_minor_version
run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
- name: Get actual major version
id: actual_major_version
run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
- name: Build and publish image
uses: ilteoood/docker_buildx@1.1.0
with:
tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
build_docker_volume_plugin:
if: github.repository == 'rclone/rclone'
needs: build
runs-on: ubuntu-latest
name: Build docker plugin job
steps:
- name: Checkout master
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build and publish docker plugin
shell: bash
run: |
VER=${GITHUB_REF#refs/tags/}
PLUGIN_USER=rclone
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
export PLUGIN_USER PLUGIN_ARCH
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
done
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}


@@ -1,15 +0,0 @@
name: Notify users based on issue labels
on:
issues:
types: [labeled]
jobs:
notify:
runs-on: ubuntu-latest
steps:
- uses: jenschelkopf/issue-label-notification-action@1.3
with:
token: ${{ secrets.NOTIFY_ACTION_TOKEN }}
recipients: |
Support Contract=@rclone/support


@@ -1,14 +1,14 @@
name: Publish to Winget
on:
release:
types: [released]
jobs:
publish:
runs-on: ubuntu-latest
steps:
- uses: vedantmgoyal2009/winget-releaser@v2
with:
identifier: Rclone.Rclone
installers-regex: '-windows-\w+\.zip$'
token: ${{ secrets.WINGET_TOKEN }}
name: Publish to Winget
on:
release:
types: [released]
jobs:
publish:
runs-on: windows-latest # Action can only run on Windows
steps:
- uses: vedantmgoyal2009/winget-releaser@v2
with:
identifier: Rclone.Rclone
installers-regex: '-windows-\w+\.zip$'
token: ${{ secrets.WINGET_TOKEN }}

.gitignore (9 changed lines)

@@ -3,20 +3,15 @@ _junk/
rclone
rclone.exe
build
/docs/public/
/docs/.hugo_build.lock
/docs/static/img/logos/
docs/public
rclone.iml
.idea
.history
.vscode
*.test
*.log
*.iml
fuzz-build.zip
*.orig
*.rej
Thumbs.db
__pycache__
.DS_Store
resource_windows_*.syso
.devcontainer


@@ -13,7 +13,6 @@ linters:
- stylecheck
- unused
- misspell
- gocritic
#- prealloc
#- maligned
disable-all: true
@@ -34,111 +33,24 @@ issues:
- staticcheck
text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
# don't disable the revive messages about comments on exported functions
include:
- EXC0012
- EXC0013
- EXC0014
- EXC0015
run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m
linters-settings:
revive:
# setting rules seems to disable all the rules, so re-enable them here
rules:
- name: blank-imports
disabled: false
- name: context-as-argument
disabled: false
- name: context-keys-type
disabled: false
- name: dot-imports
disabled: false
- name: empty-block
disabled: true
- name: error-naming
disabled: false
- name: error-return
disabled: false
- name: error-strings
disabled: false
- name: errorf
disabled: false
- name: exported
disabled: false
- name: increment-decrement
disabled: true
- name: indent-error-flow
disabled: false
- name: package-comments
disabled: false
- name: range
disabled: false
- name: receiver-naming
disabled: false
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
- name: time-naming
disabled: false
- name: unexported-return
disabled: false
- name: unreachable-code
disabled: true
- name: unused-parameter
disabled: true
- name: var-declaration
disabled: false
- name: var-naming
disabled: false
- name: empty-block
disabled: true
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
stylecheck:
# Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
gocritic:
# Enable all default checks with some exceptions and some additions (commented).
# Cannot use both enabled-checks and disabled-checks, so must specify all to be used.
disable-all: true
enabled-checks:
#- appendAssign # Enabled by default
- argOrder
- assignOp
- badCall
- badCond
#- captLocal # Enabled by default
- caseOrder
- codegenComment
#- commentFormatting # Enabled by default
- defaultCaseOrder
- deprecatedComment
- dupArg
- dupBranchBody
- dupCase
- dupSubExpr
- elseif
#- exitAfterDefer # Enabled by default
- flagDeref
- flagName
#- ifElseChain # Enabled by default
- mapKey
- newDeref
- offBy1
- regexpMust
- ruleguard # Not enabled by default
#- singleCaseSwitch # Enabled by default
- sloppyLen
- sloppyTypeAssert
- switchTrue
- typeSwitchVar
- underef
- unlambda
- unslice
- valSwap
- wrapperFunc
settings:
ruleguard:
rules: "${configDir}/bin/rules.go"


@@ -1,8 +1,8 @@
# Contributing to rclone
# Contributing to rclone #
This is a short guide on how to contribute things to rclone.
## Reporting a bug
## Reporting a bug ##
If you've just got a question or aren't sure if you've found a bug
then please use the [rclone forum](https://forum.rclone.org/) instead
@@ -12,13 +12,13 @@ When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](https://beta.rclone.org/):
- Rclone version (e.g. output from `rclone version`)
- Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
- The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
- A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
- if the log contains secrets then edit the file with a text editor first to obscure them
* Rclone version (e.g. output from `rclone version`)
* Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
* if the log contains secrets then edit the file with a text editor first to obscure them
## Submitting a new feature or bug fix
## Submitting a new feature or bug fix ##
If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub.
@@ -73,9 +73,9 @@ This is typically enough if you made a simple bug fix, otherwise please read the
Make sure you
- Add [unit tests](#testing) for a new feature.
- Add [documentation](#writing-documentation) for a new feature.
- [Commit your changes](#committing-your-changes) using the [commit message guidelines](#commit-messages).
* Add [unit tests](#testing) for a new feature.
* Add [documentation](#writing-documentation) for a new feature.
* [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
When you are done with that push your changes to GitHub:
@@ -88,9 +88,9 @@ Your changes will then get reviewed and you might get asked to fix some stuff. I
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
## Using Git and GitHub
## Using Git and GitHub ##
### Committing your changes
### Committing your changes ###
Follow the guideline for [commit messages](#commit-messages) and then:
@@ -107,7 +107,7 @@ You can modify the message or changes in the latest commit using:
If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Replacing your previously pushed commits
### Replacing your previously pushed commits ###
Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
@@ -115,7 +115,7 @@ Your previously pushed commits are replaced by:
git push --force origin my-new-feature
### Basing your changes on the latest master
### Basing your changes on the latest master ###
To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
@@ -149,21 +149,13 @@ If you squash commits that have been pushed to GitHub, then you will have to [re
Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
### GitHub Continuous Integration
### GitHub Continuous Integration ###
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
## Testing
## Testing ##
### Code quality tests
If you install [golangci-lint](https://github.com/golangci/golangci-lint) then you can run the same tests as get run in the CI which can be very helpful.
You can run them with `make check` or with `golangci-lint run ./...`.
Using these tests ensures that the rclone codebase all uses the same coding standards. These tests also check for easy mistakes to make (like forgetting to check an error return).
### Quick testing
### Quick testing ###
rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.
@@ -176,7 +168,7 @@ You can also use `make`, if supported by your platform
The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
### Backend testing
### Backend testing ###
rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud
@@ -209,9 +201,9 @@ altogether with an HTML report and test retries then from the
project root:
go install github.com/rclone/rclone/fstest/test_all
test_all -backends drive
test_all -backend drive
### Full integration testing
### Full integration testing ###
If you want to run all the integration tests against all the remotes,
then change into the project root and run
@@ -226,56 +218,55 @@ The commands may require some extra go packages which you can install with
The full integration tests are run daily on the integration test server. You can
find the results at https://pub.rclone.org/integration-tests/
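
A minimal sketch of running the integration test framework against a single backend, assuming a `TestDrive:` style remote is configured and `$GOPATH/bin` is on your PATH; the flag spelling follows the newer of the two `test_all` lines shown above:

```bash
# Build the test driver and run the integration tests for one backend.
go install github.com/rclone/rclone/fstest/test_all
test_all -backends drive          # the other line above shows -backend

# Or run a single package's tests directly against a remote:
cd fs/operations && go test -v -remote TestDrive:
```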
## Code Organisation
## Code Organisation ##
Rclone code is organised into a small number of top level directories
with modules beneath.
- backend - the rclone backends for interfacing to cloud providers -
- all - import this to load all the cloud providers
- ...providers
- bin - scripts for use while building or maintaining rclone
- cmd - the rclone commands
- all - import this to load all the commands
- ...commands
- cmdtest - end-to-end tests of commands, flags, environment variables,...
- docs - the documentation and website
- content - adjust these docs only - everything else is autogenerated
- command - these are auto-generated - edit the corresponding .go file
- fs - main rclone definitions - minimal amount of code
- accounting - bandwidth limiting and statistics
- asyncreader - an io.Reader which reads ahead
- config - manage the config file and flags
- driveletter - detect if a name is a drive letter
- filter - implements include/exclude filtering
- fserrors - rclone specific error handling
- fshttp - http handling for rclone
- fspath - path handling for rclone
- hash - defines rclone's hash types and functions
- list - list a remote
- log - logging facilities
- march - iterates directories in lock step
- object - in memory Fs objects
- operations - primitives for sync, e.g. Copy, Move
- sync - sync directories
- walk - walk a directory
- fstest - provides integration test framework
- fstests - integration tests for the backends
- mockdir - mocks an fs.Directory
- mockobject - mocks an fs.Object
- test_all - Runs integration tests for everything
- graphics - the images used in the website, etc.
- lib - libraries used by the backend
- atexit - register functions to run when rclone exits
- dircache - directory ID to name caching
- oauthutil - helpers for using oauth
- pacer - retries with backoff and paces operations
- readers - a selection of useful io.Readers
- rest - a thin abstraction over net/http for REST
- librclone - in memory interface to rclone's API for embedding rclone
- vfs - Virtual FileSystem layer for implementing rclone mount and similar
* backend - the rclone backends for interfacing to cloud providers -
* all - import this to load all the cloud providers
* ...providers
* bin - scripts for use while building or maintaining rclone
* cmd - the rclone commands
* all - import this to load all the commands
* ...commands
* cmdtest - end-to-end tests of commands, flags, environment variables,...
* docs - the documentation and website
* content - adjust these docs only - everything else is autogenerated
* command - these are auto-generated - edit the corresponding .go file
* fs - main rclone definitions - minimal amount of code
* accounting - bandwidth limiting and statistics
* asyncreader - an io.Reader which reads ahead
* config - manage the config file and flags
* driveletter - detect if a name is a drive letter
* filter - implements include/exclude filtering
* fserrors - rclone specific error handling
* fshttp - http handling for rclone
* fspath - path handling for rclone
* hash - defines rclone's hash types and functions
* list - list a remote
* log - logging facilities
* march - iterates directories in lock step
* object - in memory Fs objects
* operations - primitives for sync, e.g. Copy, Move
* sync - sync directories
* walk - walk a directory
* fstest - provides integration test framework
* fstests - integration tests for the backends
* mockdir - mocks an fs.Directory
* mockobject - mocks an fs.Object
* test_all - Runs integration tests for everything
* graphics - the images used in the website, etc.
* lib - libraries used by the backend
* atexit - register functions to run when rclone exits
* dircache - directory ID to name caching
* oauthutil - helpers for using oauth
* pacer - retries with backoff and paces operations
* readers - a selection of useful io.Readers
* rest - a thin abstraction over net/http for REST
* vfs - Virtual FileSystem layer for implementing rclone mount and similar
## Writing Documentation
## Writing Documentation ##
If you are adding a new feature then please update the documentation.
@@ -286,22 +277,22 @@ alphabetical order.
If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field.
- Start with the most important information about the option,
* Start with the most important information about the option,
as a single sentence on a single line.
- This text will be used for the command-line flag help.
- It will be combined with other information, such as any default value,
* This text will be used for the command-line flag help.
* It will be combined with other information, such as any default value,
and the result will look odd if not written as a single sentence.
- It should end with a period/full stop character, which will be shown
* It should end with a period/full stop character, which will be shown
in docs but automatically removed when producing the flag help.
- Try to keep it below 80 characters, to reduce text wrapping in the terminal.
- More details can be added in a new paragraph, after an empty line (`"\n\n"`).
- Like with docs generated from Markdown, a single line break is ignored
* Try to keep it below 80 characters, to reduce text wrapping in the terminal.
* More details can be added in a new paragraph, after an empty line (`"\n\n"`).
* Like with docs generated from Markdown, a single line break is ignored
and two line breaks creates a new paragraph.
- This text will be shown to the user in `rclone config`
* This text will be shown to the user in `rclone config`
and in the docs (where it will be added by `make backenddocs`,
normally run some time before next release).
- To create options of enumeration type use the `Examples:` field.
- Each example value has its own `Help:` field, but they are treated
* To create options of enumeration type use the `Examples:` field.
* Each example value has its own `Help:` field, but they are treated
a bit different than the main option help text. They will be shown
as an unordered list, therefore a single line break is enough to
create a new list item. Also, for enumeration texts like name of
@@ -321,12 +312,12 @@ combined unmodified with other information (such as any default value).
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy.
## Making a release
## Making a release ##
There are separate instructions for making a release in the RELEASE.md
file.
## Commit messages
## Commit messages ##
Please make the first line of your commit message a summary of the
change that a user (not a developer) of rclone would like to read, and
@@ -367,7 +358,7 @@ error fixing the hang.
Fixes #1498
```
## Adding a dependency
## Adding a dependency ##
rclone uses the [go
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
@@ -379,7 +370,7 @@ To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency and add it to
`go.mod` and `go.sum`.
go get github.com/ncw/new_dependency
GO111MODULE=on go get github.com/ncw/new_dependency
You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.
@@ -387,15 +378,15 @@ go docs linked above), but don't unless you really need to.
Please check in the changes generated by `go mod` including `go.mod`
and `go.sum` in the same commit as your other changes.
## Updating a dependency
## Updating a dependency ##
If you need to update a dependency then run
go get golang.org/x/crypto
GO111MODULE=on go get -u golang.org/x/crypto
Check in a single commit as above.
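
A short sketch tying the dependency steps together; the package paths are the same examples used above, and the commit message is illustrative:

```bash
# Add a new dependency, update an existing one, and commit the module files.
go get github.com/ncw/new_dependency
go get golang.org/x/crypto        # updates that module to the latest release
go mod tidy                       # optional: drop requirements no longer used
git add go.mod go.sum
git commit -m "build: update dependencies"
```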
## Updating all the dependencies
## Updating all the dependencies ##
In order to update all the dependencies then run `make update`. This
just uses the go modules to update all the modules to their latest
@@ -404,7 +395,7 @@ stable release. Check in the changes in a single commit as above.
This should be done early in the release cycle to pick up new versions
of packages in time for them to get some testing.
## Updating a backend
## Updating a backend ##
If you update a backend then please run the unit tests and the
integration tests for that backend.
@@ -419,133 +410,82 @@ integration tests.
The next section goes into more detail about the tests.
## Writing a new backend
## Writing a new backend ##
Choose a name. The docs here will use `remote` as an example.
Note that in rclone terminology a file system backend is called a
remote or an fs.
### Research
Research
- Look at the interfaces defined in `fs/types.go`
- Study one or more of the existing remotes
* Look at the interfaces defined in `fs/fs.go`
* Study one or more of the existing remotes
### Getting going
Getting going
- Create `backend/remote/remote.go` (copy this from a similar remote)
- box is a good one to start from if you have a directory-based remote (and shows how to use the directory cache)
- b2 is a good one to start from if you have a bucket-based remote
- Add your remote to the imports in `backend/all/all.go`
- HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good Go SDK from the provider then use that instead.
- Try to implement as many optional methods as possible as it makes the remote more usable.
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
- `rclone purge -v TestRemote:rclone-info`
- `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
- `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
- open `remote.csv` in a spreadsheet and examine
* Create `backend/remote/remote.go` (copy this from a similar remote)
* box is a good one to start from if you have a directory-based remote
* b2 is a good one to start from if you have a bucket-based remote
* Add your remote to the imports in `backend/all/all.go`
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable.
* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
* `rclone purge -v TestRemote:rclone-info`
* `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
* `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
* open `remote.csv` in a spreadsheet and examine
### Guidelines for a speedy merge
Unit tests
- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend.
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) if your backend is HTTP based - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
- **Do** follow your example backend exactly - use the same code order, function names, layout, structure. **Don't** move stuff around and **Don't** delete the comments.
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few backends like that - don't follow them!)
- **Do** put your API type definitions in a separate file - by preference `api/types.go`
- **Remember** we have >50 backends to maintain so keeping them as similar as possible to each other is a high priority!
* Create a config entry called `TestRemote` for the unit tests to use
* Create a `backend/remote/remote_test.go` - copy and adjust your example remote
* Make sure all tests pass with `go test -v`
### Unit tests
Integration tests
- Create a config entry called `TestRemote` for the unit tests to use
- Create a `backend/remote/remote_test.go` - copy and adjust your example remote
- Make sure all tests pass with `go test -v`
### Integration tests
- Add your backend to `fstest/test_all/config.yaml`
- Once you've done that then you can use the integration test framework from the project root:
- go install ./...
- test_all -backends remote
* Add your backend to `fstest/test_all/config.yaml`
* Once you've done that then you can use the integration test framework from the project root:
* go install ./...
* test_all -backends remote
Or if you want to run the integration tests manually:
- Make sure integration tests pass with
- `cd fs/operations`
- `go test -v -remote TestRemote:`
- `cd fs/sync`
- `go test -v -remote TestRemote:`
- If your remote defines `ListR` check with this also
- `go test -v -remote TestRemote: -fast-list`
* Make sure integration tests pass with
* `cd fs/operations`
* `go test -v -remote TestRemote:`
* `cd fs/sync`
* `go test -v -remote TestRemote:`
* If your remote defines `ListR` check with this also
* `go test -v -remote TestRemote: -fast-list`
See the [testing](#testing) section for more information on integration tests.
### Backend documentation
Add your backend to the docs - you'll need to pick an icon for it from
Add your fs to the docs - you'll need to pick an icon for it from
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
alphabetical order of full name of remote (e.g. `drive` is ordered as
`Google Drive`) but with the local file system last.
- `README.md` - main GitHub page
- `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
- make sure this has the `autogenerated options` comments in (see your reference backend docs)
- update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table.
- `docs/content/docs.md` - list of remotes in config section
- `docs/content/_index.md` - front page of rclone.org
- `docs/layouts/chrome/navbar.html` - add it to the website navigation
- `bin/make_manual.py` - add the page to the `docs` constant
* `README.md` - main GitHub page
* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
* make sure this has the `autogenerated options` comments in (see your reference backend docs)
* update them with `make backenddocs` - revert any changes in other backends
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/_index.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `bin/make_manual.py` - add the page to the `docs` constant
Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.
## Adding a new s3 provider
It is quite easy to add a new S3 provider to rclone.
You'll need to modify the following files
- `backend/s3/s3.go`
- Add the provider to `providerOption` at the top of the file
- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
- Exclude your provider from generic config questions (e.g. `region` and `endpoint`).
- Add the provider to the `setQuirks` function - see the documentation there.
- `docs/content/s3.md`
- Add the provider at the top of the page.
- Add a section about the provider linked from there.
- Add a transcript of a trial `rclone config` session
- Edit the transcript to remove things which might change in subsequent versions
- **Do not** alter or add to the autogenerated parts of `s3.md`
- **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
- `README.md` - this is the home page in github
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
- `docs/content/_index.md` - this is the home page of rclone.org
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
When adding the provider, endpoints, quirks, docs etc keep them in
alphabetical order by `Provider` name, but with `AWS` first and
`Other` last.
Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.
Once you've written the code, test `rclone config` works to your
satisfaction, and check the integration tests work `go test -v -remote
NewS3Provider:`. You may need to adjust the quirks to get them to
pass. Some providers just can't pass the tests with control characters
in the names so if these fail and the provider doesn't support
`urlEncodeListings` in the quirks then ignore them. Note that the
`SetTier` test may also fail on non AWS providers.
For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone/rclone/commit/eb3082a1ebdb76d5625f14cedec3f5154a5e7b10).
## Writing a plugin
## Writing a plugin ##
New features (backends, commands) can also be added "out-of-tree", through Go plugins.
Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
### Usage
Usage
- Naming
- Plugins names must have the pattern `librcloneplugin_KIND_NAME.so`.
@@ -560,7 +500,7 @@ This is useful if you can't merge your changes upstream or don't want to maintai
- Plugins must be compiled against the exact version of rclone to work.
(The rclone used during building the plugin must be the same as the source of rclone)
### Building
Building
To turn your existing additions into a Go plugin, move them to an external repository
and change the top-level package name to `main`.
@@ -572,19 +512,3 @@ Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)
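
A minimal build sketch for the plugin workflow described above; the repository path and plugin name are illustrative, and the naming pattern comes from the rules listed earlier:

```bash
# Build an out-of-tree addition as a Go plugin. The external repo's top-level
# package must be "main", and the plugin must be built against the exact
# rclone version it will be loaded into.
cd ~/src/rclone-plugin-myremote
go build -buildmode=plugin -o librcloneplugin_backend_myremote.so .
```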
## Keeping a backend or command out of tree
Rclone was designed to be modular so it is very easy to keep a backend
or a command out of the main rclone source tree.
So for example if you had a backend which accessed your proprietary
systems or a command which was specialised for your needs you could
add them out of tree.
This may be easier than using a plugin and is supported on all
platforms not just macOS and Linux.
This is explained further in https://github.com/rclone/rclone_out_of_tree_example
which has an example of an out of tree backend `ram` (which is a
renamed version of the `memory` backend).


@@ -1,47 +1,18 @@
FROM golang:alpine AS builder
ARG CGO_ENABLED=0
FROM golang AS builder
COPY . /go/src/github.com/rclone/rclone/
WORKDIR /go/src/github.com/rclone/rclone/
RUN echo "**** Set Go Environment Variables ****" && \
go env -w GOCACHE=/root/.cache/go-build
RUN echo "**** Install Dependencies ****" && \
apk add --no-cache \
make \
bash \
gawk \
git
COPY go.mod .
COPY go.sum .
RUN echo "**** Download Go Dependencies ****" && \
go mod download -x
RUN echo "**** Verify Go Dependencies ****" && \
go mod verify
COPY . .
RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \
echo "**** Build Binary ****" && \
make
RUN echo "**** Print Version Binary ****" && \
./rclone version
RUN \
CGO_ENABLED=0 \
make
RUN ./rclone version
# Begin final image
FROM alpine:latest
RUN echo "**** Install Dependencies ****" && \
apk add --no-cache \
ca-certificates \
fuse3 \
tzdata && \
echo "Enable user_allow_other in fuse" && \
echo "user_allow_other" >> /etc/fuse.conf
RUN apk --no-cache add ca-certificates fuse3 tzdata && \
echo "user_allow_other" >> /etc/fuse.conf
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
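
The newer Dockerfile above relies on a BuildKit cache mount, so a local build needs BuildKit enabled; the tag below is illustrative:

```bash
# Build and smoke-test the image; RUN --mount=type=cache requires BuildKit.
DOCKER_BUILDKIT=1 docker build -t rclone-dev .
docker run --rm rclone-dev version
```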


@@ -18,11 +18,6 @@ Current active maintainers of rclone are:
| Caleb Case | @calebcase | storj backend |
| wiserain | @wiserain | pikpak backend |
| albertony | @albertony | |
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
| Hideo Aoyama | @boukendesho | snap packaging |
| nielash | @nielash | bisync |
| Dan McArdle | @dmcardle | gitannex |
| Sam Harrison | @childish-sambino | filescom |
**This is a work in progress Draft**

MANUAL.html (generated, 18305 changed lines): diff suppressed because it is too large
MANUAL.md (generated, 21299 changed lines): diff suppressed because it is too large
MANUAL.txt (generated, 21895 changed lines): diff suppressed because it is too large


@@ -30,37 +30,29 @@ ifdef RELEASE_TAG
TAG := $(RELEASE_TAG)
endif
GO_VERSION := $(shell go version)
GO_OS := $(shell go env GOOS)
ifdef BETA_SUBDIR
BETA_SUBDIR := /$(BETA_SUBDIR)
endif
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := beta.rclone.org:
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
# Pass in GOTAGS=xyz on the make command line to set build tags
ifdef GOTAGS
BUILDTAGS=-tags "$(GOTAGS)"
LINTTAGS=--build-tags "$(GOTAGS)"
endif
LDFLAGS=--ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)"
.PHONY: rclone test_all vars version
rclone:
ifeq ($(GO_OS),windows)
go run bin/resource_windows.go -version $(TAG) -syso resource_windows_`go env GOARCH`.syso
endif
go build -v $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS)
ifeq ($(GO_OS),windows)
rm resource_windows_`go env GOARCH`.syso
endif
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
mkdir -p `go env GOPATH`/bin/
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
test_all:
go install $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
vars:
@echo SHELL="'$(SHELL)'"
@@ -74,10 +66,6 @@ btest:
@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
@echo "Copied markdown of beta release to clip board"
btesth:
@echo "<a href="$(BETA_URL)">$(TAG)</a> on branch <a href="https://github.com/rclone/rclone/tree/$(BRANCH)">$(BRANCH)</a> (uploaded in 15-30 mins)" | xclip -r -sel clip -t text/html
@echo "Copied beta release in HTML to clip board"
version:
@echo '$(TAG)'
@@ -88,13 +76,13 @@ test: rclone test_all
# Quick test
quicktest:
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) ./...
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...
racequicktest:
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -cpu=2 -race ./...
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
compiletest:
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -run XXX ./...
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...
# Do source code quality checks
check: rclone
@@ -104,12 +92,16 @@ check: rclone
# Get the build dependencies
build_dep:
go run bin/get-github-release.go -use-api -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
# Get the release dependencies we only install on linux
release_dep_linux:
go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest
# Get the release dependencies we only install on Windows
release_dep_windows:
GOOS="" GOARCH="" go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest
# Update dependencies
showupdates:
@echo "*** Direct dependencies that could be updated ***"
@@ -144,21 +136,17 @@ MANUAL.txt: MANUAL.md
pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
commanddocs: rclone
-@rmdir -p '$$HOME/.config/rclone'
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
backenddocs: rclone bin/make_backend_docs.py
-@rmdir -p '$$HOME/.config/rclone'
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
rcdocs: rclone
bin/make_rc_docs.sh
install: rclone
install -d ${DESTDIR}/usr/bin
install ${GOPATH}/bin/rclone ${DESTDIR}/usr/bin
install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
clean:
go clean ./...
@@ -172,7 +160,7 @@ website:
@if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi
upload_website: website
rclone -v sync docs/public www.rclone.org:
rclone -v sync docs/public memstore:www-rclone-org
upload_test_website: website
rclone -P sync docs/public test-rclone-org:
@@ -199,8 +187,8 @@ check_sign:
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
upload:
rclone -P copy build/ downloads.rclone.org:/$(TAG)
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "downloads.rclone.org:/$(TAG)/$$i" "downloads.rclone.org:/$$j"'
rclone -P copy build/ memstore:downloads-rclone-org/$(TAG)
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"'
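
The BASH_REMATCH one-liner in the upload recipe publishes a second, version-less copy of each artifact; a standalone sketch of that rewrite, with an example file name:

```bash
# For an artifact like rclone-v1.63.0-linux-amd64.zip, derive the
# "-current-" alias that the upload target also copies to.
i="rclone-v1.63.0-linux-amd64.zip"
j="$i"
if [[ $i =~ (.*)(-v[0-9\.]+-)(.*) ]]; then
  j="${BASH_REMATCH[1]}-current-${BASH_REMATCH[3]}"
fi
echo "$j"   # rclone-current-linux-amd64.zip
```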
upload_github:
./bin/upload-github $(TAG)
@@ -210,7 +198,7 @@ cross: doc
beta:
go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
rclone -v copy build/ pub.rclone.org:/$(TAG)
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
log_since_last_release:
@@ -223,18 +211,18 @@ ci_upload:
sudo chown -R $$USER build
find build -type l -delete
gzip -r9v build
./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
@echo Beta release ready at $(BETA_URL)/testbuilds
ci_beta:
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
rclone --no-check-dest --config bin/ci.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
rclone --no-check-dest --config bin/ci.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
endif
@echo Beta release ready at $(BETA_URL)
@@ -243,7 +231,7 @@ fetch_binaries:
rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
serve: website
cd docs && hugo server --logLevel info -w --disableFastRender
cd docs && hugo server -v -w --disableFastRender
tag: retag doc
bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new


@@ -1,5 +1,3 @@
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
@@ -25,6 +23,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
@@ -39,19 +38,14 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
* Files.com [:page_facing_up:](https://rclone.org/filescom/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GoFile [:page_facing_up:](https://rclone.org/gofile/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
* Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
* iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
* ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
@@ -59,15 +53,11 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
* Linkbox [:page_facing_up:](https://rclone.org/linkbox)
* Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
* Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Memory [:page_facing_up:](https://rclone.org/memory/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
@@ -77,25 +67,19 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
* Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
@@ -103,7 +87,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
@@ -132,7 +115,6 @@ These backends adapt or modify other storage providers
* Partial syncs supported on a whole file basis
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
* [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync bidirectionally
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, e.g. two different cloud accounts
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))


@@ -37,52 +37,16 @@ This file describes how to make the various kinds of releases
## Update dependencies
Early in the next release cycle update the dependencies.
Early in the next release cycle update the dependencies
* Review any pinned packages in go.mod and remove if possible
* `make updatedirect`
* `make GOTAGS=cmount`
* `make compiletest`
* Fix anything which doesn't compile at this point and commit changes here
* `git commit -a -v -m "build: update all dependencies"`
If the `make updatedirect` upgrades the version of go in the `go.mod`
go 1.22.0
then go to manual mode. `go1.22` here is the lowest supported version
in the `go.mod`.
If `make updatedirect` added a `toolchain` directive then remove it.
We don't want to force a toolchain on our users. Linux packagers are
often using a version of Go that is a few versions out of date.
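As an illustration, a minimal sketch of removing it by hand (the `sed` invocation is an assumption, not part of the documented process, and may need adjusting on macOS):

```
# delete the toolchain line the update added, then confirm nothing else changed
sed -i '/^toolchain /d' go.mod
git diff go.mod
```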
```
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
go get -d $(cat /tmp/potential-upgrades)
go mod tidy -go=1.22 -compat=1.22
```
If the `go mod tidy` fails use the output from it to remove the
package which can't be upgraded from `/tmp/potential-upgrades` when
done
```
git co go.mod go.sum
```
And try again.
Optionally upgrade the direct and indirect dependencies. This is very
likely to fail if the manual method was used above - in that case
ignore it as it is too time consuming to fix.
* `make update`
* `make GOTAGS=cmount`
* `make compiletest`
* make updatedirect
* make
* git commit -a -v
* make update
* make
* roll back any updates which didn't compile
* `git commit -a -v --amend`
* **NB** watch out for this changing the default go version in `go.mod`
* git commit -a -v --amend
Note that `make update` updates all direct and indirect dependencies
and there can occasionally be forwards compatibility problems with
@@ -90,19 +54,6 @@ doing that so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to
build.
Once it compiles locally, push it on a test branch and commit fixes
until the tests pass.
### Major versions
The above procedure will not upgrade major versions, e.g. from v2 to v3.
However this tool can show which major versions might need to be
upgraded:
go run github.com/icholy/gomajor@latest list -major
Expect API breakage when updating major versions.
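For illustration only, upgrading a single module to a new major version looks roughly like this (the module path below is a placeholder, not a real rclone dependency):

```
# pull in the new major version, then fix the import paths and re-check the build
go get github.com/example/widget/v3@latest
make compiletest
```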
## Tidy beta
At some point after the release run
@@ -131,62 +82,42 @@ Now
* git co ${BASE_TAG}-stable
* git cherry-pick any fixes
* make startstable
* Do the steps as above
* make startstable
* git co master
* `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
* git checkout ${BASE_TAG}-stable docs/content/changelog.md
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* git push
## Sponsor logos
If updating the website note that the sponsor logos have been moved out of the main repository.
You will need to checkout `/docs/static/img/logos` from https://github.com/rclone/third-party-logos
which is a private repo containing artwork from sponsors.
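A minimal sketch of pulling in the artwork, assuming you have access to the private repo (the clone location is illustrative):

```
git clone git@github.com:rclone/third-party-logos.git /tmp/third-party-logos
cp -R /tmp/third-party-logos/* docs/static/img/logos/
```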
## Update the website between releases
Create an update website branch based off the last release
git co -b update-website
If the branch already exists, double check there are no commits that need saving.
Now reset the branch to the last release
git reset --hard v1.64.0
Create the changes, check them in, test with `make serve` then
make upload_test_website
Check out https://test.rclone.org and when happy
make upload_website
Cherry pick any changes back to master and the stable branch if it is active.
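What that might look like in practice (the commit hash and stable branch name are placeholders):

```
git checkout master
git cherry-pick <website-commit-sha>
git checkout v1.64-stable
git cherry-pick <website-commit-sha>
```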
## Making a manual build of docker
To do a basic build of rclone's docker image to debug builds locally:
The rclone docker image should autobuild on via GitHub actions. If it doesn't
or needs to be updated then rebuild like this.
See: https://github.com/ilteoood/docker_buildx/issues/19
See: https://github.com/ilteoood/docker_buildx/blob/master/scripts/install_buildx.sh
```
docker buildx build --load -t rclone/rclone:testing --progress=plain .
docker run --rm rclone/rclone:testing version
git co v1.54.1
docker pull golang
export DOCKER_CLI_EXPERIMENTAL=enabled
docker buildx create --name actions_builder --use
docker run --rm --privileged docker/binfmt:820fdd95a9972a5308930a2bdfb8573dd4447ad3
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
SUPPORTED_PLATFORMS=$(docker buildx inspect --bootstrap | grep 'Platforms:*.*' | cut -d : -f2,3)
echo "Supported platforms: $SUPPORTED_PLATFORMS"
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
docker buildx stop actions_builder
```
To test the multiplatform build
### Old build for linux/amd64 only
```
docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
```
To make a full build then set the tags correctly and add `--push`
Note that you can't only build one architecture - you need to build them all.
```
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
docker pull golang
docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .
docker push rclone/rclone:1.52.0
docker push rclone/rclone:1.52
docker push rclone/rclone:1
docker push rclone/rclone:latest
```


@@ -1 +1 @@
v1.70.0
v1.64.0


@@ -23,8 +23,8 @@ func prepare(t *testing.T, root string) {
configfile.Install()
// Configure the remote
config.FileSetValue(remoteName, "type", "alias")
config.FileSetValue(remoteName, "remote", root)
config.FileSet(remoteName, "type", "alias")
config.FileSet(remoteName, "remote", root)
}
func TestNewFS(t *testing.T) {
@@ -81,12 +81,10 @@ func TestNewFS(t *testing.T) {
for i, gotEntry := range gotEntries {
what := fmt.Sprintf("%s, entry=%d", what, i)
wantEntry := test.entries[i]
_, isDir := gotEntry.(fs.Directory)
require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
if !isDir {
require.Equal(t, wantEntry.size, gotEntry.Size(), what)
}
require.Equal(t, wantEntry.size, gotEntry.Size(), what)
_, isDir := gotEntry.(fs.Directory)
require.Equal(t, wantEntry.isDir, isDir, what)
}
}


@@ -4,13 +4,12 @@ package all
import (
// Active file systems
_ "github.com/rclone/rclone/backend/alias"
_ "github.com/rclone/rclone/backend/amazonclouddrive"
_ "github.com/rclone/rclone/backend/azureblob"
_ "github.com/rclone/rclone/backend/azurefiles"
_ "github.com/rclone/rclone/backend/b2"
_ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/chunker"
_ "github.com/rclone/rclone/backend/cloudinary"
_ "github.com/rclone/rclone/backend/combine"
_ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt"
@@ -18,21 +17,16 @@ import (
_ "github.com/rclone/rclone/backend/dropbox"
_ "github.com/rclone/rclone/backend/fichier"
_ "github.com/rclone/rclone/backend/filefabric"
_ "github.com/rclone/rclone/backend/filescom"
_ "github.com/rclone/rclone/backend/ftp"
_ "github.com/rclone/rclone/backend/gofile"
_ "github.com/rclone/rclone/backend/googlecloudstorage"
_ "github.com/rclone/rclone/backend/googlephotos"
_ "github.com/rclone/rclone/backend/hasher"
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/hidrive"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/iclouddrive"
_ "github.com/rclone/rclone/backend/imagekit"
_ "github.com/rclone/rclone/backend/internetarchive"
_ "github.com/rclone/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/koofr"
_ "github.com/rclone/rclone/backend/linkbox"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/mailru"
_ "github.com/rclone/rclone/backend/mega"
@@ -43,12 +37,9 @@ import (
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak"
_ "github.com/rclone/rclone/backend/pixeldrain"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/protondrive"
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/quatrix"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp"
@@ -58,7 +49,6 @@ import (
_ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/ulozto"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/uptobox"
_ "github.com/rclone/rclone/backend/webdav"

File diff suppressed because it is too large


@@ -0,0 +1,21 @@
// Test AmazonCloudDrive filesystem interface
//go:build acd
// +build acd
package amazonclouddrive_test
import (
"testing"
"github.com/rclone/rclone/backend/amazonclouddrive"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
fstests.RemoteName = "TestAmazonCloudDrive:"
fstests.Run(t)
}

File diff suppressed because it is too large


@@ -1,151 +1,36 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
import (
"context"
"encoding/base64"
"strings"
"testing"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestBlockIDCreator(t *testing.T) {
// Check creation and random number
bic, err := newBlockIDCreator()
require.NoError(t, err)
bic2, err := newBlockIDCreator()
require.NoError(t, err)
assert.NotEqual(t, bic.random, bic2.random)
assert.NotEqual(t, bic.random, [8]byte{})
// Set random to known value for tests
bic.random = [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
chunkNumber := uint64(0xFEDCBA9876543210)
// Check creation of ID
want := base64.StdEncoding.EncodeToString([]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 1, 2, 3, 4, 5, 6, 7, 8})
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", want)
got := bic.newBlockID(chunkNumber)
assert.Equal(t, want, got)
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", got)
// Test checkID is working
assert.NoError(t, bic.checkID(chunkNumber, got))
assert.ErrorContains(t, bic.checkID(chunkNumber, "$"+got), "illegal base64")
assert.ErrorContains(t, bic.checkID(chunkNumber, "AAAA"+got), "bad block ID length")
assert.ErrorContains(t, bic.checkID(chunkNumber+1, got), "expecting decoded")
assert.ErrorContains(t, bic2.checkID(chunkNumber, got), "random bytes")
}
func (f *Fs) testFeatures(t *testing.T) {
// Check first feature flags are set on this remote
func (f *Fs) InternalTest(t *testing.T) {
// Check first feature flags are set on this
// remote
enabled := f.Features().SetTier
assert.True(t, enabled)
enabled = f.Features().GetTier
assert.True(t, enabled)
}
type ReadSeekCloser struct {
*strings.Reader
}
func (r *ReadSeekCloser) Close() error {
return nil
}
// Stage a block at remote but don't commit it
func (f *Fs) stageBlockWithoutCommit(ctx context.Context, t *testing.T, remote string) {
var (
containerName, blobPath = f.split(remote)
containerClient = f.cntSVC(containerName)
blobClient = containerClient.NewBlockBlobClient(blobPath)
data = "uncommitted data"
blockID = "1"
blockIDBase64 = base64.StdEncoding.EncodeToString([]byte(blockID))
)
r := &ReadSeekCloser{strings.NewReader(data)}
_, err := blobClient.StageBlock(ctx, blockIDBase64, r, nil)
require.NoError(t, err)
// Verify the block is staged but not committed
blockList, err := blobClient.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
require.NoError(t, err)
found := false
for _, block := range blockList.UncommittedBlocks {
if *block.Name == blockIDBase64 {
found = true
break
}
func TestIncrement(t *testing.T) {
for _, test := range []struct {
in []byte
want []byte
}{
{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
} {
increment(test.in)
assert.Equal(t, test.want, test.in)
}
require.True(t, found, "Block ID not found in uncommitted blocks")
}
// This tests uploading a blob where it has uncommitted blocks with a different ID size.
//
// https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/
//
// TestIntegration/FsMkdir/FsPutFiles/Internal/WriteUncommittedBlocks
func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
var (
ctx = context.Background()
remote = "testBlob"
)
// Multipart copy the blob please
oldUseCopyBlob, oldCopyCutoff := f.opt.UseCopyBlob, f.opt.CopyCutoff
f.opt.UseCopyBlob = false
f.opt.CopyCutoff = f.opt.ChunkSize
defer func() {
f.opt.UseCopyBlob, f.opt.CopyCutoff = oldUseCopyBlob, oldCopyCutoff
}()
// Create a blob with uncommitted blocks
f.stageBlockWithoutCommit(ctx, t, remote)
// Now attempt to overwrite the block with a different sized block ID to provoke this error
// Check the object does not exist
_, err := f.NewObject(ctx, remote)
require.Equal(t, fs.ErrorObjectNotFound, err)
// Upload a multipart file over the block with uncommitted chunks of a different ID size
size := 4*int(f.opt.ChunkSize) - 1
contents := random.String(size)
item := fstest.NewItem(remote, contents, fstest.Time("2001-05-06T04:05:06.499Z"))
o := fstests.PutTestContents(ctx, t, f, &item, contents, true)
// Check size
assert.Equal(t, int64(size), o.Size())
// Create a new blob with uncommitted blocks
newRemote := "testBlob2"
f.stageBlockWithoutCommit(ctx, t, newRemote)
// Copy over that block
dst, err := f.Copy(ctx, o, newRemote)
require.NoError(t, err)
// Check basics
assert.Equal(t, int64(size), dst.Size())
assert.Equal(t, newRemote, dst.Remote())
// Check contents
gotContents := fstests.ReadObject(ctx, t, dst, -1)
assert.Equal(t, contents, gotContents)
// Remove the object
require.NoError(t, dst.Remove(ctx))
}
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Features", f.testFeatures)
t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
}


@@ -1,6 +1,7 @@
// Test AzureBlob filesystem interface
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
@@ -15,17 +16,13 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
name := "TestAzureBlob"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool", "Cold"},
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "use_copy_blob", Value: "false"},
},
})
}
@@ -34,17 +31,16 @@ func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
name := "TestAzureBlob"
name := "TestAzureBlob:"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
RemoteName: name,
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool", "Cold"},
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
{Name: name, Key: "use_copy_blob", Value: "false"},
},
})
}
@@ -53,13 +49,8 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setCopyCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetCopyCutoffer = (*Fs)(nil)
)
func TestValidateAccessTier(t *testing.T) {
@@ -71,7 +62,6 @@ func TestValidateAccessTier(t *testing.T) {
"HOT": {"HOT", true},
"Hot": {"Hot", true},
"cool": {"cool", true},
"cold": {"cold", true},
"archive": {"archive", true},
"empty": {"", false},
"unknown": {"unknown", false},


@@ -2,6 +2,6 @@
// about "no buildable Go source files "
//go:build plan9 || solaris || js
// +build plan9 solaris js
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob

File diff suppressed because it is too large


@@ -1,69 +0,0 @@
//go:build !plan9 && !js
package azurefiles
import (
"context"
"math/rand"
"strings"
"testing"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
)
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Authentication", f.InternalTestAuth)
}
var _ fstests.InternalTester = (*Fs)(nil)
func (f *Fs) InternalTestAuth(t *testing.T) {
t.Skip("skipping since this requires authentication credentials which are not part of repo")
shareName := "test-rclone-oct-2023"
testCases := []struct {
name string
options *Options
}{
{
name: "ConnectionString",
options: &Options{
ShareName: shareName,
ConnectionString: "",
},
},
{
name: "AccountAndKey",
options: &Options{
ShareName: shareName,
Account: "",
Key: "",
}},
{
name: "SASUrl",
options: &Options{
ShareName: shareName,
SASURL: "",
}},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fs, err := newFsFromOptions(context.TODO(), "TestAzureFiles", "", tc.options)
assert.NoError(t, err)
dirName := randomString(10)
assert.NoError(t, fs.Mkdir(context.TODO(), dirName))
})
}
}
const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX"
func randomString(charCount int) string {
strBldr := strings.Builder{}
for range charCount {
randPos := rand.Int63n(52)
strBldr.WriteByte(chars[randPos])
}
return strBldr.String()
}


@@ -1,17 +0,0 @@
//go:build !plan9 && !js
package azurefiles
import (
"testing"
"github.com/rclone/rclone/fstest/fstests"
)
func TestIntegration(t *testing.T) {
var objPtr *Object
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureFiles:",
NilObject: objPtr,
})
}


@@ -1,7 +0,0 @@
// Build for azurefiles for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || js
// Package azurefiles provides an interface to Microsoft Azure Files
package azurefiles


@@ -33,19 +33,10 @@ var _ fserrors.Fataler = (*Error)(nil)
// Bucket describes a B2 bucket
type Bucket struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
}
// LifecycleRule is a single lifecycle rule
type LifecycleRule struct {
DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"`
DaysFromStartingToCancelingUnfinishedLargeFiles *int `json:"daysFromStartingToCancelingUnfinishedLargeFiles"`
FileNamePrefix string `json:"fileNamePrefix"`
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
}
// Timestamp is a UTC time when this file was uploaded. It is a base
@@ -130,10 +121,10 @@ type AuthorizeAccountResponse struct {
AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
AccountID string `json:"accountId"` // The identifier for the account.
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
NamePrefix any `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
} `json:"allowed"`
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
@@ -215,10 +206,9 @@ type FileInfo struct {
// CreateBucketRequest is used to create a bucket
type CreateBucketRequest struct {
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
}
// DeleteBucketRequest is used to create a bucket
@@ -341,11 +331,3 @@ type CopyPartRequest struct {
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
}
// UpdateBucketRequest describes a request to modify a B2 bucket
type UpdateBucketRequest struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Type string `json:"bucketType,omitempty"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
}


@@ -42,11 +42,11 @@ func TestTimestampIsZero(t *testing.T) {
}
func TestTimestampEqual(t *testing.T) {
assert.False(t, emptyT.Equal(emptyT)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
assert.False(t, emptyT.Equal(emptyT))
assert.False(t, t0.Equal(emptyT))
assert.False(t, emptyT.Equal(t0))
assert.False(t, t0.Equal(t1))
assert.False(t, t1.Equal(t0))
assert.True(t, t0.Equal(t0)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
assert.True(t, t1.Equal(t1)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
assert.True(t, t0.Equal(t0))
assert.True(t, t1.Equal(t1))
}


@@ -9,14 +9,12 @@ import (
"bytes"
"context"
"crypto/sha1"
"encoding/json"
"errors"
"fmt"
gohash "hash"
"io"
"net/http"
"path"
"slices"
"strconv"
"strings"
"sync"
@@ -31,11 +29,9 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest"
@@ -61,8 +57,9 @@ const (
minChunkSize = 5 * fs.Mebi
defaultChunkSize = 96 * fs.Mebi
defaultUploadCutoff = 200 * fs.Mebi
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
defaultMaxAge = 24 * time.Hour
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
)
// Globals
@@ -77,7 +74,6 @@ func init() {
Name: "b2",
Description: "Backblaze B2",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "account",
Help: "Account ID or Application Key ID.",
@@ -104,7 +100,7 @@ below will cause b2 to return specific errors:
* "force_cap_exceeded"
These will be set in the "X-Bz-Test-Mode" header which is documented
in the [b2 integrations checklist](https://www.backblaze.com/docs/cloud-storage-integration-checklist).`,
in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`,
Default: "",
Hide: fs.OptionHideConfigurator,
Advanced: true,
@@ -153,18 +149,6 @@ might a maximum of "--transfers" chunks in progress at once.
5,000,000 Bytes is the minimum size.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
Note that chunks are stored in memory and there may be up to
"--transfers" * "--b2-upload-concurrency" chunks stored at once
in memory.`,
Default: 4,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files.
@@ -196,57 +180,29 @@ Example:
Advanced: true,
}, {
Name: "download_auth_duration",
Help: `Time before the public link authorization token will expire in s or suffix ms|s|m|h|d.
This is used in combination with "rclone link" for making files
accessible to the public and sets the duration before the download
authorization token will expire.
Help: `Time before the authorization token will expire in s or suffix ms|s|m|h|d.
The duration before the download authorization token will expire.
The minimum value is 1 second. The maximum value is one week.`,
Default: fs.Duration(7 * 24 * time.Hour),
Advanced: true,
}, {
Name: "memory_pool_flush_time",
Default: fs.Duration(time.Minute),
Default: memoryPoolFlushTime,
Advanced: true,
Hide: fs.OptionHideBoth,
Help: `How often internal memory buffer pools will be flushed. (no longer used)`,
Help: `How often internal memory buffer pools will be flushed.
Uploads which require additional buffers (e.g. multipart) will use the memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
}, {
Name: "memory_pool_use_mmap",
Default: false,
Advanced: true,
Hide: fs.OptionHideBoth,
Help: `Whether to use mmap buffers in internal memory pool. (no longer used)`,
}, {
Name: "lifecycle",
Help: `Set the number of days deleted files should be kept when creating a bucket.
On bucket creation, this parameter is used to create a lifecycle rule
for the entire bucket.
If lifecycle is 0 (the default) it does not create a lifecycle rule so
the default B2 behaviour applies. This is to create versions of files
on delete and overwrite and to keep them indefinitely.
If lifecycle is >0 then it creates a single rule setting the number of
days before a file that is deleted or overwritten is deleted
permanently. This is known as daysFromHidingToDeleting in the b2 docs.
The minimum value for this parameter is 1 day.
You can also enable hard_delete in the config, which will mean
deletions won't cause versions but overwrites will still cause
versions to be made.
See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket creation.
`,
Default: 0,
Default: memoryPoolUseMmap,
Advanced: true,
Help: `Whether to use mmap buffers in internal memory pool.`,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// See: https://www.backblaze.com/docs/cloud-storage-files
// See: https://www.backblaze.com/b2/docs/files.html
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
// FIXME: allow /, but not leading, trailing or double
Default: (encoder.Display |
@@ -268,11 +224,11 @@ type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
DisableCheckSum bool `config:"disable_checksum"`
DownloadURL string `config:"download_url"`
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
Lifecycle int `config:"lifecycle"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -297,6 +253,7 @@ type Fs struct {
authMu sync.Mutex // lock for authorizing the account
pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency
pool *pool.Pool // memory pool
}
// Object describes a b2 object
@@ -365,7 +322,7 @@ var retryErrorCodes = []int{
504, // Gateway Time-out
}
// shouldRetryNoReauth returns a boolean as to whether this resp and err
// shouldRetryNoAuth returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetryNoReauth(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
@@ -406,18 +363,11 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
body, err := rest.ReadBody(resp)
if err != nil {
fs.Errorf(nil, "Couldn't read error out of body: %v", err)
body = nil
}
// Decode error response if there was one - they can be blank
// Decode error response
errResponse := new(api.Error)
if len(body) > 0 {
err = json.Unmarshal(body, errResponse)
if err != nil {
fs.Errorf(nil, "Couldn't decode error response: %v", err)
}
err := rest.DecodeJSON(resp, &errResponse)
if err != nil {
fs.Debugf(nil, "Couldn't decode error response: %v", err)
}
if errResponse.Code == "" {
errResponse.Code = "unknown"
@@ -461,14 +411,6 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
return
}
func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs
}
return
}
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
@@ -516,14 +458,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
uploads: make(map[string][]*api.GetUploadURLResponse),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
ci.Transfers,
opt.MemoryPoolUseMmap,
),
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
ChunkWriterDoesntSeek: true,
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(ctx, f)
// Set the test flag if required
if opt.TestMode != "" {
@@ -590,7 +537,12 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
// hasPermission returns if the current AuthorizationToken has the selected permission
func (f *Fs) hasPermission(permission string) bool {
return slices.Contains(f.info.Allowed.Capabilities, permission)
for _, capability := range f.info.Allowed.Capabilities {
if capability == permission {
return true
}
}
return false
}
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
@@ -645,24 +597,23 @@ func (f *Fs) clearUploadURL(bucketID string) {
f.uploadMu.Unlock()
}
// getRW gets a RW buffer and an upload token
// getBuf gets a buffer of f.opt.ChunkSize and an upload token
//
// If noBuf is set then it just gets an upload token
func (f *Fs) getRW(noBuf bool) (rw *pool.RW) {
func (f *Fs) getBuf(noBuf bool) (buf []byte) {
f.uploadToken.Get()
if !noBuf {
rw = multipart.NewRW()
buf = f.pool.Get()
}
return rw
return buf
}
// putRW returns a RW buffer to the memory pool and returns an upload
// token
// putBuf returns a buffer to the memory pool and an upload token
//
// If buf is nil then it just returns the upload token
func (f *Fs) putRW(rw *pool.RW) {
if rw != nil {
_ = rw.Close()
// If noBuf is set then it just returns the upload token
func (f *Fs) putBuf(buf []byte, noBuf bool) {
if !noBuf {
f.pool.Put(buf)
}
f.uploadToken.Put()
}
@@ -869,7 +820,7 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
// listBuckets returns all the buckets to out
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
err = f.listBucketsToFn(ctx, "", func(bucket *api.Bucket) error {
err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
d := fs.NewDir(bucket.Name, time.Time{})
entries = append(entries, d)
return nil
@@ -918,7 +869,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
last := ""
return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
@@ -962,14 +913,11 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
type listBucketFn func(*api.Bucket) error
// listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error {
func (f *Fs) listBucketsToFn(ctx context.Context, fn listBucketFn) error {
var account = api.ListBucketsRequest{
AccountID: f.info.AccountID,
BucketID: f.info.Allowed.BucketID,
}
if bucketName != "" && account.BucketID == "" {
account.BucketName = f.opt.Enc.FromStandardName(bucketName)
}
var response api.ListBucketsResponse
opts := rest.Opts{
@@ -1015,7 +963,7 @@ func (f *Fs) getbucketType(ctx context.Context, bucket string) (bucketType strin
if bucketType != "" {
return bucketType, nil
}
err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error {
err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
// listBucketsToFn reads bucket Types
return nil
})
@@ -1050,7 +998,7 @@ func (f *Fs) getBucketID(ctx context.Context, bucket string) (bucketID string, e
if bucketID != "" {
return bucketID, nil
}
err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error {
err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
// listBucketsToFn sets IDs
return nil
})
@@ -1114,11 +1062,6 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
Name: f.opt.Enc.FromStandardName(bucket),
Type: "allPrivate",
}
if f.opt.Lifecycle > 0 {
request.LifecycleRules = []api.LifecycleRule{{
DaysFromHidingToDeleting: &f.opt.Lifecycle,
}}
}
var response api.Bucket
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
@@ -1246,7 +1189,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
// if oldOnly is true then it deletes only non current files.
//
// Implemented here so we can make sure we delete old versions.
func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden bool, deleteUnfinished bool, maxAge time.Duration) error {
func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
bucket, directory := f.split(dir)
if bucket == "" {
return errors.New("can't purge from root")
@@ -1264,14 +1207,14 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
}
}
var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
return time.Since(time.Time(timestamp)) > maxAge
return time.Since(time.Time(timestamp)).Hours() > 24
}
// Delete Config.Transfers in parallel
toBeDeleted := make(chan *api.File, f.ci.Transfers)
var wg sync.WaitGroup
wg.Add(f.ci.Transfers)
for range f.ci.Transfers {
for i := 0; i < f.ci.Transfers; i++ {
go func() {
defer wg.Done()
for object := range toBeDeleted {
@@ -1287,21 +1230,6 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
}
}()
}
if oldOnly {
if deleteHidden && deleteUnfinished {
fs.Infof(f, "cleaning bucket %q of all hidden files, and pending multipart uploads older than %v", bucket, maxAge)
} else if deleteHidden {
fs.Infof(f, "cleaning bucket %q of all hidden files", bucket)
} else if deleteUnfinished {
fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than %v", bucket, maxAge)
} else {
fs.Errorf(f, "cleaning bucket %q of nothing. This should never happen!", bucket)
return nil
}
} else {
fs.Infof(f, "cleaning bucket %q of all files", bucket)
}
last := ""
checkErr(f.list(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
if !isDirectory {
@@ -1312,24 +1240,18 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
if oldOnly && last != remote {
// Check current version of the file
if deleteHidden && object.Action == "hide" {
if object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
if !operations.SkipDestructive(ctx, object.Name, "remove hide marker") {
toBeDeleted <- object
}
} else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
toBeDeleted <- object
} else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
if !operations.SkipDestructive(ctx, object.Name, "remove pending upload") {
toBeDeleted <- object
}
toBeDeleted <- object
} else {
fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp)))
fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
}
} else {
fs.Debugf(remote, "Deleting (id %q)", object.ID)
if !operations.SkipDestructive(ctx, object.Name, "delete") {
toBeDeleted <- object
}
toBeDeleted <- object
}
last = remote
tr.Done(ctx, nil)
@@ -1347,17 +1269,12 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purge(ctx, dir, false, false, false, defaultMaxAge)
return f.purge(ctx, dir, false)
}
// CleanUp deletes all hidden files and pending multipart uploads older than 24 hours.
// CleanUp deletes all the hidden files.
func (f *Fs) CleanUp(ctx context.Context) error {
return f.purge(ctx, "", true, true, true, defaultMaxAge)
}
// cleanUp deletes all hidden files and/or pending multipart uploads older than the specified age.
func (f *Fs) cleanUp(ctx context.Context, deleteHidden bool, deleteUnfinished bool, maxAge time.Duration) (err error) {
return f.purge(ctx, "", true, deleteHidden, deleteUnfinished, maxAge)
return f.purge(ctx, "", true)
}
// copy does a server-side copy from dstObj <- srcObj
@@ -1365,7 +1282,7 @@ func (f *Fs) cleanUp(ctx context.Context, deleteHidden bool, deleteUnfinished bo
// If newInfo is nil then the metadata will be copied otherwise it
// will be replaced with newInfo
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *api.File) (err error) {
if srcObj.size > int64(f.opt.CopyCutoff) {
if srcObj.size >= int64(f.opt.CopyCutoff) {
if newInfo == nil {
newInfo, err = srcObj.getMetaData(ctx)
if err != nil {
@@ -1376,11 +1293,7 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
if err != nil {
return err
}
err = up.Copy(ctx)
if err != nil {
return err
}
return dstObj.decodeMetaDataFileInfo(up.info)
return up.Upload(ctx)
}
dstBucket, dstPath := dstObj.split()
@@ -1509,7 +1422,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
if err != nil {
return "", err
}
absPath := "/" + urlEncode(bucketPath)
absPath := "/" + bucketPath
link = RootURL + "/file/" + urlEncode(bucket) + absPath
bucketType, err := f.getbucketType(ctx, bucket)
if err != nil {
@@ -1569,7 +1482,7 @@ func (o *Object) Size() int64 {
//
// Make sure it is lower case.
//
// Remove unverified prefix - see https://www.backblaze.com/docs/cloud-storage-upload-files-with-the-native-api
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (e.g. Cyberduck) use this
func cleanSHA1(sha1 string) string {
const unverified = "unverified:"
@@ -1596,11 +1509,7 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
o.size = Size
// Use the UploadTimestamp if can't get file info
o.modTime = time.Time(UploadTimestamp)
err = o.parseTimeString(Info[timeKey])
if err != nil {
return err
}
return nil
return o.parseTimeString(Info[timeKey])
}
// decodeMetaData sets the metadata in the object from an api.File
@@ -1702,16 +1611,6 @@ func timeString(modTime time.Time) string {
return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
}
// parseTimeStringHelper converts a decimal string number of milliseconds
// elapsed since January 1, 1970 UTC into a time.Time
func parseTimeStringHelper(timeString string) (time.Time, error) {
unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
if err != nil {
return time.Time{}, err
}
return time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC(), nil
}
// parseTimeString converts a decimal string number of milliseconds
// elapsed since January 1, 1970 UTC into a time.Time and stores it in
// the modTime variable.
@@ -1719,12 +1618,12 @@ func (o *Object) parseTimeString(timeString string) (err error) {
if timeString == "" {
return nil
}
modTime, err := parseTimeStringHelper(timeString)
unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
if err != nil {
fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
return nil
}
o.modTime = modTime
o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
return nil
}
@@ -1801,14 +1700,14 @@ func (file *openFile) Close() (err error) {
// Check to see we read the correct number of bytes
if file.o.Size() != file.bytes {
return fmt.Errorf("corrupted on transfer: lengths differ want %d vs got %d", file.o.Size(), file.bytes)
return fmt.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
}
// Check the SHA1
receivedSHA1 := file.o.sha1
calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
return fmt.Errorf("corrupted on transfer: SHA1 hashes differ want %q vs got %q", receivedSHA1, calculatedSHA1)
return fmt.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
}
return nil
@@ -1878,7 +1777,6 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
ContentType: resp.Header.Get("Content-Type"),
Info: Info,
}
// When reading files from B2 via cloudflare using
// --b2-download-url cloudflare strips the Content-Length
// headers (presumably so it can inject stuff) so use the old
@@ -1935,7 +1833,7 @@ func init() {
// urlEncode encodes in with % encoding
func urlEncode(in string) string {
var out bytes.Buffer
for i := range len(in) {
for i := 0; i < len(in); i++ {
c := in[i]
if noNeedToEncode[c] {
_ = out.WriteByte(c)
@@ -1963,11 +1861,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
if size < 0 {
if size == -1 {
// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
rw := o.fs.getRW(false)
buf := o.fs.getBuf(false)
n, err := io.CopyN(rw, in, int64(o.fs.opt.ChunkSize))
n, err := io.ReadFull(in, buf)
if err == nil {
bufReader := bufio.NewReader(in)
in = bufReader
@@ -1976,42 +1874,31 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err == nil {
fs.Debugf(o, "File is big enough for chunked streaming")
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil, options...)
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
if err != nil {
o.fs.putRW(rw)
o.fs.putBuf(buf, false)
return err
}
// NB Stream returns the buffer and token
err = up.Stream(ctx, rw)
if err != nil {
return err
}
return o.decodeMetaDataFileInfo(up.info)
} else if err == io.EOF {
return up.Stream(ctx, buf)
} else if err == io.EOF || err == io.ErrUnexpectedEOF {
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
defer o.fs.putRW(rw)
size = n
in = rw
defer o.fs.putBuf(buf, false)
size = int64(n)
in = bytes.NewReader(buf[:n])
} else {
o.fs.putRW(rw)
o.fs.putBuf(buf, false)
return err
}
} else if size > int64(o.fs.opt.UploadCutoff) {
chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
Open: o.fs,
OpenOptions: options,
})
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
if err != nil {
return err
}
up := chunkWriter.(*largeUpload)
return o.decodeMetaDataFileInfo(up.info)
return up.Upload(ctx)
}
modTime, err := o.getModTime(ctx, src, options)
if err != nil {
return err
}
modTime := src.ModTime(ctx)
calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
if calculatedSha1 == "" {
@@ -2116,71 +2003,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.decodeMetaDataFileInfo(&response)
}
// Get modTime from the source; if --metadata is set, fetch the src metadata and get it from there.
// When metadata support is added to b2, this method will need a more generic name
func (o *Object) getModTime(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (time.Time, error) {
modTime := src.ModTime(ctx)
// Fetch metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
if err != nil {
return time.Time{}, fmt.Errorf("failed to read metadata from source object: %w", err)
}
// merge metadata into request and user metadata
for k, v := range meta {
k = strings.ToLower(k)
// For now, the only metadata we're concerned with is "mtime"
switch k {
case "mtime":
// mtime in meta overrides source ModTime
metaModTime, err := time.Parse(time.RFC3339Nano, v)
if err != nil {
fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
} else {
modTime = metaModTime
}
default:
// Do nothing for now
}
}
return modTime, nil
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
// FIXME what if file is smaller than 1 chunk?
if f.opt.Versions {
return info, nil, errNotWithVersions
}
if f.opt.VersionAt.IsSet() {
return info, nil, errNotWithVersionAt
}
//size := src.Size()
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
}
bucket, _ := o.split()
err = f.makeBucket(ctx, bucket)
if err != nil {
return info, nil, err
}
info = fs.ChunkWriterInfo{
ChunkSize: int64(f.opt.ChunkSize),
Concurrency: o.fs.opt.UploadConcurrency,
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
return info, up, err
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
bucket, bucketPath := o.split()
@@ -2206,212 +2028,16 @@ func (o *Object) ID() string {
return o.id
}
var lifecycleHelp = fs.CommandHelp{
Name: "lifecycle",
Short: "Read or set the lifecycle for a bucket",
Long: `This command can be used to read or set the lifecycle for a bucket.
Usage Examples:
To show the current lifecycle rules:
rclone backend lifecycle b2:bucket
This will dump something like this showing the lifecycle rules.
[
{
"daysFromHidingToDeleting": 1,
"daysFromUploadingToHiding": null,
"daysFromStartingToCancelingUnfinishedLargeFiles": null,
"fileNamePrefix": ""
}
]
If there are no lifecycle rules (the default) then it will just return [].
To reset the current lifecycle rules:
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
This will run and then print the new lifecycle rules as above.
Rclone only lets you set lifecycles for the whole bucket with the
fileNamePrefix = "".
You can't disable versioning with B2. The best you can do is to set
daysFromHidingToDeleting to 1 day. You can also enable hard_delete in
the config, which means deletions won't create versions, but
overwrites will still cause versions to be made.
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
`,
Opts: map[string]string{
"daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
"daysFromStartingToCancelingUnfinishedLargeFiles": "Cancels any unfinished large file versions after this many days",
},
}
func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
var newRule api.LifecycleRule
if daysStr := opt["daysFromHidingToDeleting"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
if err != nil {
return nil, fmt.Errorf("bad daysFromHidingToDeleting: %w", err)
}
newRule.DaysFromHidingToDeleting = &days
}
if daysStr := opt["daysFromUploadingToHiding"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
if err != nil {
return nil, fmt.Errorf("bad daysFromUploadingToHiding: %w", err)
}
newRule.DaysFromUploadingToHiding = &days
}
if daysStr := opt["daysFromStartingToCancelingUnfinishedLargeFiles"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
if err != nil {
return nil, fmt.Errorf("bad daysFromStartingToCancelingUnfinishedLargeFiles: %w", err)
}
newRule.DaysFromStartingToCancelingUnfinishedLargeFiles = &days
}
bucketName, _ := f.split("")
if bucketName == "" {
return nil, errors.New("bucket required")
}
skip := operations.SkipDestructive(ctx, name, "update lifecycle rules")
var bucket *api.Bucket
if !skip && (newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil) {
bucketID, err := f.getBucketID(ctx, bucketName)
if err != nil {
return nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_update_bucket",
}
var request = api.UpdateBucketRequest{
ID: bucketID,
AccountID: f.info.AccountID,
LifecycleRules: []api.LifecycleRule{newRule},
}
var response api.Bucket
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
bucket = &response
} else {
err = f.listBucketsToFn(ctx, bucketName, func(b *api.Bucket) error {
bucket = b
return nil
})
if err != nil {
return nil, err
}
}
if bucket == nil {
return nil, fs.ErrorDirNotFound
}
return bucket.LifecycleRules, nil
}
var cleanupHelp = fs.CommandHelp{
Name: "cleanup",
Short: "Remove unfinished large file uploads.",
Long: `This command removes unfinished large file uploads of age greater than
max-age, which defaults to 24 hours.
Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
rclone backend cleanup b2:bucket/path/to/object
rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
`,
Opts: map[string]string{
"max-age": "Max age of upload to delete",
},
}
func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
maxAge := defaultMaxAge
if opt["max-age"] != "" {
maxAge, err = fs.ParseDuration(opt["max-age"])
if err != nil {
return nil, fmt.Errorf("bad max-age: %w", err)
}
}
return nil, f.cleanUp(ctx, false, true, maxAge)
}
var cleanupHiddenHelp = fs.CommandHelp{
Name: "cleanup-hidden",
Short: "Remove old versions of files.",
Long: `This command removes any old hidden versions of files.
Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
rclone backend cleanup-hidden b2:bucket/path/to/dir
`,
}
func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
return nil, f.cleanUp(ctx, true, false, 0)
}
var commandHelp = []fs.CommandHelp{
lifecycleHelp,
cleanupHelp,
cleanupHiddenHelp,
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
switch name {
case "lifecycle":
return f.lifecycleCommand(ctx, name, arg, opt)
case "cleanup":
return f.cleanupCommand(ctx, name, arg, opt)
case "cleanup-hidden":
return f.cleanupHiddenCommand(ctx, name, arg, opt)
default:
return nil, fs.ErrorCommandNotFound
}
}
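For orientation, here is one way this dispatcher might be reached from Go code rather than from the command line. This is a rough sketch assuming a remote called "b2remote" already exists in the rclone config; error handling is trimmed to the bare minimum.
package main

import (
    "context"
    "fmt"

    _ "github.com/rclone/rclone/backend/b2" // register the backend
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/cache"
    "github.com/rclone/rclone/fs/config/configfile"
)

func main() {
    ctx := context.Background()
    configfile.Install() // make the default rclone config file available
    f, err := cache.Get(ctx, "b2remote:bucket") // assumes this remote is configured
    if err != nil {
        panic(err)
    }
    // Fs advertises fs.Commander (see the interface checks below).
    cmdr, ok := f.(fs.Commander)
    if !ok {
        panic("backend does not implement Commander")
    }
    out, err := cmdr.Command(ctx, "lifecycle", nil, map[string]string{"daysFromHidingToDeleting": "30"})
    fmt.Println(out, err)
}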
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Commander = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
)

View File

@@ -1,31 +1,14 @@
package b2
import (
"context"
"crypto/sha1"
"fmt"
"path"
"sort"
"strings"
"testing"
"time"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/version"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Test b2 string encoding
// https://www.backblaze.com/docs/cloud-storage-native-api-string-encoding
// https://www.backblaze.com/b2/docs/string_encoding.html
var encodeTest = []struct {
fullyEncoded string
@@ -185,435 +168,3 @@ func TestParseTimeString(t *testing.T) {
}
}
// Return a map of the headers in the options with keys stripped of the "x-bz-info-" prefix
func OpenOptionToMetaData(options []fs.OpenOption) map[string]string {
var headers = make(map[string]string)
for _, option := range options {
k, v := option.Header()
k = strings.ToLower(k)
if strings.HasPrefix(k, headerPrefix) {
headers[k[len(headerPrefix):]] = v
}
}
return headers
}
func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string, chunkSize string) {
what := fmt.Sprintf("Size%s/UploadCutoff%s/ChunkSize%s", size, uploadCutoff, chunkSize)
t.Run(what, func(t *testing.T) {
ctx := context.Background()
ss := fs.SizeSuffix(0)
err := ss.Set(size)
require.NoError(t, err)
original := random.String(int(ss))
contents := fstest.Gz(t, original)
mimeType := "text/html"
if chunkSize != "" {
ss := fs.SizeSuffix(0)
err := ss.Set(chunkSize)
require.NoError(t, err)
_, err = f.SetUploadChunkSize(ss)
require.NoError(t, err)
}
if uploadCutoff != "" {
ss := fs.SizeSuffix(0)
err := ss.Set(uploadCutoff)
require.NoError(t, err)
_, err = f.SetUploadCutoff(ss)
require.NoError(t, err)
}
item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
btime := time.Now()
metadata := fs.Metadata{
// Just mtime for now - limit to milliseconds since x-bz-info-src_last_modified_millis can't support any more precision
"mtime": "2009-05-06T04:05:06.499Z",
}
// Need to specify HTTP options with the header prefix since they are passed as-is
options := []fs.OpenOption{
&fs.HTTPOption{Key: "X-Bz-Info-a", Value: "1"},
&fs.HTTPOption{Key: "X-Bz-Info-b", Value: "2"},
}
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, mimeType, metadata, options...)
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()
o := obj.(*Object)
gotMetadata, err := o.getMetaData(ctx)
require.NoError(t, err)
// X-Bz-Info-a & X-Bz-Info-b
optMetadata := OpenOptionToMetaData(options)
for k, v := range optMetadata {
got := gotMetadata.Info[k]
assert.Equal(t, v, got, k)
}
assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
// Modification time from the x-bz-info-src_last_modified_millis header
var mtime api.Timestamp
err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
if err != nil {
fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
}
assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")
// Upload time
gotBtime := time.Time(gotMetadata.UploadTimestamp)
dt := gotBtime.Sub(btime)
assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))
t.Run("GzipEncoding", func(t *testing.T) {
// Test that the gzipped file we uploaded can be
// downloaded
checkDownload := func(wantContents string, wantSize int64, wantHash string) {
gotContents := fstests.ReadObject(ctx, t, o, -1)
assert.Equal(t, wantContents, gotContents)
assert.Equal(t, wantSize, o.Size())
gotHash, err := o.Hash(ctx, hash.SHA1)
require.NoError(t, err)
assert.Equal(t, wantHash, gotHash)
}
t.Run("NoDecompress", func(t *testing.T) {
checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
})
})
})
}
func (f *Fs) InternalTestMetadata(t *testing.T) {
// 1 kB regular file
f.internalTestMetadata(t, "1kiB", "", "")
// 10 MiB large file
f.internalTestMetadata(t, "10MiB", "6MiB", "6MiB")
}
func sha1Sum(t *testing.T, s string) string {
hash := sha1.Sum([]byte(s))
return fmt.Sprintf("%x", hash)
}
// This is adapted from the s3 equivalent.
func (f *Fs) InternalTestVersions(t *testing.T) {
ctx := context.Background()
// Small pause to make the LastModified different since AWS
// only seems to track them to 1 second granularity
time.Sleep(2 * time.Second)
// Create an object
const dirName = "versions"
const fileName = dirName + "/" + "test-versions.txt"
contents := random.String(100)
item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()
objMetadata, err := obj.(*Object).getMetaData(ctx)
require.NoError(t, err)
// Small pause
time.Sleep(2 * time.Second)
// Remove it
assert.NoError(t, obj.Remove(ctx))
// Small pause to make the LastModified different since AWS only seems to track them to 1 second granularity
time.Sleep(2 * time.Second)
// And create it with different size and contents
newContents := random.String(101)
newItem := fstest.NewItem(fileName, newContents, fstest.Time("2002-05-06T04:05:06.499999999Z"))
newObj := fstests.PutTestContents(ctx, t, f, &newItem, newContents, true)
newObjMetadata, err := newObj.(*Object).getMetaData(ctx)
require.NoError(t, err)
t.Run("Versions", func(t *testing.T) {
// Set --b2-versions for this test
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Read the contents
entries, err := f.List(ctx, dirName)
require.NoError(t, err)
tests := 0
var fileNameVersion string
for _, entry := range entries {
t.Log(entry)
remote := entry.Remote()
if remote == fileName {
t.Run("ReadCurrent", func(t *testing.T) {
assert.Equal(t, newContents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
})
tests++
} else if versionTime, p := version.Remove(remote); !versionTime.IsZero() && p == fileName {
t.Run("ReadVersion", func(t *testing.T) {
assert.Equal(t, contents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
})
assert.WithinDuration(t, time.Time(objMetadata.UploadTimestamp), versionTime, time.Second, "object time must be within 1 second of version time")
fileNameVersion = remote
tests++
}
}
assert.Equal(t, 2, tests, "object missing from listing")
// Check we can read the object with a version suffix
t.Run("NewObject", func(t *testing.T) {
o, err := f.NewObject(ctx, fileNameVersion)
require.NoError(t, err)
require.NotNil(t, o)
assert.Equal(t, int64(100), o.Size(), o.Remote())
})
// Check we can make a NewFs from that object with a version suffix
t.Run("NewFs", func(t *testing.T) {
newPath := bucket.Join(fs.ConfigStringFull(f), fileNameVersion)
// Make sure --b2-versions is set in the config of the new remote
fs.Debugf(nil, "oldPath = %q", newPath)
lastColon := strings.LastIndex(newPath, ":")
require.True(t, lastColon >= 0)
newPath = newPath[:lastColon] + ",versions" + newPath[lastColon:]
fs.Debugf(nil, "newPath = %q", newPath)
fNew, err := cache.Get(ctx, newPath)
// This should return pointing to a file
require.Equal(t, fs.ErrorIsFile, err)
require.NotNil(t, fNew)
// With the directory above
assert.Equal(t, dirName, path.Base(fs.ConfigStringFull(fNew)))
})
})
t.Run("VersionAt", func(t *testing.T) {
// We set --b2-version-at for this test so make sure we reset it at the end
defer func() {
f.opt.VersionAt = fs.Time{}
}()
var (
firstObjectTime = time.Time(objMetadata.UploadTimestamp)
secondObjectTime = time.Time(newObjMetadata.UploadTimestamp)
)
for _, test := range []struct {
what string
at time.Time
want []fstest.Item
wantErr error
wantSize int64
}{
{
what: "Before",
at: firstObjectTime.Add(-time.Second),
want: fstests.InternalTestFiles,
wantErr: fs.ErrorObjectNotFound,
},
{
what: "AfterOne",
at: firstObjectTime.Add(time.Second),
want: append([]fstest.Item{item}, fstests.InternalTestFiles...),
wantSize: 100,
},
{
what: "AfterDelete",
at: secondObjectTime.Add(-time.Second),
want: fstests.InternalTestFiles,
wantErr: fs.ErrorObjectNotFound,
},
{
what: "AfterTwo",
at: secondObjectTime.Add(time.Second),
want: append([]fstest.Item{newItem}, fstests.InternalTestFiles...),
wantSize: 101,
},
} {
t.Run(test.what, func(t *testing.T) {
f.opt.VersionAt = fs.Time(test.at)
t.Run("List", func(t *testing.T) {
fstest.CheckListing(t, f, test.want)
})
// b2 NewObject doesn't work with VersionAt
//t.Run("NewObject", func(t *testing.T) {
// gotObj, gotErr := f.NewObject(ctx, fileName)
// assert.Equal(t, test.wantErr, gotErr)
// if gotErr == nil {
// assert.Equal(t, test.wantSize, gotObj.Size())
// }
//})
})
}
})
t.Run("Cleanup", func(t *testing.T) {
t.Run("DryRun", func(t *testing.T) {
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Listing should be unchanged after dry run
before := listAllFiles(ctx, t, f, dirName)
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
require.NoError(t, f.cleanUp(ctx, true, false, 0))
after := listAllFiles(ctx, t, f, dirName)
assert.Equal(t, before, after)
})
t.Run("RealThing", func(t *testing.T) {
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Listing should reflect current state after cleanup
require.NoError(t, f.cleanUp(ctx, true, false, 0))
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
fstest.CheckListing(t, f, items)
})
})
// Purge gets tested later
}
func (f *Fs) InternalTestCleanupUnfinished(t *testing.T) {
ctx := context.Background()
// InternalTestCleanupUnfinished tests cleaning up unfinished large file uploads
t.Run("CleanupUnfinished", func(t *testing.T) {
dirName := "unfinished"
fileCount := 5
expectedFiles := []string{}
for i := 1; i < fileCount; i++ {
fileName := fmt.Sprintf("%s/unfinished-%d", dirName, i)
expectedFiles = append(expectedFiles, fileName)
obj := &Object{
fs: f,
remote: fileName,
}
objInfo := object.NewStaticObjectInfo(fileName, fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
_, err := f.newLargeUpload(ctx, obj, nil, objInfo, f.opt.ChunkSize, false, nil)
require.NoError(t, err)
}
checkListing(ctx, t, f, dirName, expectedFiles)
t.Run("DryRun", func(t *testing.T) {
// Listing should not change after dry run
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
require.NoError(t, f.cleanUp(ctx, false, true, 0))
checkListing(ctx, t, f, dirName, expectedFiles)
})
t.Run("RealThing", func(t *testing.T) {
// Listing should be empty after real cleanup
require.NoError(t, f.cleanUp(ctx, false, true, 0))
checkListing(ctx, t, f, dirName, []string{})
})
})
}
func listAllFiles(ctx context.Context, t *testing.T, f *Fs, dirName string) []string {
bucket, directory := f.split(dirName)
foundFiles := []string{}
require.NoError(t, f.list(ctx, bucket, directory, "", false, true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
if !isDirectory {
foundFiles = append(foundFiles, object.Name)
}
return nil
}))
sort.Strings(foundFiles)
return foundFiles
}
func checkListing(ctx context.Context, t *testing.T, f *Fs, dirName string, expectedFiles []string) {
foundFiles := listAllFiles(ctx, t, f, dirName)
sort.Strings(expectedFiles)
assert.Equal(t, expectedFiles, foundFiles)
}
func (f *Fs) InternalTestLifecycleRules(t *testing.T) {
ctx := context.Background()
opt := map[string]string{}
t.Run("InitState", func(t *testing.T) {
// There should be no lifecycle rules at the outset
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
})
t.Run("DryRun", func(t *testing.T) {
// There should still be no lifecycle rules after each dry run operation
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
delete(opt, "daysFromHidingToDeleting")
opt["daysFromUploadingToHiding"] = "40"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
})
t.Run("RealThing", func(t *testing.T) {
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
delete(opt, "daysFromHidingToDeleting")
opt["daysFromUploadingToHiding"] = "40"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
})
}
// -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Metadata", f.InternalTestMetadata)
t.Run("Versions", f.InternalTestVersions)
t.Run("CleanupUnfinished", f.InternalTestCleanupUnfinished)
t.Run("LifecycleRules", f.InternalTestLifecycleRules)
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -28,12 +28,7 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setCopyCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
_ fstests.SetCopyCutoffer = (*Fs)(nil)
)

View File

@@ -1,10 +1,11 @@
// Upload large files for b2
//
// Docs - https://www.backblaze.com/docs/cloud-storage-large-files
// Docs - https://www.backblaze.com/b2/docs/large_files.html
package b2
import (
"bytes"
"context"
"crypto/sha1"
"encoding/hex"
@@ -13,6 +14,7 @@ import (
"io"
"strings"
"sync"
"time"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
@@ -78,31 +80,36 @@ type largeUpload struct {
wrap accounting.WrapFn // account parts being transferred
id string // ID of the file being uploaded
size int64 // total size
parts int // calculated number of parts, if known
sha1smu sync.Mutex // mutex to protect sha1s
parts int64 // calculated number of parts, if known
sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
chunkSize int64 // chunk size to use
src *Object // if copying, object we are reading from
info *api.FileInfo // final response with info about the object
}
// newLargeUpload starts an upload of object o from in with metadata in src
//
// If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File, options ...fs.OpenOption) (up *largeUpload, err error) {
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
size := src.Size()
parts := 0
parts := int64(0)
sha1SliceSize := int64(maxParts)
chunkSize := defaultChunkSize
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
parts = int(size / int64(chunkSize))
parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts++
}
sha1SliceSize = parts
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
}
bucket, bucketPath := o.split()
bucketID, err := f.getBucketID(ctx, bucket)
@@ -113,27 +120,12 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
BucketID: bucketID,
Name: f.opt.Enc.FromStandardPath(bucketPath),
}
optionsToSend := make([]fs.OpenOption, 0, len(options))
if newInfo == nil {
modTime, err := o.getModTime(ctx, src, options)
if err != nil {
return nil, err
}
modTime := src.ModTime(ctx)
request.ContentType = fs.MimeType(ctx, src)
request.Info = map[string]string{
timeKey: timeString(modTime),
}
// Custom upload headers - remove header prefix since they are sent in the body
for _, option := range options {
k, v := option.Header()
k = strings.ToLower(k)
if strings.HasPrefix(k, headerPrefix) {
request.Info[k[len(headerPrefix):]] = v
} else {
optionsToSend = append(optionsToSend, option)
}
}
// Set the SHA1 if known
if !o.fs.opt.DisableCheckSum || doCopy {
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
@@ -144,11 +136,6 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
request.ContentType = newInfo.ContentType
request.Info = newInfo.Info
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
Options: optionsToSend,
}
var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
@@ -165,7 +152,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
id: response.ID,
size: size,
parts: parts,
sha1s: make([]string, 0, 16),
sha1s: make([]string, sha1SliceSize),
chunkSize: int64(chunkSize),
}
// unwrap the accounting from the input, we use wrap to put it
@@ -184,26 +171,24 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
up.uploadMu.Lock()
if len(up.uploads) > 0 {
defer up.uploadMu.Unlock()
if len(up.uploads) == 0 {
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
} else {
upload, up.uploads = up.uploads[0], up.uploads[1:]
up.uploadMu.Unlock()
return upload, nil
}
up.uploadMu.Unlock()
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err = up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
return upload, nil
}
@@ -218,39 +203,10 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
up.uploadMu.Unlock()
}
// Add an sha1 to the being built up sha1s
func (up *largeUpload) addSha1(chunkNumber int, sha1 string) {
up.sha1smu.Lock()
defer up.sha1smu.Unlock()
if len(up.sha1s) < chunkNumber+1 {
up.sha1s = append(up.sha1s, make([]string, chunkNumber+1-len(up.sha1s))...)
}
up.sha1s[chunkNumber] = sha1
}
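The grow-and-set pattern used by addSha1 is handy on its own whenever results arrive out of order; a minimal standalone sketch of the same locking shape (names here are illustrative):
package main

import (
    "fmt"
    "sync"
)

type sums struct {
    mu sync.Mutex
    s  []string
}

// set stores v at index i, growing the slice as needed, so chunks may
// complete (and record their SHA1) in any order.
func (x *sums) set(i int, v string) {
    x.mu.Lock()
    defer x.mu.Unlock()
    if len(x.s) < i+1 {
        x.s = append(x.s, make([]string, i+1-len(x.s))...)
    }
    x.s[i] = v
}

func main() {
    var x sums
    x.set(2, "c") // out of order is fine
    x.set(0, "a")
    fmt.Printf("%q\n", x.s) // ["a" "" "c"]
}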
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) {
// Only account after the checksum reads have been done
if do, ok := reader.(pool.DelayAccountinger); ok {
// To figure out this number, do a transfer and if the accounted size is 0 or a
// multiple of what it should be, increase or decrease this number.
do.DelayAccounting(1)
}
err = up.f.pacer.Call(func() (bool, error) {
// Discover the size by seeking to the end
size, err = reader.Seek(0, io.SeekEnd)
if err != nil {
return false, err
}
// rewind the reader on retry and after reading size
_, err = reader.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
fs.Debugf(up.o, "Sending chunk %d length %d", chunkNumber, size)
// Transfer a chunk
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
// Get upload URL
upload, err := up.getUploadURL(ctx)
@@ -258,8 +214,8 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
return false, err
}
in := newHashAppendingReader(reader, sha1.New())
sizeWithHash := size + int64(in.AdditionalLength())
in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
size := int64(len(body)) + int64(in.AdditionalLength())
// Authorization
//
@@ -289,10 +245,10 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
Body: up.wrap(in),
ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken,
"X-Bz-Part-Number": fmt.Sprintf("%d", chunkNumber+1),
"X-Bz-Part-Number": fmt.Sprintf("%d", part),
sha1Header: "hex_digits_at_end",
},
ContentLength: &sizeWithHash,
ContentLength: &size,
}
var response api.UploadPartResponse
@@ -300,7 +256,7 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
retry, err := up.f.shouldRetry(ctx, resp, err)
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err)
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
}
// On retryable error clear PartUploadURL
if retry {
@@ -308,30 +264,30 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
upload = nil
}
up.returnUploadURL(upload)
up.addSha1(chunkNumber, in.HexSum())
up.sha1s[part-1] = in.HexSum()
return retry, err
})
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d: %v", chunkNumber, err)
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
} else {
fs.Debugf(up.o, "Done sending chunk %d", chunkNumber)
fs.Debugf(up.o, "Done sending chunk %d", part)
}
return size, err
return err
}
// Copy a chunk
func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64) error {
func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_part",
}
offset := int64(part) * up.chunkSize // where we are in the source file
offset := (part - 1) * up.chunkSize // where we are in the source file
var request = api.CopyPartRequest{
SourceID: up.src.id,
LargeFileID: up.id,
PartNumber: int64(part + 1),
PartNumber: part,
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
}
var response api.UploadPartResponse
@@ -340,7 +296,7 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
if err != nil {
fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
}
up.addSha1(part, response.SHA1)
up.sha1s[part-1] = response.SHA1
return retry, err
})
if err != nil {
@@ -351,8 +307,8 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
return err
}
// Close closes off the large upload
func (up *largeUpload) Close(ctx context.Context) error {
// finish closes off the large upload
func (up *largeUpload) finish(ctx context.Context) error {
fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
opts := rest.Opts{
Method: "POST",
@@ -370,12 +326,11 @@ func (up *largeUpload) Close(ctx context.Context) error {
if err != nil {
return err
}
up.info = &response
return nil
return up.o.decodeMetaDataFileInfo(&response)
}
// Abort aborts the large upload
func (up *largeUpload) Abort(ctx context.Context) error {
// cancel aborts the large upload
func (up *largeUpload) cancel(ctx context.Context) error {
fs.Debugf(up.o, "Cancelling large file %s", up.what)
opts := rest.Opts{
Method: "POST",
@@ -400,102 +355,157 @@ func (up *largeUpload) Abort(ctx context.Context) error {
// reaches EOF.
//
// Note that initialUploadBlock must be returned to f.putBuf()
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW) (err error) {
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
var (
g, gCtx = errgroup.WithContext(ctx)
hasMoreParts = true
)
up.size = initialUploadBlock.Size()
up.parts = 0
for part := 0; hasMoreParts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
var rw *pool.RW
if part == 0 {
rw = initialUploadBlock
} else {
rw = up.f.getRW(false)
}
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putRW(rw)
break
}
// Read the chunk
var n int64
if part == 0 {
n = rw.Size()
} else {
n, err = io.CopyN(rw, up.in, up.chunkSize)
if err == io.EOF {
if n == 0 {
fs.Debugf(up.o, "Not sending empty chunk after EOF - ending.")
up.f.putRW(rw)
break
} else {
fs.Debugf(up.o, "Read less than a full chunk %d, making this the last one.", n)
}
hasMoreParts = false
} else if err != nil {
// other kinds of errors indicate failure
up.f.putRW(rw)
return err
up.size = int64(len(initialUploadBlock))
g.Go(func() error {
for part := int64(1); hasMoreParts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
var buf []byte
if part == 1 {
buf = initialUploadBlock
} else {
buf = up.f.getBuf(false)
}
}
// Keep stats up to date
up.parts += 1
up.size += n
if part > maxParts {
up.f.putRW(rw)
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
// Fail fast: if an errgroup managed function returns an error then
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putBuf(buf, false)
return nil
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putRW(rw)
_, err = up.WriteChunk(gCtx, part, rw)
return err
})
}
// Read the chunk
var n int
if part == 1 {
n = len(buf)
} else {
n, err = io.ReadFull(up.in, buf)
if err == io.ErrUnexpectedEOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
buf = buf[:n]
hasMoreParts = false
} else if err == io.EOF {
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
up.f.putBuf(buf, false)
return nil
} else if err != nil {
// other kinds of errors indicate failure
up.f.putBuf(buf, false)
return err
}
}
// Keep stats up to date
up.parts = part
up.size += int64(n)
if part > maxParts {
up.f.putBuf(buf, false)
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putBuf(buf, false)
return up.transferChunk(gCtx, part, buf)
})
}
return nil
})
err = g.Wait()
if err != nil {
return err
}
return up.Close(ctx)
up.sha1s = up.sha1s[:up.parts]
return up.finish(ctx)
}
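For readers who want to experiment with the overall shape of this streaming loop outside rclone, here is a stripped-down sketch: read fixed-size chunks until EOF and upload each in its own goroutine under an errgroup. uploadChunk is a stand-in, the concurrency limit is arbitrary, and none of the B2 specifics are reproduced.
package main

import (
    "bytes"
    "context"
    "fmt"
    "io"
    "strings"

    "golang.org/x/sync/errgroup"
)

// uploadChunk is a stand-in for the real per-chunk upload.
func uploadChunk(ctx context.Context, n int, data []byte) error {
    fmt.Printf("chunk %d: %d bytes\n", n, len(data))
    return nil
}

func streamUpload(ctx context.Context, in io.Reader, chunkSize int64) error {
    g, gCtx := errgroup.WithContext(ctx)
    g.SetLimit(4) // cap concurrency, as an upload concurrency option would
    for part := 0; ; part++ {
        buf := &bytes.Buffer{}
        n, err := io.CopyN(buf, in, chunkSize)
        if err != nil && err != io.EOF {
            return err
        }
        if n == 0 {
            break // nothing read after EOF - the previous chunk was the last
        }
        part, data := part, buf.Bytes()
        g.Go(func() error { return uploadChunk(gCtx, part, data) })
        if err == io.EOF {
            break
        }
    }
    return g.Wait()
}

func main() {
    _ = streamUpload(context.Background(), strings.NewReader(strings.Repeat("x", 250)), 100)
}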
// Copy the chunks from the source to the destination
func (up *largeUpload) Copy(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
// Upload uploads the chunks from the input
func (up *largeUpload) Upload(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
var (
g, gCtx = errgroup.WithContext(ctx)
remaining = up.size
g, gCtx = errgroup.WithContext(ctx)
remaining = up.size
uploadPool *pool.Pool
ci = fs.GetConfig(ctx)
)
g.SetLimit(up.f.opt.UploadConcurrency)
for part := range up.parts {
// Fail fast: if an errgroup managed function returns an error then
// gCtx is cancelled. There is no point in copying all the other parts.
if gCtx.Err() != nil {
break
}
reqSize := min(remaining, up.chunkSize)
part := part // for the closure
g.Go(func() (err error) {
return up.copyChunk(gCtx, part, reqSize)
})
remaining -= reqSize
// If using large chunk size then make a temporary pool
if up.chunkSize <= int64(up.f.opt.ChunkSize) {
uploadPool = up.f.pool
} else {
uploadPool = pool.New(
time.Duration(up.f.opt.MemoryPoolFlushTime),
int(up.chunkSize),
ci.Transfers,
up.f.opt.MemoryPoolUseMmap,
)
defer uploadPool.Flush()
}
// Get an upload token and a buffer
getBuf := func() (buf []byte) {
up.f.getBuf(true)
if !up.doCopy {
buf = uploadPool.Get()
}
return buf
}
// Put an upload token and a buffer
putBuf := func(buf []byte) {
if !up.doCopy {
uploadPool.Put(buf)
}
up.f.putBuf(nil, true)
}
g.Go(func() error {
for part := int64(1); part <= up.parts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
buf := getBuf()
// Fail fast: if an errgroup managed function returns an error then
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
putBuf(buf)
return nil
}
reqSize := remaining
if reqSize >= up.chunkSize {
reqSize = up.chunkSize
}
if !up.doCopy {
// Read the chunk
buf = buf[:reqSize]
_, err = io.ReadFull(up.in, buf)
if err != nil {
putBuf(buf)
return err
}
}
part := part // for the closure
g.Go(func() (err error) {
defer putBuf(buf)
if !up.doCopy {
err = up.transferChunk(gCtx, part, buf)
} else {
err = up.copyChunk(gCtx, part, reqSize)
}
return err
})
remaining -= reqSize
}
return nil
})
err = g.Wait()
if err != nil {
return err
}
return up.Close(ctx)
return up.finish(ctx)
}
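The memory pool juggling above exists so each in-flight chunk reuses a fixed-size buffer instead of allocating a fresh one. As a very rough approximation of that idea (sync.Pool here instead of rclone's lib/pool, so behaviour such as mmap and flush timing is not reproduced):
package main

import (
    "fmt"
    "sync"
)

const chunkSize = 4 << 20 // illustrative chunk size, not the backend default

// bufPool hands out chunk-sized buffers for reuse between uploads.
var bufPool = sync.Pool{
    New: func() any { return make([]byte, chunkSize) },
}

func main() {
    buf := bufPool.Get().([]byte)
    n := copy(buf, "chunk payload would go here")
    fmt.Println(len(buf), n)
    bufPool.Put(buf) // return the buffer for the next chunk to reuse
}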

View File

@@ -52,7 +52,7 @@ func (e *Error) Error() string {
out += ": " + e.Message
}
if e.ContextInfo != nil {
out += fmt.Sprintf(" (%s)", string(e.ContextInfo))
out += fmt.Sprintf(" (%+v)", e.ContextInfo)
}
return out
}
@@ -63,7 +63,7 @@ var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"
// Types of things in Item/ItemMini
// Types of things in Item
const (
ItemTypeFolder = "folder"
ItemTypeFile = "file"
@@ -72,31 +72,20 @@ const (
ItemStatusDeleted = "deleted"
)
// ItemMini is a subset of the elements in a full Item returned by some API calls
type ItemMini struct {
Type string `json:"type"`
ID string `json:"id"`
SequenceID int64 `json:"sequence_id,string"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
}
// Item describes a folder or a file as returned by Get Folder Items and others
type Item struct {
Type string `json:"type"`
ID string `json:"id"`
SequenceID int64 `json:"sequence_id,string"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
CreatedAt Time `json:"created_at"`
ModifiedAt Time `json:"modified_at"`
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
Parent ItemMini `json:"parent"`
Type string `json:"type"`
ID string `json:"id"`
SequenceID string `json:"sequence_id"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
CreatedAt Time `json:"created_at"`
ModifiedAt Time `json:"modified_at"`
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
SharedLink struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
@@ -167,7 +156,19 @@ type PreUploadCheckResponse struct {
// PreUploadCheckConflict is returned in the ContextInfo error field
// from PreUploadCheck when the error code is "item_name_in_use"
type PreUploadCheckConflict struct {
Conflicts ItemMini `json:"conflicts"`
Conflicts struct {
Type string `json:"type"`
ID string `json:"id"`
FileVersion struct {
Type string `json:"type"`
ID string `json:"id"`
Sha1 string `json:"sha1"`
} `json:"file_version"`
SequenceID string `json:"sequence_id"`
Etag string `json:"etag"`
Sha1 string `json:"sha1"`
Name string `json:"name"`
} `json:"conflicts"`
}
// UpdateFileModTime is used in Update File Info
@@ -280,30 +281,3 @@ type User struct {
Address string `json:"address"`
AvatarURL string `json:"avatar_url"`
}
// FileTreeChangeEventTypes are the events that can require cache invalidation
var FileTreeChangeEventTypes = map[string]struct{}{
"ITEM_COPY": {},
"ITEM_CREATE": {},
"ITEM_MAKE_CURRENT_VERSION": {},
"ITEM_MODIFY": {},
"ITEM_MOVE": {},
"ITEM_RENAME": {},
"ITEM_TRASH": {},
"ITEM_UNDELETE_VIA_TRASH": {},
"ITEM_UPLOAD": {},
}
// Event is an array element in the response returned from /events
type Event struct {
EventType string `json:"event_type"`
EventID string `json:"event_id"`
Source Item `json:"source"`
}
// Events is returned from /events
type Events struct {
ChunkSize int64 `json:"chunk_size"`
Entries []Event `json:"entries"`
NextStreamPosition int64 `json:"next_stream_position"`
}
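To make the wire format of these types concrete, here is a small decoding sketch using local copies of the structs above (the JSON payload is made up for illustration, not a captured Box response):
package main

import (
    "encoding/json"
    "fmt"
)

type event struct {
    EventType string `json:"event_type"`
    EventID   string `json:"event_id"`
}

type events struct {
    ChunkSize          int64   `json:"chunk_size"`
    Entries            []event `json:"entries"`
    NextStreamPosition int64   `json:"next_stream_position"`
}

func main() {
    payload := []byte(`{"chunk_size":1,"entries":[{"event_type":"ITEM_UPLOAD","event_id":"abc"}],"next_stream_position":12345}`)
    var ev events
    if err := json.Unmarshal(payload, &ev); err != nil {
        panic(err)
    }
    fmt.Println(ev.ChunkSize, ev.Entries[0].EventType, ev.NextStreamPosition)
}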

View File

@@ -43,9 +43,9 @@ import (
"github.com/rclone/rclone/lib/jwtutil"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/rest"
"github.com/youmark/pkcs8"
"golang.org/x/oauth2"
)
const (
@@ -64,10 +64,12 @@ const (
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
Scopes: nil,
AuthURL: "https://app.box.com/api/oauth2/authorize",
TokenURL: "https://app.box.com/api/oauth2/token",
oauthConfig = &oauth2.Config{
Scopes: nil,
Endpoint: oauth2.Endpoint{
AuthURL: "https://app.box.com/api/oauth2/authorize",
TokenURL: "https://app.box.com/api/oauth2/token",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
@@ -147,23 +149,6 @@ func init() {
Default: "",
Help: "Only show items owned by the login (email address) passed in.",
Advanced: true,
}, {
Name: "impersonate",
Default: "",
Help: `Impersonate this user ID when using a service account.
Setting this flag allows rclone, when using a JWT service account, to
act on behalf of another user by setting the as-user header.
The user ID is the Box identifier for a user. User IDs can be found for
any user via the GET /users endpoint, which is only available to
admins, or by calling the GET /users/me endpoint with an authenticated
user session.
See: https://developer.box.com/guides/authentication/jwt/as-user/
`,
Advanced: true,
Sensitive: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -237,8 +222,8 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomC
return claims, nil
}
func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]any {
signingHeaders := map[string]any{
func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
signingHeaders := map[string]interface{}{
"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
}
return signingHeaders
@@ -254,10 +239,8 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
}
func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
if block == nil {
return nil, errors.New("box: failed to PEM decode private key")
}
if len(rest) > 0 {
return nil, fmt.Errorf("box: extra data included in private key: %w", err)
}
@@ -279,29 +262,19 @@ type Options struct {
AccessToken string `config:"access_token"`
ListChunk int `config:"list_chunk"`
OwnedBy string `config:"owned_by"`
Impersonate string `config:"impersonate"`
}
// ItemMeta defines metadata we cache for each Item ID
type ItemMeta struct {
SequenceID int64 // the most recent event processed for this item
ParentID string // ID of the parent directory of this item
Name string // leaf name of this item
}
// Fs represents a remote box
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
uploadToken *pacer.TokenDispenser // control concurrency
itemMetaCacheMu *sync.Mutex // protects itemMetaCache
itemMetaCache map[string]ItemMeta // map of Item ID to selected metadata
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
uploadToken *pacer.TokenDispenser // control concurrency
}
// Object describes a box object
@@ -380,7 +353,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
// defer log.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
if err != nil {
if err == fs.ErrorDirNotFound {
@@ -389,30 +362,20 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
return nil, err
}
// Use preupload to find the ID
itemMini, err := f.preUploadCheck(ctx, leaf, directoryID, -1)
if err != nil {
return nil, err
}
if itemMini == nil {
return nil, fs.ErrorObjectNotFound
}
// Now we have the ID we can look up the object proper
opts := rest.Opts{
Method: "GET",
Path: "/files/" + itemMini.ID,
Parameters: fieldsValue(),
}
var item api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &item)
return shouldRetry(ctx, resp, err)
found, err := f.listAll(ctx, directoryID, false, true, true, func(item *api.Item) bool {
if strings.EqualFold(item.Name, leaf) {
info = item
return true
}
return false
})
if err != nil {
return nil, err
}
return &item, nil
if !found {
return nil, fs.ErrorObjectNotFound
}
return info, nil
}
// errorHandler parses a non 2xx error response into an error
@@ -459,14 +422,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
itemMetaCacheMu: new(sync.Mutex),
itemMetaCache: make(map[string]ItemMeta),
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
}
f.features = (&fs.Features{
CaseInsensitive: true,
@@ -479,11 +440,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
}
// If using impersonate set an as-user header
if f.opt.Impersonate != "" {
f.srv.SetHeader("as-user", f.opt.Impersonate)
}
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
@@ -619,7 +575,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
return shouldRetry(ctx, resp, err)
})
if err != nil {
// fmt.Printf("...Error %v\n", err)
//fmt.Printf("...Error %v\n", err)
return "", err
}
// fmt.Printf("...Id %q\n", *info.Id)
@@ -726,17 +682,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
entries = append(entries, o)
}
// Cache some metadata for this Item to help us process events later
// on. In particular, the box event API does not provide the old path
// of the Item when it is renamed/deleted/moved/etc.
f.itemMetaCacheMu.Lock()
cachedItemMeta, found := f.itemMetaCache[info.ID]
if !found || cachedItemMeta.SequenceID < info.SequenceID {
f.itemMetaCache[info.ID] = ItemMeta{SequenceID: info.SequenceID, ParentID: directoryID, Name: info.Name}
}
f.itemMetaCacheMu.Unlock()
return false
})
if err != nil {
@@ -772,7 +717,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
//
// It returns "", nil if the file is good to go
// It returns "ID", nil if the file must be updated
func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (item *api.ItemMini, err error) {
func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (ID string, err error) {
check := api.PreUploadCheck{
Name: f.opt.Enc.FromStandardName(leaf),
Parent: api.Parent{
@@ -797,16 +742,16 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size
var conflict api.PreUploadCheckConflict
err = json.Unmarshal(apiErr.ContextInfo, &conflict)
if err != nil {
return nil, fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
return "", fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
}
if conflict.Conflicts.Type != api.ItemTypeFile {
return nil, fs.ErrorIsDir
return "", fmt.Errorf("pre-upload check: can't overwrite non file with file: %w", err)
}
return &conflict.Conflicts, nil
return conflict.Conflicts.ID, nil
}
return nil, fmt.Errorf("pre-upload check: %w", err)
return "", fmt.Errorf("pre-upload check: %w", err)
}
return nil, nil
return "", nil
}
// Put the object
@@ -827,11 +772,11 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// Preflight check the upload, which returns the ID if the
// object already exists
item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
ID, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
if err != nil {
return nil, err
}
if item == nil {
if ID == "" {
return f.PutUnchecked(ctx, in, src, options...)
}
@@ -839,7 +784,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
o := &Object{
fs: f,
remote: remote,
id: item.ID,
id: ID,
}
return o, o.Update(ctx, in, src, options...)
}
@@ -966,26 +911,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}
// check if dest already exists
item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
if err != nil {
return nil, err
}
if item != nil { // dest already exists, need to copy to temp name and then move
tempSuffix := "-rclone-copy-" + random.String(8)
fs.Debugf(remote, "dst already exists, copying to temp name %v", remote+tempSuffix)
tempObj, err := f.Copy(ctx, src, remote+tempSuffix)
if err != nil {
return nil, err
}
fs.Debugf(remote+tempSuffix, "moving to real name %v", remote)
err = f.deleteObject(ctx, item.ID)
if err != nil {
return nil, err
}
return f.Move(ctx, tempObj, remote)
}
// Copy the object
opts := rest.Opts{
Method: "POST",
@@ -1196,7 +1121,7 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) (err error) {
var (
deleteErrors atomic.Uint64
deleteErrors = int64(0)
concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
wg sync.WaitGroup
)
@@ -1212,7 +1137,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
err := f.deletePermanently(ctx, item.Type, item.ID)
if err != nil {
fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
deleteErrors.Add(1)
atomic.AddInt64(&deleteErrors, 1)
}
}()
} else {
@@ -1221,279 +1146,12 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
return false
})
wg.Wait()
if deleteErrors.Load() != 0 {
return fmt.Errorf("failed to delete %d trash items", deleteErrors.Load())
if deleteErrors != 0 {
return fmt.Errorf("failed to delete %d trash items", deleteErrors)
}
return err
}
// Shutdown shutdown the fs
func (f *Fs) Shutdown(ctx context.Context) error {
f.tokenRenewer.Shutdown()
return nil
}
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
go func() {
// get the `stream_position` early so all changes from now on get processed
streamPosition, err := f.changeNotifyStreamPosition(ctx)
if err != nil {
fs.Infof(f, "Failed to get StreamPosition: %s", err)
}
// box can send duplicate Event IDs. Use this map to track and filter
// the ones we've already processed.
processedEventIDs := make(map[string]time.Time)
var ticker *time.Ticker
var tickerC <-chan time.Time
for {
select {
case pollInterval, ok := <-pollIntervalChan:
if !ok {
if ticker != nil {
ticker.Stop()
}
return
}
if ticker != nil {
ticker.Stop()
ticker, tickerC = nil, nil
}
if pollInterval != 0 {
ticker = time.NewTicker(pollInterval)
tickerC = ticker.C
}
case <-tickerC:
if streamPosition == "" {
streamPosition, err = f.changeNotifyStreamPosition(ctx)
if err != nil {
fs.Infof(f, "Failed to get StreamPosition: %s", err)
continue
}
}
// Garbage collect EventIDs older than 1 minute
for eventID, timestamp := range processedEventIDs {
if time.Since(timestamp) > time.Minute {
delete(processedEventIDs, eventID)
}
}
streamPosition, err = f.changeNotifyRunner(ctx, notifyFunc, streamPosition, processedEventIDs)
if err != nil {
fs.Infof(f, "Change notify listener failure: %s", err)
}
}
}
}()
}
func (f *Fs) changeNotifyStreamPosition(ctx context.Context) (streamPosition string, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/events",
Parameters: fieldsValue(),
}
opts.Parameters.Set("stream_position", "now")
opts.Parameters.Set("stream_type", "changes")
var result api.Events
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", err
}
return strconv.FormatInt(result.NextStreamPosition, 10), nil
}
// Attempts to construct the full path for an object, given the ID of its
// parent directory and the name of the object.
//
// Can return "" if the parentID is not currently in the directory cache.
func (f *Fs) getFullPath(parentID string, childName string) (fullPath string) {
fullPath = ""
name := f.opt.Enc.ToStandardName(childName)
if parentID != "" {
if parentDir, ok := f.dirCache.GetInv(parentID); ok {
if len(parentDir) > 0 {
fullPath = parentDir + "/" + name
} else {
fullPath = name
}
}
} else {
// No parent, this object is at the root
fullPath = name
}
return fullPath
}
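The same path construction, sketched with a plain map standing in for the directory cache (purely illustrative, no rclone types):
package main

import "fmt"

// fullPath joins a cached parent directory path and a leaf name,
// returning "" when the parent is unknown, as getFullPath does above.
func fullPath(dirByID map[string]string, parentID, name string) string {
    if parentID == "" {
        return name // no parent means the item sits at the root
    }
    parentDir, ok := dirByID[parentID]
    if !ok {
        return ""
    }
    if parentDir == "" {
        return name
    }
    return parentDir + "/" + name
}

func main() {
    cache := map[string]string{"0": "", "42": "docs/reports"}
    fmt.Println(fullPath(cache, "42", "q3.pdf")) // docs/reports/q3.pdf
    fmt.Println(fullPath(cache, "0", "top.txt")) // top.txt
    fmt.Println(fullPath(cache, "99", "lost"))   // "" - parent not cached
}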
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), streamPosition string, processedEventIDs map[string]time.Time) (nextStreamPosition string, err error) {
nextStreamPosition = streamPosition
for {
// box only allows a max of 500 events
limit := min(f.opt.ListChunk, 500)
opts := rest.Opts{
Method: "GET",
Path: "/events",
Parameters: fieldsValue(),
}
opts.Parameters.Set("stream_position", nextStreamPosition)
opts.Parameters.Set("stream_type", "changes")
opts.Parameters.Set("limit", strconv.Itoa(limit))
var result api.Events
var resp *http.Response
fs.Debugf(f, "Checking for changes on remote (next_stream_position: %q)", nextStreamPosition)
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", err
}
if result.ChunkSize != int64(len(result.Entries)) {
return "", fmt.Errorf("invalid response to event request, chunk_size (%v) not equal to number of entries (%v)", result.ChunkSize, len(result.Entries))
}
nextStreamPosition = strconv.FormatInt(result.NextStreamPosition, 10)
if result.ChunkSize == 0 {
return nextStreamPosition, nil
}
type pathToClear struct {
path string
entryType fs.EntryType
}
var pathsToClear []pathToClear
newEventIDs := 0
for _, entry := range result.Entries {
eventDetails := fmt.Sprintf("[%q(%d)|%s|%s|%s|%s]", entry.Source.Name, entry.Source.SequenceID,
entry.Source.Type, entry.EventType, entry.Source.ID, entry.EventID)
if entry.EventID == "" {
fs.Debugf(f, "%s ignored due to missing EventID", eventDetails)
continue
}
if _, ok := processedEventIDs[entry.EventID]; ok {
fs.Debugf(f, "%s ignored due to duplicate EventID", eventDetails)
continue
}
processedEventIDs[entry.EventID] = time.Now()
newEventIDs++
if entry.Source.ID == "" { // missing File or Folder ID
fs.Debugf(f, "%s ignored due to missing SourceID", eventDetails)
continue
}
if entry.Source.Type != api.ItemTypeFile && entry.Source.Type != api.ItemTypeFolder { // event is not for a file or folder
fs.Debugf(f, "%s ignored due to unsupported SourceType", eventDetails)
continue
}
// Only interested in event types that result in a file tree change
if _, found := api.FileTreeChangeEventTypes[entry.EventType]; !found {
fs.Debugf(f, "%s ignored due to unsupported EventType", eventDetails)
continue
}
f.itemMetaCacheMu.Lock()
itemMeta, cachedItemMetaFound := f.itemMetaCache[entry.Source.ID]
if cachedItemMetaFound {
if itemMeta.SequenceID >= entry.Source.SequenceID {
// Item in the cache has the same or newer SequenceID than
// this event. Ignore this event, it must be old.
f.itemMetaCacheMu.Unlock()
fs.Debugf(f, "%s ignored due to old SequenceID (%q)", eventDetails, itemMeta.SequenceID)
continue
}
// This event is newer. Delete its entry from the cache,
// we'll notify about its change below, then it's up to a
// future list operation to repopulate the cache.
delete(f.itemMetaCache, entry.Source.ID)
}
f.itemMetaCacheMu.Unlock()
entryType := fs.EntryDirectory
if entry.Source.Type == api.ItemTypeFile {
entryType = fs.EntryObject
}
// The box event only includes the new path for the object (e.g.
// the path after the object was moved). If there was an old path
// saved in our cache, it must be cleared.
if cachedItemMetaFound {
path := f.getFullPath(itemMeta.ParentID, itemMeta.Name)
if path != "" {
fs.Debugf(f, "%s added old path (%q) for notify", eventDetails, path)
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
} else {
fs.Debugf(f, "%s old parent not cached", eventDetails)
}
// If this is a directory, also delete it from the dir cache.
// This will effectively invalidate the item metadata cache
// entries for all descendants of this directory, since we
// will no longer be able to construct a full path for them.
// This is exactly what we want, since we don't want to notify
// on the paths of these descendants if one of their ancestors
// has been renamed/deleted.
if entry.Source.Type == api.ItemTypeFolder {
f.dirCache.FlushDir(path)
}
}
// If the item is "active", then it is not trashed or deleted, so
// it potentially has a valid parent.
//
// Construct the new path of the object, based on the Parent ID
// and its name. If we get an empty result, it means we don't
// currently know about this object so notification is unnecessary.
if entry.Source.ItemStatus == api.ItemStatusActive {
path := f.getFullPath(entry.Source.Parent.ID, entry.Source.Name)
if path != "" {
fs.Debugf(f, "%s added new path (%q) for notify", eventDetails, path)
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
} else {
fs.Debugf(f, "%s new parent not found", eventDetails)
}
}
}
// box can sometimes repeatedly return the same Event IDs within a
// short period of time. If it stops giving us new ones, treat it
// the same as if it returned us none at all.
if newEventIDs == 0 {
return nextStreamPosition, nil
}
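// Deduplicate the collected paths so each one is notified at most once per poll.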
notifiedPaths := make(map[string]bool)
for _, p := range pathsToClear {
if _, ok := notifiedPaths[p.path]; ok {
continue
}
notifiedPaths[p.path] = true
notifyFunc(p.path, p.entryType)
}
fs.Debugf(f, "Received %v events, resulting in %v paths and %v notifications", len(result.Entries), len(pathsToClear), len(notifiedPaths))
}
}
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
@@ -1741,7 +1399,6 @@ var (
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)

View File

@@ -105,7 +105,7 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
const defaultDelay = 10
var tries int
outer:
for tries = range maxTries {
for tries = 0; tries < maxTries; tries++ {
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
if err != nil {
@@ -203,7 +203,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
errs := make(chan error, 1)
var wg sync.WaitGroup
outer:
for part := range session.TotalParts {
for part := 0; part < session.TotalParts; part++ {
// Check any errors
select {
case err = <-errs:
@@ -211,7 +211,10 @@ outer:
default:
}
reqSize := min(remaining, chunkSize)
reqSize := remaining
if reqSize >= chunkSize {
reqSize = chunkSize
}
// Make a block of memory
buf := make([]byte, reqSize)

View File

@@ -1,4 +1,5 @@
//go:build !plan9 && !js
// +build !plan9,!js
// Package cache implements a virtual provider to cache existing remotes.
package cache
@@ -29,7 +30,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
@@ -410,16 +410,18 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
if err != nil {
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
}
} else if opt.PlexPassword != "" && opt.PlexUsername != "" {
decPass, err := obscure.Reveal(opt.PlexPassword)
if err != nil {
decPass = opt.PlexPassword
}
f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
m.Set("plex_token", token)
})
if err != nil {
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
} else {
if opt.PlexPassword != "" && opt.PlexUsername != "" {
decPass, err := obscure.Reveal(opt.PlexPassword)
if err != nil {
decPass = opt.PlexPassword
}
f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
m.Set("plex_token", token)
})
if err != nil {
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
}
}
}
}
@@ -1087,13 +1089,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return cachedEntries, nil
}
func (f *Fs) recurse(ctx context.Context, dir string, list *list.Helper) error {
func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
entries, err := f.List(ctx, dir)
if err != nil {
return err
}
for i := range entries {
for i := 0; i < len(entries); i++ {
innerDir, ok := entries[i].(fs.Directory)
if ok {
err := f.recurse(ctx, innerDir.Remote(), list)
@@ -1139,7 +1141,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}
// if we're here, we're gonna do a standard recursive traversal and cache everything
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
err = f.recurse(ctx, dir, list)
if err != nil {
return err
@@ -1429,7 +1431,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
}()
// wait until both are done
for range 2 {
for c := 0; c < 2; c++ {
<-done
}
}
@@ -1754,7 +1756,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
}
// Stats returns stats about the cache storage
func (f *Fs) Stats() (map[string]map[string]any, error) {
func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
return f.cache.Stats()
}
@@ -1934,7 +1936,7 @@ var commandHelp = []fs.CommandHelp{
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
switch name {
case "stats":
return f.Stats()

View File

@@ -1,4 +1,5 @@
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
package cache_test
@@ -10,6 +11,7 @@ import (
goflag "flag"
"fmt"
"io"
"log"
"math/rand"
"os"
"path"
@@ -28,11 +30,10 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/testy"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/stretchr/testify/require"
)
@@ -92,7 +93,7 @@ func TestMain(m *testing.M) {
goflag.Parse()
var rc int
fs.Logf(nil, "Running with the following params: \n remote: %v", remoteName)
log.Printf("Running with the following params: \n remote: %v", remoteName)
runInstance = newRun()
rc = m.Run()
os.Exit(rc)
@@ -122,10 +123,10 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {
/* TODO: is this testing something?
func TestInternalVfsCache(t *testing.T) {
vfscommon.Opt.DirCacheTime = time.Second * 30
vfsflags.Opt.DirCacheTime = time.Second * 30
testSize := int64(524288000)
vfscommon.Opt.CacheMode = vfs.CacheModeWrites
vfsflags.Opt.CacheMode = vfs.CacheModeWrites
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -337,7 +338,7 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
func TestInternalWrappedWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
vfsflags.Opt.DirCacheTime = time.Second
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
@@ -360,14 +361,14 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
require.NoError(t, err)
require.Equal(t, int64(len(checkSample)), o.Size())
for i := range checkSample {
for i := 0; i < len(checkSample); i++ {
require.Equal(t, testData[i], checkSample[i])
}
}
func TestInternalLargeWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
vfsflags.Opt.DirCacheTime = time.Second
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
@@ -387,7 +388,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
readData, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
require.NoError(t, err)
for i := range readData {
for i := 0; i < len(readData); i++ {
require.Equalf(t, testData[i], readData[i], "at byte %v", i)
}
}
@@ -407,7 +408,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
// update in the wrapped fs
originalSize, err := runInstance.size(t, rootFs, "data.bin")
require.NoError(t, err)
fs.Logf(nil, "original size: %v", originalSize)
log.Printf("original size: %v", originalSize)
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
@@ -416,7 +417,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
if runInstance.rootIsCrypt {
data2, err = base64.StdEncoding.DecodeString(cryptedText3Base64)
require.NoError(t, err)
expectedSize++ // FIXME newline gets in, likely test data issue
expectedSize = expectedSize + 1 // FIXME newline gets in, likely test data issue
} else {
data2 = []byte("test content")
}
@@ -424,7 +425,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
require.NoError(t, err)
require.Equal(t, int64(len(data2)), o.Size())
fs.Logf(nil, "updated size: %v", len(data2))
log.Printf("updated size: %v", len(data2))
// get a new instance from the cache
if runInstance.wrappedIsExternal {
@@ -484,49 +485,49 @@ func TestInternalMoveWithNotify(t *testing.T) {
err = runInstance.retryBlock(func() error {
li, err := runInstance.list(t, rootFs, "test")
if err != nil {
fs.Logf(nil, "err: %v", err)
log.Printf("err: %v", err)
return err
}
if len(li) != 2 {
fs.Logf(nil, "not expected listing /test: %v", li)
log.Printf("not expected listing /test: %v", li)
return fmt.Errorf("not expected listing /test: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/one")
if err != nil {
fs.Logf(nil, "err: %v", err)
log.Printf("err: %v", err)
return err
}
if len(li) != 0 {
fs.Logf(nil, "not expected listing /test/one: %v", li)
log.Printf("not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/second")
if err != nil {
fs.Logf(nil, "err: %v", err)
log.Printf("err: %v", err)
return err
}
if len(li) != 1 {
fs.Logf(nil, "not expected listing /test/second: %v", li)
log.Printf("not expected listing /test/second: %v", li)
return fmt.Errorf("not expected listing /test/second: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "data.bin" {
fs.Logf(nil, "not expected name: %v", fi.Name())
log.Printf("not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/second/data.bin" {
fs.Logf(nil, "not expected remote: %v", di.Remote())
log.Printf("not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
fs.Logf(nil, "unexpected listing: %v", li)
log.Printf("unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
}
fs.Logf(nil, "complete listing: %v", li)
log.Printf("complete listing: %v", li)
return nil
}, 12, time.Second*10)
require.NoError(t, err)
@@ -576,43 +577,43 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
err = runInstance.retryBlock(func() error {
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
if !found {
fs.Logf(nil, "not found /test")
log.Printf("not found /test")
return fmt.Errorf("not found /test")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
if !found {
fs.Logf(nil, "not found /test/one")
log.Printf("not found /test/one")
return fmt.Errorf("not found /test/one")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
if !found {
fs.Logf(nil, "not found /test/one/test2")
log.Printf("not found /test/one/test2")
return fmt.Errorf("not found /test/one/test2")
}
li, err := runInstance.list(t, rootFs, "test/one")
if err != nil {
fs.Logf(nil, "err: %v", err)
log.Printf("err: %v", err)
return err
}
if len(li) != 1 {
fs.Logf(nil, "not expected listing /test/one: %v", li)
log.Printf("not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "test2" {
fs.Logf(nil, "not expected name: %v", fi.Name())
log.Printf("not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/one/test2" {
fs.Logf(nil, "not expected remote: %v", di.Remote())
log.Printf("not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
fs.Logf(nil, "unexpected listing: %v", li)
log.Printf("unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
}
fs.Logf(nil, "complete listing /test/one/test2")
log.Printf("complete listing /test/one/test2")
return nil
}, 12, time.Second*10)
require.NoError(t, err)
@@ -688,7 +689,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
co, ok := o.(*cache.Object)
require.True(t, ok)
for i := range 4 { // read first 4
for i := 0; i < 4; i++ { // read first 4
_ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
}
cfs.CleanUpCache(true)
@@ -707,7 +708,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
func TestInternalExpiredEntriesRemoved(t *testing.T) {
id := fmt.Sprintf("tieer%v", time.Now().Unix())
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 4) // needs to be lower than the defined
vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -742,7 +743,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
}
func TestInternalBug2117(t *testing.T) {
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 10)
vfsflags.Opt.DirCacheTime = time.Second * 10
id := fmt.Sprintf("tib2117%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
@@ -770,24 +771,24 @@ func TestInternalBug2117(t *testing.T) {
di, err := runInstance.list(t, rootFs, "test/dir1/dir2")
require.NoError(t, err)
fs.Logf(nil, "len: %v", len(di))
log.Printf("len: %v", len(di))
require.Len(t, di, 1)
time.Sleep(time.Second * 30)
di, err = runInstance.list(t, rootFs, "test/dir1/dir2")
require.NoError(t, err)
fs.Logf(nil, "len: %v", len(di))
log.Printf("len: %v", len(di))
require.Len(t, di, 1)
di, err = runInstance.list(t, rootFs, "test/dir1")
require.NoError(t, err)
fs.Logf(nil, "len: %v", len(di))
log.Printf("len: %v", len(di))
require.Len(t, di, 4)
di, err = runInstance.list(t, rootFs, "test")
require.NoError(t, err)
fs.Logf(nil, "len: %v", len(di))
log.Printf("len: %v", len(di))
require.Len(t, di, 4)
}
@@ -828,7 +829,7 @@ func newRun() *run {
} else {
r.tmpUploadDir = uploadDir
}
fs.Logf(nil, "Temp Upload Dir: %v", r.tmpUploadDir)
log.Printf("Temp Upload Dir: %v", r.tmpUploadDir)
return r
}
@@ -849,8 +850,8 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
fstest.Initialise()
remoteExists := false
for _, s := range config.GetRemotes() {
if s.Name == remote {
for _, s := range config.FileSections() {
if s == remote {
remoteExists = true
}
}
@@ -874,12 +875,12 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
cacheRemote := remote
if !remoteExists {
localRemote := remote + "-local"
config.FileSetValue(localRemote, "type", "local")
config.FileSetValue(localRemote, "nounc", "true")
config.FileSet(localRemote, "type", "local")
config.FileSet(localRemote, "nounc", "true")
m.Set("type", "cache")
m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
} else {
remoteType := config.GetValue(remote, "type")
remoteType := config.FileGet(remote, "type")
if remoteType == "" {
t.Skipf("skipped due to invalid remote type for %v", remote)
return nil, nil
@@ -890,14 +891,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
m.Set("password", cryptPassword1)
m.Set("password2", cryptPassword2)
}
remoteRemote := config.GetValue(remote, "remote")
remoteRemote := config.FileGet(remote, "remote")
if remoteRemote == "" {
t.Skipf("skipped due to invalid remote wrapper for %v", remote)
return nil, nil
}
remoteRemoteParts := strings.Split(remoteRemote, ":")
remoteWrapping := remoteRemoteParts[0]
remoteType := config.GetValue(remoteWrapping, "type")
remoteType := config.FileGet(remoteWrapping, "type")
if remoteType != "cache" {
t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
return nil, nil
@@ -934,7 +935,8 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
if purge {
_ = operations.Purge(context.Background(), f, "")
_ = f.Features().Purge(context.Background(), "")
require.NoError(t, err)
}
err = f.Mkdir(context.Background(), "")
require.NoError(t, err)
@@ -947,7 +949,7 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
err := operations.Purge(context.Background(), f, "")
err := f.Features().Purge(context.Background(), "")
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
@@ -971,7 +973,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
f, err := os.CreateTemp("", "rclonecache-tempfile")
require.NoError(t, err)
for range int(cnt) {
for i := 0; i < int(cnt); i++ {
data := randStringBytes(int(chunk))
_, _ = f.Write(data)
}
@@ -1085,9 +1087,9 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
return err
}
func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]any, error) {
func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
var err error
var l []any
var l []interface{}
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
for _, ll := range list {
@@ -1191,7 +1193,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
func (r *run) cleanSize(t *testing.T, size int64) int64 {
if r.rootIsCrypt {
denominator := int64(65536 + 16)
size -= 32
size = size - 32
quotient := size / denominator
remainder := size % denominator
return (quotient*65536 + remainder - 16)
@@ -1215,7 +1217,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
var err error
var state cache.BackgroundUploadState
for range 2 {
for i := 0; i < 2; i++ {
select {
case state = <-buCh:
// continue
@@ -1293,7 +1295,7 @@ func (r *run) completeAllBackgroundUploads(t *testing.T, f fs.Fs, lastRemote str
func (r *run) retryBlock(block func() error, maxRetries int, rate time.Duration) error {
var err error
for range maxRetries {
for i := 0; i < maxRetries; i++ {
err = block()
if err == nil {
return nil

View File

@@ -1,6 +1,7 @@
// Test Cache filesystem interface
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
package cache_test
@@ -15,11 +16,10 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata", "ListP"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
})
}

View File

@@ -2,6 +2,6 @@
// about "no buildable Go source files "
//go:build plan9 || js
// +build plan9 js
// Package cache implements a virtual provider to cache existing remotes.
package cache

View File

@@ -1,4 +1,5 @@
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
package cache_test
@@ -162,7 +163,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
randInstance := rand.New(rand.NewSource(time.Now().Unix()))
lastFile := ""
for i := range totalFiles {
for i := 0; i < totalFiles; i++ {
size := int64(randInstance.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin"

View File

@@ -1,4 +1,5 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache

View File

@@ -1,4 +1,5 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
@@ -118,7 +119,7 @@ func (r *Handle) startReadWorkers() {
r.scaleWorkers(totalWorkers)
}
// scaleWorkers will increase the worker pool count by the provided amount
// scaleOutWorkers will increase the worker pool count by the provided amount
func (r *Handle) scaleWorkers(desired int) {
current := r.workers
if current == desired {
@@ -182,7 +183,7 @@ func (r *Handle) queueOffset(offset int64) {
}
}
for i := range r.workers {
for i := 0; i < r.workers; i++ {
o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
if o < 0 || o >= r.cachedObject.Size() {
continue
@@ -208,7 +209,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)
// we align the start offset of the first chunk to a likely chunk in the storage
chunkStart -= offset
chunkStart = chunkStart - offset
r.queueOffset(chunkStart)
found := false
@@ -222,7 +223,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
if !found {
// we're gonna give the workers a chance to pick up the chunk
// and retry a couple of times
for i := range r.cacheFs().opt.ReadRetries * 8 {
for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
if err == nil {
found = true
@@ -327,7 +328,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
chunkStart -= int64(r.cacheFs().opt.ChunkSize)
chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
}
r.queueOffset(chunkStart)
@@ -415,8 +416,10 @@ func (w *worker) run() {
continue
}
}
} else if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
continue
} else {
if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
continue
}
}
chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)

View File

@@ -1,4 +1,5 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache

View File

@@ -1,4 +1,5 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
@@ -209,7 +210,7 @@ func (p *plexConnector) authenticate() error {
if err != nil {
return err
}
var data map[string]any
var data map[string]interface{}
err = json.NewDecoder(resp.Body).Decode(&data)
if err != nil {
return fmt.Errorf("failed to obtain token: %w", err)
@@ -273,11 +274,11 @@ func (p *plexConnector) isPlaying(co *Object) bool {
}
// adapted from: https://stackoverflow.com/a/28878037 (credit)
func get(m any, path ...any) (any, bool) {
func get(m interface{}, path ...interface{}) (interface{}, bool) {
for _, p := range path {
switch idx := p.(type) {
case string:
if mm, ok := m.(map[string]any); ok {
if mm, ok := m.(map[string]interface{}); ok {
if val, found := mm[idx]; found {
m = val
continue
@@ -285,7 +286,7 @@ func get(m any, path ...any) (any, bool) {
}
return nil, false
case int:
if mm, ok := m.([]any); ok {
if mm, ok := m.([]interface{}); ok {
if len(mm) > idx {
m = mm[idx]
continue

View File

@@ -1,4 +1,5 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache

View File

@@ -1,4 +1,5 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
@@ -18,7 +19,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
bolt "go.etcd.io/bbolt"
"go.etcd.io/bbolt/errors"
)
// Constants
@@ -598,7 +598,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
})
if err != nil {
if err == errors.ErrDatabaseNotOpen {
if err == bolt.ErrDatabaseNotOpen {
// we're likely a late janitor and we need to end quietly as there's no guarantee of what exists anymore
return
}
@@ -607,16 +607,16 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
}
// Stats returns a go map with the stats key values
func (b *Persistent) Stats() (map[string]map[string]any, error) {
r := make(map[string]map[string]any)
r["data"] = make(map[string]any)
func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
r := make(map[string]map[string]interface{})
r["data"] = make(map[string]interface{})
r["data"]["oldest-ts"] = time.Now()
r["data"]["oldest-file"] = ""
r["data"]["newest-ts"] = time.Now()
r["data"]["newest-file"] = ""
r["data"]["total-chunks"] = 0
r["data"]["total-size"] = int64(0)
r["files"] = make(map[string]any)
r["files"] = make(map[string]interface{})
r["files"]["oldest-ts"] = time.Now()
r["files"]["oldest-name"] = ""
r["files"]["newest-ts"] = time.Now()

View File

@@ -1,6 +1,3 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
import bolt "go.etcd.io/bbolt"

View File

@@ -29,7 +29,6 @@ import (
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/encoder"
)
// Chunker's composite files have one or more chunks
@@ -102,10 +101,8 @@ var (
//
// And still chunker's primary function is to chunk large files
// rather than serve as a generic metadata container.
const (
maxMetadataSize = 1023
maxMetadataSizeWritten = 255
)
const maxMetadataSize = 1023
const maxMetadataSizeWritten = 255
// Current/highest supported metadata format.
const metadataVersion = 2
@@ -308,6 +305,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
root: rpath,
opt: *opt,
}
cache.PinUntilFinalized(f.base, f)
f.dirSort = true // processEntries requires that meta Objects prerun data chunks atm.
if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType, opt.Transactions); err != nil {
@@ -319,45 +317,29 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
// i.e. `rpath` does not exist in the wrapped remote, but chunker
// detects a composite file because it finds the first chunk!
// (yet can't satisfy fstest.CheckListing, will ignore)
if err == nil && !f.useMeta {
if err == nil && !f.useMeta && strings.Contains(rpath, "/") {
firstChunkPath := f.makeChunkName(remotePath, 0, "", "")
newBase, testErr := cache.Get(ctx, baseName+firstChunkPath)
_, testErr := cache.Get(ctx, baseName+firstChunkPath)
if testErr == fs.ErrorIsFile {
f.base = newBase
err = testErr
}
}
cache.PinUntilFinalized(f.base, f)
// Correct root if definitely pointing to a file
if err == fs.ErrorIsFile {
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
// Note 1: the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs.
// Note 2: features.Fill() points features.PutStream to our PutStream,
// but features.Mask() will nullify it if wrappedFs does not have it.
f.features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: true,
ReadMimeType: false, // Object.MimeType not supported
WriteMimeType: true,
BucketBased: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: true,
ReadDirMetadata: true,
WriteDirMetadata: true,
WriteDirSetModTime: true,
UserDirMetadata: true,
DirModTimeUpdatesOnWrite: true,
CaseInsensitive: true,
DuplicateFiles: true,
ReadMimeType: false, // Object.MimeType not supported
WriteMimeType: true,
BucketBased: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: true,
}).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)
f.features.ListR = nil // Recursive listing may cause chunker skip files
f.features.ListP = nil // ListP not supported yet
f.features.Disable("ListR") // Recursive listing may cause chunker skip files
return f, err
}
@@ -633,7 +615,7 @@ func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ct
// forbidChunk prints error message or raises error if file is chunk.
// First argument sets log prefix, use `false` to suppress message.
func (f *Fs) forbidChunk(o any, filePath string) error {
func (f *Fs) forbidChunk(o interface{}, filePath string) error {
if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" {
if f.opt.FailHard {
return fmt.Errorf("chunk overlap with %q", parentPath)
@@ -681,7 +663,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
circleSec := unixSec % closestPrimeZzzzSeconds
first4chars := strconv.FormatInt(circleSec, 36)
for range maxTransactionProbes {
for tries := 0; tries < maxTransactionProbes; tries++ {
f.xactIDMutex.Lock()
randomness := f.xactIDRand.Int63n(maxTwoBase36Digits + 1)
f.xactIDMutex.Unlock()
@@ -831,7 +813,8 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
}
case fs.Directory:
isSubdir[entry.Remote()] = true
wrapDir := fs.NewDirWrapper(entry.Remote(), entry)
wrapDir := fs.NewDirCopy(ctx, entry)
wrapDir.SetRemote(entry.Remote())
tempEntries = append(tempEntries, wrapDir)
default:
if f.opt.FailHard {
@@ -964,11 +947,6 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
}
if caseInsensitive {
sameMain = strings.EqualFold(mainRemote, remote)
if sameMain && f.base.Features().IsLocal {
// on local, make sure the EqualFold still holds true when accounting for encoding.
// sometimes paths with special characters will only normalize the same way in Standard Encoding.
sameMain = strings.EqualFold(encoder.OS.FromStandardPath(mainRemote), encoder.OS.FromStandardPath(remote))
}
} else {
sameMain = mainRemote == remote
}
@@ -982,13 +960,13 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
}
continue
}
// fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
//fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
if err := o.addChunk(entry, chunkNo); err != nil {
return nil, err
}
}
if o.main == nil && len(o.chunks) == 0 {
if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
// Scanning hasn't found data chunks with conforming names.
if f.useMeta || quickScan {
// Metadata is required but absent and there are no chunks.
@@ -1144,8 +1122,8 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
// put implements Put, PutStream, PutUnchecked, Update
func (f *Fs) put(
ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
basePut putFn, action string, target fs.Object,
) (obj fs.Object, err error) {
basePut putFn, action string, target fs.Object) (obj fs.Object, err error) {
// Perform consistency checks
if err := f.forbidChunk(src, remote); err != nil {
return nil, fmt.Errorf("%s refused: %w", action, err)
@@ -1190,7 +1168,10 @@ func (f *Fs) put(
}
tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactID)
size := min(c.sizeLeft, c.chunkSize)
size := c.sizeLeft
if size > c.chunkSize {
size = c.chunkSize
}
savedReadCount := c.readCount
// If a single chunk is expected, avoid the extra rename operation
@@ -1475,7 +1456,10 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
const bufLen = 1048576 // 1 MiB
buf := make([]byte, bufLen)
for size > 0 {
n := min(size, bufLen)
n := size
if n > bufLen {
n = bufLen
}
if _, err := io.ReadFull(in, buf[0:n]); err != nil {
return err
}
@@ -1579,14 +1563,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.base.Mkdir(ctx, dir)
}
// MkdirMetadata makes the root directory of the Fs object
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
if do := f.base.Features().MkdirMetadata; do != nil {
return do(ctx, dir, metadata)
}
return nil, fs.ErrorNotImplemented
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
@@ -1904,14 +1880,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return do(ctx, srcFs.base, srcRemote, dstRemote)
}
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
if do := f.base.Features().DirSetModTime; do != nil {
return do(ctx, dir, modTime)
}
return fs.ErrorNotImplemented
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
@@ -1960,7 +1928,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
return
}
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
//fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
if entryType == fs.EntryObject {
mainPath, _, _, xactID := f.parseChunkName(path)
metaXactID := ""
@@ -2475,7 +2443,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
if len(data) > maxMetadataSizeWritten {
return nil, false, ErrMetaTooBig
}
if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
return nil, false, errors.New("invalid json")
}
var metadata metaSimpleJSON
@@ -2572,8 +2540,6 @@ var (
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)

View File

@@ -40,7 +40,7 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
})
}
type settings map[string]any
type settings map[string]interface{}
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
fsName := strings.Split(f.Name(), "{")[0] // strip off hash

View File

@@ -36,17 +36,14 @@ func TestIntegration(t *testing.T) {
"GetTier",
"SetTier",
"Metadata",
"SetMetadata",
},
UnimplementableFsMethods: []string{
"PublicLink",
"OpenWriterAt",
"OpenChunkWriter",
"MergeDirs",
"DirCacheFlush",
"UserInfo",
"Disconnect",
"ListP",
},
}
if *fstest.RemoteName == "" {

View File

@@ -1,48 +0,0 @@
// Package api has type definitions for cloudinary
package api
import (
"fmt"
)
// CloudinaryEncoder extends the built-in encoder
type CloudinaryEncoder interface {
// FromStandardPath takes a / separated path in Standard encoding
// and converts it to a / separated path in this encoding.
FromStandardPath(string) string
// FromStandardName takes name in Standard encoding and converts
// it in this encoding.
FromStandardName(string) string
// ToStandardPath takes a / separated path in this encoding
// and converts it to a / separated path in Standard encoding.
ToStandardPath(string) string
// ToStandardName takes name in this encoding and converts
// it in Standard encoding.
ToStandardName(string, string) string
// Encoded root of the remote (as passed into NewFs)
FromStandardFullPath(string) string
}
// UpdateOptions was created to pass options from Update to Put
type UpdateOptions struct {
PublicID string
ResourceType string
DeliveryType string
AssetFolder string
DisplayName string
}
// Header formats the option as a string
func (o *UpdateOptions) Header() (string, string) {
return "UpdateOption", fmt.Sprintf("%s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}
// Mandatory returns whether the option must be parsed or can be ignored
func (o *UpdateOptions) Mandatory() bool {
return false
}
// String formats the option into human-readable form
func (o *UpdateOptions) String() string {
return fmt.Sprintf("Fully qualified Public ID: %s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}

View File

@@ -1,754 +0,0 @@
// Package cloudinary provides an interface to the Cloudinary DAM
package cloudinary
import (
"context"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"path"
"slices"
"strconv"
"strings"
"time"
"github.com/cloudinary/cloudinary-go/v2"
SDKApi "github.com/cloudinary/cloudinary-go/v2/api"
"github.com/cloudinary/cloudinary-go/v2/api/admin"
"github.com/cloudinary/cloudinary-go/v2/api/admin/search"
"github.com/cloudinary/cloudinary-go/v2/api/uploader"
"github.com/rclone/rclone/backend/cloudinary/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"github.com/zeebo/blake3"
)
// Cloudinary shouldn't have a trailing dot if there is no path
func cldPathDir(somePath string) string {
if somePath == "" || somePath == "." {
return somePath
}
dir := path.Dir(somePath)
if dir == "." {
return ""
}
return dir
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "cloudinary",
Description: "Cloudinary",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "cloud_name",
Help: "Cloudinary Environment Name",
Required: true,
Sensitive: true,
},
{
Name: "api_key",
Help: "Cloudinary API Key",
Required: true,
Sensitive: true,
},
{
Name: "api_secret",
Help: "Cloudinary API Secret",
Required: true,
Sensitive: true,
},
{
Name: "upload_prefix",
Help: "Specify the API endpoint for environments out of the US",
},
{
Name: "upload_preset",
Help: "Upload Preset to select asset manipulation on upload",
},
{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
encoder.EncodeSlash |
encoder.EncodeLtGt |
encoder.EncodeDoubleQuote |
encoder.EncodeQuestion |
encoder.EncodeAsterisk |
encoder.EncodePipe |
encoder.EncodeHash |
encoder.EncodePercent |
encoder.EncodeBackSlash |
encoder.EncodeDel |
encoder.EncodeCtl |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8 |
encoder.EncodeDot),
},
{
Name: "eventually_consistent_delay",
Default: fs.Duration(0),
Advanced: true,
Help: "Wait N seconds for eventual consistency of the databases that support the backend operation",
},
{
Name: "adjust_media_files_extensions",
Default: true,
Advanced: true,
Help: "Cloudinary handles media formats as a file attribute and strips it from the name, which is unlike most other file systems",
},
{
Name: "media_extensions",
Default: []string{
"3ds", "3g2", "3gp", "ai", "arw", "avi", "avif", "bmp", "bw",
"cr2", "cr3", "djvu", "dng", "eps3", "fbx", "flif", "flv", "gif",
"glb", "gltf", "hdp", "heic", "heif", "ico", "indd", "jp2", "jpe",
"jpeg", "jpg", "jxl", "jxr", "m2ts", "mov", "mp4", "mpeg", "mts",
"mxf", "obj", "ogv", "pdf", "ply", "png", "psd", "svg", "tga",
"tif", "tiff", "ts", "u3ma", "usdz", "wdp", "webm", "webp", "wmv"},
Advanced: true,
Help: "Cloudinary supported media extensions",
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
CloudName string `config:"cloud_name"`
APIKey string `config:"api_key"`
APISecret string `config:"api_secret"`
UploadPrefix string `config:"upload_prefix"`
UploadPreset string `config:"upload_preset"`
Enc encoder.MultiEncoder `config:"encoding"`
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
MediaExtensions []string `config:"media_extensions"`
AdjustMediaFilesExtensions bool `config:"adjust_media_files_extensions"`
}
// Fs represents a remote cloudinary server
type Fs struct {
name string
root string
opt Options
features *fs.Features
pacer *fs.Pacer
srv *rest.Client // For downloading assets via the Cloudinary CDN
cld *cloudinary.Cloudinary // API calls are going through the Cloudinary SDK
lastCRUD time.Time
}
// Object describes a cloudinary object
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
url string
md5sum string
publicID string
resourceType string
deliveryType string
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Initialize the Cloudinary client
cld, err := cloudinary.NewFromParams(opt.CloudName, opt.APIKey, opt.APISecret)
if err != nil {
return nil, fmt.Errorf("failed to create Cloudinary client: %w", err)
}
cld.Admin.Client = *fshttp.NewClient(ctx)
cld.Upload.Client = *fshttp.NewClient(ctx)
if opt.UploadPrefix != "" {
cld.Config.API.UploadPrefix = opt.UploadPrefix
}
client := fshttp.NewClient(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
cld: cld,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1000), pacer.MaxSleep(10000), pacer.DecayConstant(2))),
srv: rest.NewClient(client),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
if root != "" {
// Check to see if the root actually an existing file
remote := path.Base(root)
f.root = cldPathDir(root)
_, err := f.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
// File doesn't exist so return the previous root
f.root = root
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// ------------------------------------------------------------
// FromStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardPath(s string) string {
return strings.ReplaceAll(f.opt.Enc.FromStandardPath(s), "&", "\uFF06")
}
// FromStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardName(s string) string {
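// When extension adjustment is enabled, strip a recognised media extension
// from the name, since Cloudinary stores the format separately and omits it
// from the display name.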
if f.opt.AdjustMediaFilesExtensions {
parsedURL, err := url.Parse(s)
ext := ""
if err != nil {
fs.Logf(nil, "Error parsing URL: %v", err)
} else {
ext = path.Ext(parsedURL.Path)
if slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
s = strings.TrimSuffix(parsedURL.Path, ext)
}
}
}
return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
}
// ToStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardPath(s string) string {
return strings.ReplaceAll(f.opt.Enc.ToStandardPath(s), "\uFF06", "&")
}
// ToStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardName(s string, assetURL string) string {
ext := ""
if f.opt.AdjustMediaFilesExtensions {
parsedURL, err := url.Parse(assetURL)
if err != nil {
fs.Logf(nil, "Error parsing URL: %v", err)
} else {
ext = path.Ext(parsedURL.Path)
if !slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
ext = ""
}
}
}
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&") + ext
}
// FromStandardFullPath encodes a full path to Cloudinary standard
func (f *Fs) FromStandardFullPath(dir string) string {
return path.Join(api.CloudinaryEncoder.FromStandardPath(f, f.root), api.CloudinaryEncoder.FromStandardPath(f, dir))
}
// ToAssetFolderAPI encodes folders as expected by the Cloudinary SDK
func (f *Fs) ToAssetFolderAPI(dir string) string {
return strings.ReplaceAll(dir, "%", "%25")
}
// ToDisplayNameElastic escapes characters that are special to Cloudinary's Elasticsearch-based search expressions
func (f *Fs) ToDisplayNameElastic(dir string) string {
return strings.ReplaceAll(dir, "!", "\\!")
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// WaitEventuallyConsistent waits till the FS is eventually consistent
func (f *Fs) WaitEventuallyConsistent() {
if f.opt.EventuallyConsistentDelay == fs.Duration(0) {
return
}
delay := time.Duration(f.opt.EventuallyConsistentDelay)
timeSinceLastCRUD := time.Since(f.lastCRUD)
if timeSinceLastCRUD < delay {
time.Sleep(delay - timeSinceLastCRUD)
}
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Cloudinary root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// List the objects and directories in dir into entries
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
remotePrefix := f.FromStandardFullPath(dir)
if remotePrefix != "" && !strings.HasSuffix(remotePrefix, "/") {
remotePrefix += "/"
}
var entries fs.DirEntries
dirs := make(map[string]struct{})
nextCursor := ""
f.WaitEventuallyConsistent()
for {
// Use the folders API to list folders.
folderParams := admin.SubFoldersParams{
Folder: f.ToAssetFolderAPI(remotePrefix),
MaxResults: 500,
}
if nextCursor != "" {
folderParams.NextCursor = nextCursor
}
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
if err != nil {
return nil, fmt.Errorf("failed to list sub-folders: %w", err)
}
if results.Error.Message != "" {
if strings.HasPrefix(results.Error.Message, "Can't find folder with path") {
return nil, fs.ErrorDirNotFound
}
return nil, fmt.Errorf("failed to list sub-folders: %s", results.Error.Message)
}
for _, folder := range results.Folders {
relativePath := api.CloudinaryEncoder.ToStandardPath(f, strings.TrimPrefix(folder.Path, remotePrefix))
parts := strings.Split(relativePath, "/")
// It's a directory
dirName := parts[len(parts)-1]
if _, found := dirs[dirName]; !found {
d := fs.NewDir(path.Join(dir, dirName), time.Time{})
entries = append(entries, d)
dirs[dirName] = struct{}{}
}
}
// Break if there are no more results
if results.NextCursor == "" {
break
}
nextCursor = results.NextCursor
}
for {
// Use the assets.AssetsByAssetFolder API to list assets
assetsParams := admin.AssetsByAssetFolderParams{
AssetFolder: remotePrefix,
MaxResults: 500,
}
if nextCursor != "" {
assetsParams.NextCursor = nextCursor
}
results, err := f.cld.Admin.AssetsByAssetFolder(ctx, assetsParams)
if err != nil {
return nil, fmt.Errorf("failed to list assets: %w", err)
}
for _, asset := range results.Assets {
remote := path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName, asset.SecureURL))
o := &Object{
fs: f,
remote: remote,
size: int64(asset.Bytes),
modTime: asset.CreatedAt,
url: asset.SecureURL,
publicID: asset.PublicID,
resourceType: asset.AssetType,
deliveryType: asset.Type,
}
entries = append(entries, o)
}
// Break if there are no more results
if results.NextCursor == "" {
break
}
nextCursor = results.NextCursor
}
return entries, nil
}
// NewObject finds the Object at remote. If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
searchParams := search.Query{
Expression: fmt.Sprintf("asset_folder:\"%s\" AND display_name:\"%s\"",
f.FromStandardFullPath(cldPathDir(remote)),
f.ToDisplayNameElastic(api.CloudinaryEncoder.FromStandardName(f, path.Base(remote)))),
SortBy: []search.SortByField{{"uploaded_at": "desc"}},
MaxResults: 2,
}
var results *admin.SearchResult
f.WaitEventuallyConsistent()
err := f.pacer.Call(func() (bool, error) {
var err1 error
results, err1 = f.cld.Admin.Search(ctx, searchParams)
if err1 == nil && results.TotalCount != len(results.Assets) {
err1 = errors.New("partial response so waiting for eventual consistency")
}
return shouldRetry(ctx, nil, err1)
})
if err != nil {
return nil, fs.ErrorObjectNotFound
}
if results.TotalCount == 0 || len(results.Assets) == 0 {
return nil, fs.ErrorObjectNotFound
}
asset := results.Assets[0]
o := &Object{
fs: f,
remote: remote,
size: int64(asset.Bytes),
modTime: asset.UploadedAt,
url: asset.SecureURL,
md5sum: asset.Etag,
publicID: asset.PublicID,
resourceType: asset.ResourceType,
deliveryType: asset.Type,
}
return o, nil
}
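// getSuggestedPublicID returns a deterministic public ID derived from a
// BLAKE3 hash of the asset folder and display name.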
func (f *Fs) getSuggestedPublicID(assetFolder string, displayName string, modTime time.Time) string {
payload := []byte(path.Join(assetFolder, displayName))
hash := blake3.Sum256(payload)
return hex.EncodeToString(hash[:])
}
// Put uploads content to Cloudinary
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if src.Size() == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
params := uploader.UploadParams{
UploadPreset: f.opt.UploadPreset,
}
updateObject := false
var modTime time.Time
for _, option := range options {
if updateOptions, ok := option.(*api.UpdateOptions); ok {
if updateOptions.PublicID != "" {
updateObject = true
params.Overwrite = SDKApi.Bool(true)
params.Invalidate = SDKApi.Bool(true)
params.PublicID = updateOptions.PublicID
params.ResourceType = updateOptions.ResourceType
params.Type = SDKApi.DeliveryType(updateOptions.DeliveryType)
params.AssetFolder = updateOptions.AssetFolder
params.DisplayName = updateOptions.DisplayName
modTime = src.ModTime(ctx)
}
}
}
if !updateObject {
params.AssetFolder = f.FromStandardFullPath(cldPathDir(src.Remote()))
params.DisplayName = api.CloudinaryEncoder.FromStandardName(f, path.Base(src.Remote()))
// We want to conform to the unique asset ID of rclone, which is (asset_folder,display_name,last_modified).
// We also want to enable customers to choose their own public_id, in case duplicate names are not a crucial use case.
// Upload_presets that apply randomness to the public ID would not work well with rclone duplicate assets support.
params.FilenameOverride = f.getSuggestedPublicID(params.AssetFolder, params.DisplayName, src.ModTime(ctx))
}
uploadResult, err := f.cld.Upload.Upload(ctx, in, params)
f.lastCRUD = time.Now()
if err != nil {
return nil, fmt.Errorf("failed to upload to Cloudinary: %w", err)
}
if !updateObject {
modTime = uploadResult.CreatedAt
}
if uploadResult.Error.Message != "" {
return nil, errors.New(uploadResult.Error.Message)
}
o := &Object{
fs: f,
remote: src.Remote(),
size: int64(uploadResult.Bytes),
modTime: modTime,
url: uploadResult.SecureURL,
md5sum: uploadResult.Etag,
publicID: uploadResult.PublicID,
resourceType: uploadResult.ResourceType,
deliveryType: uploadResult.Type,
}
return o, nil
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns the supported hash sets
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// Mkdir creates empty folders
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
params := admin.CreateFolderParams{Folder: f.ToAssetFolderAPI(f.FromStandardFullPath(dir))}
res, err := f.cld.Admin.CreateFolder(ctx, params)
f.lastCRUD = time.Now()
if err != nil {
return err
}
if res.Error.Message != "" {
return errors.New(res.Error.Message)
}
return nil
}
// Rmdir deletes empty folders
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// Additional check needed because Cloudinary will delete folders that
// contain no assets, even if they still have empty sub-folders
folder := f.ToAssetFolderAPI(f.FromStandardFullPath(dir))
folderParams := admin.SubFoldersParams{
Folder: folder,
MaxResults: 1,
}
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
if err != nil {
return err
}
if results.TotalCount > 0 {
return fs.ErrorDirectoryNotEmpty
}
params := admin.DeleteFolderParams{Folder: folder}
res, err := f.cld.Admin.DeleteFolder(ctx, params)
f.lastCRUD = time.Now()
if err != nil {
return err
}
if res.Error.Message != "" {
if strings.HasPrefix(res.Error.Message, "Can't find folder with path") {
return fs.ErrorDirNotFound
}
return errors.New(res.Error.Message)
}
return nil
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
420, // Too Many Requests (legacy)
429, // Too Many Requests
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if err != nil {
tryAgain := "Try again on "
if idx := strings.Index(err.Error(), tryAgain); idx != -1 {
layout := "2006-01-02 15:04:05 UTC"
dateStr := err.Error()[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
timestamp, err2 := time.Parse(layout, dateStr)
if err2 == nil {
return true, fserrors.NewErrorRetryAfter(time.Until(timestamp))
}
}
fs.Debugf(nil, "Retrying API error %v", err)
return true, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// ------------------------------------------------------------
// Hash returns the MD5 of an object
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
if ty != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5sum, nil
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// Size of object in bytes
func (o *Object) Size() int64 {
return o.size
}
// Storable returns if this object is storable
func (o *Object) Storable() bool {
return true
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return fs.ErrorCantSetModTime
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var resp *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: o.url,
Options: options,
}
var offset int64
var count int64
var key string
var value string
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, count = x.Decode(o.size)
if count < 0 {
count = o.size - offset
}
key, value = option.Header()
case *fs.SeekOption:
offset = x.Offset
count = o.size - offset
key, value = option.Header()
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if key != "" && value != "" {
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders[key] = value
}
// Make sure that the asset is fully available
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
if err == nil {
cl, clErr := strconv.Atoi(resp.Header.Get("content-length"))
if clErr == nil && count == int64(cl) {
return false, nil
}
}
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed download of \"%s\": %w", o.url, err)
}
return resp.Body, err
}
// Update the object with the contents of the io.Reader
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
options = append(options, &api.UpdateOptions{
PublicID: o.publicID,
ResourceType: o.resourceType,
DeliveryType: o.deliveryType,
DisplayName: api.CloudinaryEncoder.FromStandardName(o.fs, path.Base(o.Remote())),
AssetFolder: o.fs.FromStandardFullPath(cldPathDir(o.Remote())),
})
updatedObj, err := o.fs.Put(ctx, in, src, options...)
if err != nil {
return err
}
if uo, ok := updatedObj.(*Object); ok {
o.size = uo.size
o.modTime = time.Now() // Skipping uo.modTime because the API returns the create time
o.url = uo.url
o.md5sum = uo.md5sum
o.publicID = uo.publicID
o.resourceType = uo.resourceType
o.deliveryType = uo.deliveryType
}
return nil
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
params := uploader.DestroyParams{
PublicID: o.publicID,
ResourceType: o.resourceType,
Type: o.deliveryType,
}
res, dErr := o.fs.cld.Upload.Destroy(ctx, params)
o.fs.lastCRUD = time.Now()
if dErr != nil {
return dErr
}
if res.Error.Message != "" {
return errors.New(res.Error.Message)
}
if res.Result != "ok" {
return errors.New(res.Result)
}
return nil
}

View File

@@ -1,23 +0,0 @@
// Test Cloudinary filesystem interface
package cloudinary_test
import (
"testing"
"github.com/rclone/rclone/backend/cloudinary"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
name := "TestCloudinary"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*cloudinary.Object)(nil),
SkipInvalidUTF8: true,
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "eventually_consistent_delay", Value: "7"},
},
})
}

View File

@@ -20,7 +20,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"golang.org/x/sync/errgroup"
@@ -223,23 +222,18 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
}
// check features
var features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
ReadMimeType: true,
WriteMimeType: true,
CanHaveEmptyDirectories: true,
BucketBased: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
ReadDirMetadata: true,
WriteDirMetadata: true,
WriteDirSetModTime: true,
UserDirMetadata: true,
DirModTimeUpdatesOnWrite: true,
PartialUploads: true,
CaseInsensitive: true,
DuplicateFiles: false,
ReadMimeType: true,
WriteMimeType: true,
CanHaveEmptyDirectories: true,
BucketBased: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f)
canMove := true
for _, u := range f.upstreams {
@@ -266,9 +260,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
}
}
// Enable ListP always
features.ListP = f.ListP
// Enable Purge when any upstreams support it
if features.Purge == nil {
for _, u := range f.upstreams {
@@ -449,32 +440,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return u.f.Mkdir(ctx, uRemote)
}
// MkdirMetadata makes the root directory of the Fs object
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return nil, err
}
do := u.f.Features().MkdirMetadata
if do == nil {
return nil, fs.ErrorNotImplemented
}
newDir, err := do(ctx, uRemote, metadata)
if err != nil {
return nil, err
}
entries := fs.DirEntries{newDir}
entries, err = u.wrapEntries(ctx, entries)
if err != nil {
return nil, err
}
newDir, ok := entries[0].(fs.Directory)
if !ok {
return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
}
return newDir, nil
}
// purge the upstream or fallback to a slow way
func (u *upstream) purge(ctx context.Context, dir string) (err error) {
if do := u.f.Features().Purge; do != nil {
@@ -790,11 +755,12 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
case fs.Object:
entries[i] = u.newObject(x)
case fs.Directory:
newPath, err := u.pathAdjustment.do(x.Remote())
newDir := fs.NewDirCopy(ctx, x)
newPath, err := u.pathAdjustment.do(newDir.Remote())
if err != nil {
return nil, err
}
newDir := fs.NewDirWrapper(newPath, x)
newDir.SetRemote(newPath)
entries[i] = newDir
default:
return nil, fmt.Errorf("unknown entry type %T", entry)
@@ -813,52 +779,24 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
if f.root == "" && dir == "" {
entries := make(fs.DirEntries, 0, len(f.upstreams))
entries = make(fs.DirEntries, 0, len(f.upstreams))
for combineDir := range f.upstreams {
d := fs.NewLimitedDirWrapper(combineDir, fs.NewDir(combineDir, f.when))
d := fs.NewDir(combineDir, f.when)
entries = append(entries, d)
}
return callback(entries)
return entries, nil
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return err
return nil, err
}
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := u.wrapEntries(ctx, entries)
if err != nil {
return err
}
return callback(entries)
entries, err = u.f.List(ctx, uRemote)
if err != nil {
return nil, err
}
listP := u.f.Features().ListP
if listP == nil {
entries, err := u.f.List(ctx, uRemote)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, dir, wrappedCallback)
return u.wrapEntries(ctx, entries)
}
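
The interleaved hunk above is easier to follow with the callback shape in mind: ListP pushes each tranche of entries through a callback after wrapping them, and List becomes an adapter over ListP. Here is a rough, dependency-free sketch of that pattern; the names and types are illustrative, not rclone's actual API:

package main

import "fmt"

// listCallback receives each tranche of listed entries; returning an error stops the listing.
type listCallback func(entries []string) error

// listP streams entries in tranches through the callback after "wrapping" them.
func listP(dir string, callback listCallback) error {
    tranches := [][]string{{"a.txt", "b.txt"}, {"sub/"}}
    for _, entries := range tranches {
        wrapped := make([]string, len(entries))
        for i, e := range entries {
            wrapped[i] = dir + "/" + e // stand-in for wrapEntries / path adjustment
        }
        if err := callback(wrapped); err != nil {
            return err
        }
    }
    return nil
}

// list collects everything listP produces, mirroring how List defers to ListP.
func list(dir string) (all []string, err error) {
    err = listP(dir, func(entries []string) error {
        all = append(all, entries...)
        return nil
    })
    return all, err
}

func main() {
    entries, _ := list("remote:dir")
    fmt.Println(entries)
}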
// ListR lists the objects and directories of the Fs starting
@@ -976,7 +914,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return do(ctx, uRemote, expire, unlink)
}
// PutUnchecked in to the remote path with the modTime given of the given size
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
@@ -1027,22 +965,6 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return do(ctx, uDirs)
}
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
u, uDir, err := f.findUpstream(dir)
if err != nil {
return err
}
if uDir == "" {
fs.Debugf(dir, "Can't set modtime on upstream root. skipping.")
return nil
}
if do := u.f.Features().DirSetModTime; do != nil {
return do(ctx, uDir, modTime)
}
return fs.ErrorNotImplemented
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
@@ -1151,17 +1073,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
return do.Metadata(ctx)
}
// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
do, ok := o.Object.(fs.SetMetadataer)
if !ok {
return fs.ErrorNotImplemented
}
return do.SetMetadata(ctx, metadata)
}
// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *Object) SetTier(tier string) error {
@@ -1188,8 +1099,6 @@ var (
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.OpenWriterAter = (*Fs)(nil)
_ fs.FullObject = (*Object)(nil)

View File

@@ -11,7 +11,7 @@ import (
)
var (
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "OpenChunkWriter"}
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect"}
unimplementableObjectMethods = []string{}
)

View File

@@ -14,7 +14,6 @@ import (
"fmt"
"io"
"os"
"path"
"regexp"
"strings"
"time"
@@ -29,7 +28,6 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
@@ -39,7 +37,6 @@ import (
const (
initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently
maxChunkSize = 8388608 // at 256 KiB and 8 MiB.
chunkStreams = 0 // Streams to use for reading
bufferSize = 8388608
heuristicBytes = 1048576
@@ -175,33 +172,21 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
opt: *opt,
mode: compressionModeFromName(opt.CompressionMode),
}
// Correct root if definitely pointing to a file
if err == fs.ErrorIsFile {
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
ReadMimeType: false,
WriteMimeType: false,
GetTier: true,
SetTier: true,
BucketBased: true,
CanHaveEmptyDirectories: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
ReadDirMetadata: true,
WriteDirMetadata: true,
WriteDirSetModTime: true,
UserDirMetadata: true,
DirModTimeUpdatesOnWrite: true,
PartialUploads: true,
CaseInsensitive: true,
DuplicateFiles: false,
ReadMimeType: false,
WriteMimeType: false,
GetTier: true,
SetTier: true,
BucketBased: true,
CanHaveEmptyDirectories: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// We support reading MIME types no matter the wrapped fs
f.features.ReadMimeType = true
@@ -209,8 +194,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
if !operations.CanServerSideMove(wrappedFs) {
f.features.Disable("PutStream")
}
// Enable ListP always
f.features.ListP = f.ListP
return f, err
}
@@ -274,16 +257,6 @@ func isMetadataFile(filename string) bool {
return strings.HasSuffix(filename, metaFileExt)
}
// Checks whether a file is a metadata file and returns the original
// file name and a flag indicating whether it was a metadata file or
// not.
func unwrapMetadataFile(filename string) (string, bool) {
if !isMetadataFile(filename) {
return "", false
}
return filename[:len(filename)-len(metaFileExt)], true
}
// makeDataName generates the file name for a data file with specified compression mode
func makeDataName(remote string, size int64, mode int) (newRemote string) {
if mode != Uncompressed {
@@ -355,39 +328,11 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
// found.
// List entries and process them
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := f.processEntries(entries)
if err != nil {
return err
}
return callback(entries)
entries, err = f.Fs.List(ctx, dir)
if err != nil {
return nil, err
}
listP := f.Fs.Features().ListP
if listP == nil {
entries, err := f.Fs.List(ctx, dir)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, dir, wrappedCallback)
return f.processEntries(entries)
}
// ListR lists the objects and directories of the Fs starting
@@ -487,7 +432,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return fmt.Errorf("corrupted on transfer: %v compressed hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash)
return fmt.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
}
return nil
}
@@ -821,14 +766,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.Fs.Mkdir(ctx, dir)
}
// MkdirMetadata makes the root directory of the Fs object
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
if do := f.Fs.Features().MkdirMetadata; do != nil {
return do(ctx, dir, metadata)
}
return nil, fs.ErrorNotImplemented
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
@@ -972,14 +909,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return do(ctx, srcFs.Fs, srcRemote, dstRemote)
}
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
if do := f.Fs.Features().DirSetModTime; do != nil {
return do(ctx, dir, modTime)
}
return fs.ErrorNotImplemented
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
@@ -1050,8 +979,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
fs.Logf(f, "path %q entryType %d", path, entryType)
var (
wrappedPath string
isMetadataFile bool
wrappedPath string
)
switch entryType {
case fs.EntryDirectory:
@@ -1059,10 +987,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
case fs.EntryObject:
// Note: All we really need to do to monitor the object is to check whether the metadata changed,
// as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same.
wrappedPath, isMetadataFile = unwrapMetadataFile(path)
if !isMetadataFile {
return
}
wrappedPath = makeMetadataName(path)
default:
fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType)
return
@@ -1318,17 +1243,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
return do.Metadata(ctx)
}
// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
do, ok := o.Object.(fs.SetMetadataer)
if !ok {
return fs.ErrorNotImplemented
}
return do.SetMetadata(ctx, metadata)
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
@@ -1394,7 +1308,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
}
// Get a chunkedreader for the wrapped object
chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams)
chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize)
// Get file handle
var file io.Reader
if offset != 0 {
@@ -1561,8 +1475,6 @@ var (
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)

View File

@@ -14,26 +14,23 @@ import (
"github.com/rclone/rclone/fstest/fstests"
)
var defaultOpt = fstests.Opt{
RemoteName: "TestCompress:",
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"OpenChunkWriter",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{},
}
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &defaultOpt)
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{}}
fstests.Run(t, &opt)
}
// TestRemoteGzip tests GZIP compression
@@ -43,13 +40,27 @@ func TestRemoteGzip(t *testing.T) {
}
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
name := "TestCompressGzip"
opt := defaultOpt
opt.RemoteName = name + ":"
opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
}
opt.QuickTestOK = true
fstests.Run(t, &opt)
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
UnimplementableObjectMethods: []string{
"GetTier",
"SetTier",
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
},
QuickTestOK: true,
})
}

View File

@@ -192,7 +192,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
dirNameEncrypt: dirNameEncrypt,
encryptedSuffix: ".bin",
}
c.buffers.New = func() any {
c.buffers.New = func() interface{} {
return new([blockSize]byte)
}
err := c.Key(password, salt)
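
For readers unfamiliar with the buffer pool set up in newCipher above: sync.Pool's New hook supplies a fresh value when the pool is empty, and callers Get a buffer, use it, then Put it back for reuse. A small sketch of that round trip follows; the blockSize value here is purely illustrative:

package main

import (
    "fmt"
    "sync"
)

const blockSize = 64 * 1024 // illustrative; the real cipher defines its own block size

func main() {
    var buffers sync.Pool
    buffers.New = func() any {
        return new([blockSize]byte) // pointer to a fixed-size array, as in newCipher
    }

    buf := buffers.Get().(*[blockSize]byte) // type-assert back to the concrete pointer type
    buf[0] = 0x42                           // use the buffer
    fmt.Println("buffer length:", len(buf))
    buffers.Put(buf) // return it for reuse
}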
@@ -329,14 +329,14 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
for _, runeValue := range plaintext {
dir += int(runeValue)
}
dir %= 256
dir = dir % 256
// We'll use this number to store in the result filename...
var result bytes.Buffer
_, _ = result.WriteString(strconv.Itoa(dir) + ".")
// but we'll augment it with the nameKey for real calculation
for i := range len(c.nameKey) {
for i := 0; i < len(c.nameKey); i++ {
dir += int(c.nameKey[i])
}
@@ -418,7 +418,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
}
// add the nameKey to get the real rotate distance
for i := range len(c.nameKey) {
for i := 0; i < len(c.nameKey); i++ {
dir += int(c.nameKey[i])
}
@@ -450,7 +450,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
if pos >= 26 {
pos -= 6
}
pos -= thisdir
pos = pos - thisdir
if pos < 0 {
pos += 52
}
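
As a worked example of the prefix computation in obfuscateSegment above: the rune values of the segment are summed and reduced mod 256, and that number is written before the dot; the nameKey sum is only added afterwards for the actual rotation. A standalone check of the arithmetic, using a hypothetical segment and no nameKey:

package main

import (
    "fmt"
    "strconv"
)

func main() {
    plaintext := "potato" // hypothetical file name segment
    dir := 0
    for _, runeValue := range plaintext {
        dir += int(runeValue)
    }
    dir %= 256
    // 112+111+116+97+116+111 = 663, and 663 % 256 = 151,
    // so the obfuscated name starts with "151."
    fmt.Println(strconv.Itoa(dir) + ".")
}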
@@ -664,7 +664,7 @@ func (n *nonce) increment() {
// add a uint64 to the nonce
func (n *nonce) add(x uint64) {
carry := uint16(0)
for i := range 8 {
for i := 0; i < 8; i++ {
digit := (*n)[i]
xDigit := byte(x)
x >>= 8
@@ -888,7 +888,7 @@ func (fh *decrypter) fillBuffer() (err error) {
fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
// Zero out the bad block and continue
for i := range (*fh.buf)[:n] {
fh.buf[i] = 0
(*fh.buf)[i] = 0
}
}
fh.bufIndex = 0

View File

@@ -1307,7 +1307,10 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
end := len(ciphertext)
if underlyingLimit >= 0 {
end = min(int(underlyingOffset+underlyingLimit), len(ciphertext))
end = int(underlyingOffset + underlyingLimit)
if end > len(ciphertext) {
end = len(ciphertext)
}
}
reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
return reader, nil
@@ -1487,7 +1490,7 @@ func TestDecrypterRead(t *testing.T) {
assert.NoError(t, err)
// Test truncating the file at each possible point
for i := range len(file16) - 1 {
for i := 0; i < len(file16)-1; i++ {
what := fmt.Sprintf("truncating to %d/%d", i, len(file16))
cd := newCloseDetector(bytes.NewBuffer(file16[:i]))
fh, err := c.newDecrypter(cd)

View File

@@ -18,7 +18,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
)
// Globals
@@ -131,16 +130,6 @@ trying to recover an encrypted file with errors and it is desired to
recover as much of the file as possible.`,
Default: false,
Advanced: true,
}, {
Name: "strict_names",
Help: `If set, this will raise an error when crypt comes across a filename that can't be decrypted.
(By default, rclone will just log a NOTICE and continue as normal.)
This can happen if encrypted and unencrypted files are stored in the same
directory (which is not recommended.) It may also indicate a more serious
problem that should be investigated.`,
Default: false,
Advanced: true,
}, {
Name: "filename_encoding",
Help: `How to encode the encrypted filename to text string.
@@ -264,39 +253,24 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
cipher: cipher,
}
cache.PinUntilFinalized(f.Fs, f)
// Correct root if definitely pointing to a file
if err == fs.ErrorIsFile {
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
DuplicateFiles: true,
ReadMimeType: false, // MimeTypes not supported with crypt
WriteMimeType: false,
BucketBased: true,
CanHaveEmptyDirectories: true,
SetTier: true,
GetTier: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
ReadDirMetadata: true,
WriteDirMetadata: true,
WriteDirSetModTime: true,
UserDirMetadata: true,
DirModTimeUpdatesOnWrite: true,
PartialUploads: true,
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
DuplicateFiles: true,
ReadMimeType: false, // MimeTypes not supported with crypt
WriteMimeType: false,
BucketBased: true,
CanHaveEmptyDirectories: true,
SetTier: true,
GetTier: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// Enable ListP always
f.features.ListP = f.ListP
return f, err
}
@@ -313,7 +287,6 @@ type Options struct {
PassBadBlocks bool `config:"pass_bad_blocks"`
FilenameEncoding string `config:"filename_encoding"`
Suffix string `config:"suffix"`
StrictNames bool `config:"strict_names"`
}
// Fs represents a wrapped fs.Fs
@@ -348,64 +321,45 @@ func (f *Fs) String() string {
}
// Encrypt an object file name to entries.
func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) error {
func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
remote := obj.Remote()
decryptedRemote, err := f.cipher.DecryptFileName(remote)
if err != nil {
if f.opt.StrictNames {
return fmt.Errorf("%s: undecryptable file name detected: %v", remote, err)
}
fs.Logf(remote, "Skipping undecryptable file name: %v", err)
return nil
fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
return
}
if f.opt.ShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newObject(obj))
return nil
}
// Encrypt a directory file name to entries.
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) error {
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
if f.opt.StrictNames {
return fmt.Errorf("%s: undecryptable dir name detected: %v", remote, err)
}
fs.Logf(remote, "Skipping undecryptable dir name: %v", err)
return nil
fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
return
}
if f.opt.ShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newDir(ctx, dir))
return nil
}
// Encrypt some directory entries. This alters entries returning it as newEntries.
func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
newEntries = entries[:0] // in place filter
errors := 0
var firsterr error
for _, entry := range entries {
switch x := entry.(type) {
case fs.Object:
err = f.add(&newEntries, x)
f.add(&newEntries, x)
case fs.Directory:
err = f.addDir(ctx, &newEntries, x)
f.addDir(ctx, &newEntries, x)
default:
return nil, fmt.Errorf("unknown object type %T", entry)
}
if err != nil {
errors++
if firsterr == nil {
firsterr = err
}
}
}
if firsterr != nil {
return nil, fmt.Errorf("there were %v undecryptable name errors. first error: %v", errors, firsterr)
}
return newEntries, nil
}
@@ -420,40 +374,11 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := f.encryptEntries(ctx, entries)
if err != nil {
return err
}
return callback(entries)
entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
if err != nil {
return nil, err
}
listP := f.Fs.Features().ListP
encryptedDir := f.cipher.EncryptDirName(dir)
if listP == nil {
entries, err := f.Fs.List(ctx, encryptedDir)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, encryptedDir, wrappedCallback)
return f.encryptEntries(ctx, entries)
}
// ListR lists the objects and directories of the Fs starting
@@ -553,7 +478,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash)
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
}
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}
@@ -588,37 +513,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
}
// MkdirMetadata makes the root directory of the Fs object
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
do := f.Fs.Features().MkdirMetadata
if do == nil {
return nil, fs.ErrorNotImplemented
}
newDir, err := do(ctx, f.cipher.EncryptDirName(dir), metadata)
if err != nil {
return nil, err
}
var entries = make(fs.DirEntries, 0, 1)
err = f.addDir(ctx, &entries, newDir)
if err != nil {
return nil, err
}
newDir, ok := entries[0].(fs.Directory)
if !ok {
return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
}
return newDir, nil
}
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
do := f.Fs.Features().DirSetModTime
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx, f.cipher.EncryptDirName(dir), modTime)
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
@@ -860,7 +754,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
}
out := make([]fs.Directory, len(dirs))
for i, dir := range dirs {
out[i] = fs.NewDirWrapper(f.cipher.EncryptDirName(dir.Remote()), dir)
out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
}
return do(ctx, out)
}
@@ -957,7 +851,7 @@ Usage Example:
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "decode":
out := make([]string, 0, len(arg))
@@ -1096,14 +990,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// newDir returns a dir with the Name decrypted
func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
newDir := fs.NewDirCopy(ctx, dir)
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
fs.Debugf(remote, "Undecryptable dir name: %v", err)
} else {
remote = decryptedRemote
newDir.SetRemote(decryptedRemote)
}
newDir := fs.NewDirWrapper(remote, dir)
return newDir
}
@@ -1281,17 +1175,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
return do.Metadata(ctx)
}
// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
do, ok := o.Object.(fs.SetMetadataer)
if !ok {
return fs.ErrorNotImplemented
}
return do.SetMetadata(ctx, metadata)
}
// MimeType returns the content type of the Object if
// known, or "" if not
//
@@ -1317,8 +1200,6 @@ var (
_ fs.Abouter = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)

View File

@@ -24,7 +24,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil),
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
@@ -45,7 +45,7 @@ func TestStandardBase32(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -67,7 +67,7 @@ func TestStandardBase64(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base64"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -89,7 +89,7 @@ func TestStandardBase32768(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base32768"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -111,7 +111,7 @@ func TestOff(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "off"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -137,7 +137,7 @@ func TestObfuscate(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
},
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -164,7 +164,7 @@ func TestNoDataObfuscate(t *testing.T) {
{Name: name, Key: "no_data_encryption", Value: "true"},
},
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})

View File

@@ -25,7 +25,7 @@ func Pad(n int, buf []byte) []byte {
}
length := len(buf)
padding := n - (length % n)
for range padding {
for i := 0; i < padding; i++ {
buf = append(buf, byte(padding))
}
if (len(buf) % n) != 0 {
@@ -54,7 +54,7 @@ func Unpad(n int, buf []byte) ([]byte, error) {
if padding == 0 {
return nil, ErrorPaddingTooShort
}
for i := range padding {
for i := 0; i < padding; i++ {
if buf[length-1-i] != byte(padding) {
return nil, ErrorPaddingNotAllTheSame
}
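
The Pad/Unpad loops touched above implement PKCS#7-style padding, as the error names suggest: the buffer is extended to a multiple of n with bytes whose value equals the number of bytes added, and unpadding checks that every trailing byte carries that same value. A small self-contained sketch under that assumption (n and the sample data are arbitrary):

package main

import (
    "bytes"
    "errors"
    "fmt"
)

// pad extends buf to a multiple of n, PKCS#7 style.
func pad(n int, buf []byte) []byte {
    padding := n - (len(buf) % n)
    return append(buf, bytes.Repeat([]byte{byte(padding)}, padding)...)
}

// unpad removes the padding, checking every padding byte has the right value.
func unpad(n int, buf []byte) ([]byte, error) {
    if len(buf) == 0 || len(buf)%n != 0 {
        return nil, errors.New("bad padded length")
    }
    padding := int(buf[len(buf)-1])
    if padding == 0 || padding > n {
        return nil, errors.New("bad padding byte")
    }
    for i := 0; i < padding; i++ {
        if buf[len(buf)-1-i] != byte(padding) {
            return nil, errors.New("padding not all the same")
        }
    }
    return buf[:len(buf)-padding], nil
}

func main() {
    padded := pad(8, []byte("hello")) // 5 bytes -> 3 bytes of 0x03 appended
    fmt.Printf("%q\n", padded)        // "hello\x03\x03\x03"
    orig, err := unpad(8, padded)
    fmt.Printf("%q %v\n", orig, err) // "hello" <nil>
}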

File diff suppressed because it is too large

View File

@@ -95,7 +95,7 @@ func TestInternalParseExtensions(t *testing.T) {
wantErr error
}{
{"doc", []string{".doc"}, nil},
{" docx ,XLSX, pptx,svg,md", []string{".docx", ".xlsx", ".pptx", ".svg", ".md"}, nil},
{" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
} {
@@ -479,8 +479,8 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
require.NoError(t, f.Purge(ctx, "trashDir"))
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyOrMoveID
func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
func (f *Fs) InternalTestCopyID(t *testing.T) {
ctx := context.Background()
obj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err)
@@ -498,7 +498,7 @@ func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
}
t.Run("BadID", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "moveid", "ID-NOT-FOUND", dir+"/")
err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
require.Error(t, err)
assert.Contains(t, err.Error(), "couldn't find id")
})
@@ -506,71 +506,22 @@ func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
t.Run("Directory", func(t *testing.T) {
rootID, err := f.dirCache.RootID(ctx, false)
require.NoError(t, err)
err = f.copyOrMoveID(ctx, "moveid", rootID, dir+"/")
err = f.copyID(ctx, rootID, dir+"/")
require.Error(t, err)
assert.Contains(t, err.Error(), "can't moveid directory")
assert.Contains(t, err.Error(), "can't copy directory")
})
t.Run("MoveWithoutDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/")
t.Run("WithoutDestName", func(t *testing.T) {
err = f.copyID(ctx, o.id, dir+"/")
require.NoError(t, err)
checkFile(path.Base(existingFile))
})
t.Run("CopyWithoutDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/")
require.NoError(t, err)
checkFile(path.Base(existingFile))
})
t.Run("MoveWithDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/potato.txt")
t.Run("WithDestName", func(t *testing.T) {
err = f.copyID(ctx, o.id, dir+"/potato.txt")
require.NoError(t, err)
checkFile("potato.txt")
})
t.Run("CopyWithDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/potato.txt")
require.NoError(t, err)
checkFile("potato.txt")
})
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/Query
func (f *Fs) InternalTestQuery(t *testing.T) {
ctx := context.Background()
var err error
t.Run("BadQuery", func(t *testing.T) {
_, err = f.query(ctx, "this is a bad query")
require.Error(t, err)
assert.Contains(t, err.Error(), "failed to execute query")
})
t.Run("NoMatch", func(t *testing.T) {
results, err := f.query(ctx, fmt.Sprintf("name='%s' and name!='%s'", existingSubDir, existingSubDir))
require.NoError(t, err)
assert.Len(t, results, 0)
})
t.Run("GoodQuery", func(t *testing.T) {
pathSegments := strings.Split(existingFile, "/")
var parent string
for _, item := range pathSegments {
// the file name contains ' characters which must be escaped
escapedItem := f.opt.Enc.FromStandardName(item)
escapedItem = strings.ReplaceAll(escapedItem, `\`, `\\`)
escapedItem = strings.ReplaceAll(escapedItem, `'`, `\'`)
results, err := f.query(ctx, fmt.Sprintf("%strashed=false and name='%s'", parent, escapedItem))
require.NoError(t, err)
require.True(t, len(results) > 0)
for _, result := range results {
assert.True(t, len(result.Id) > 0)
assert.Equal(t, result.Name, item)
}
parent = fmt.Sprintf("'%s' in parents and ", results[0].Id)
}
})
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
@@ -578,7 +529,7 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
opt := &filter.Options{}
opt := &filter.Opt{}
err := opt.MaxAge.Set("1h")
assert.NoError(t, err)
flt, err := filter.NewFilter(opt)
@@ -659,8 +610,7 @@ func (f *Fs) InternalTest(t *testing.T) {
})
t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyOrMoveID", f.InternalTestCopyOrMoveID)
t.Run("Query", f.InternalTestQuery)
t.Run("CopyID", f.InternalTestCopyID)
t.Run("AgeQuery", f.InternalTestAgeQuery)
t.Run("ShouldRetry", f.InternalTestShouldRetry)
}

View File

@@ -1,639 +0,0 @@
package drive
import (
"context"
"encoding/json"
"fmt"
"maps"
"strconv"
"strings"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/errcount"
"golang.org/x/sync/errgroup"
drive "google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
)
// system metadata keys which this backend owns
var systemMetadataInfo = map[string]fs.MetadataHelp{
"content-type": {
Help: "The MIME type of the file.",
Type: "string",
Example: "text/plain",
},
"mtime": {
Help: "Time of last modification with mS accuracy.",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999Z07:00",
},
"btime": {
Help: "Time of file birth (creation) with mS accuracy. Note that this is only writable on fresh uploads - it can't be written for updates.",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999Z07:00",
},
"copy-requires-writer-permission": {
Help: "Whether the options to copy, print, or download this file, should be disabled for readers and commenters.",
Type: "boolean",
Example: "true",
},
"writers-can-share": {
Help: "Whether users with only writer permission can modify the file's permissions. Not populated and ignored when setting for items in shared drives.",
Type: "boolean",
Example: "false",
},
"viewed-by-me": {
Help: "Whether the file has been viewed by this user.",
Type: "boolean",
Example: "true",
ReadOnly: true,
},
"owner": {
Help: "The owner of the file. Usually an email address. Enable with --drive-metadata-owner.",
Type: "string",
Example: "user@example.com",
},
"permissions": {
Help: "Permissions in a JSON dump of Google drive format. On shared drives these will only be present if they aren't inherited. Enable with --drive-metadata-permissions.",
Type: "JSON",
Example: "{}",
},
"folder-color-rgb": {
Help: "The color for a folder or a shortcut to a folder as an RGB hex string.",
Type: "string",
Example: "881133",
},
"description": {
Help: "A short description of the file.",
Type: "string",
Example: "Contract for signing",
},
"starred": {
Help: "Whether the user has starred the file.",
Type: "boolean",
Example: "false",
},
"labels": {
Help: "Labels attached to this file in a JSON dump of Google drive format. Enable with --drive-metadata-labels.",
Type: "JSON",
Example: "[]",
},
}
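
Reading the map above, the metadata this backend exchanges is a flat string-to-string map keyed by those names, with some keys only present when the corresponding --drive-metadata-* flag mentioned in their help text is enabled. Purely as an illustration of that shape, with invented values based on the Example fields above:

package main

import "fmt"

func main() {
    // Illustrative only: the shape of the string-to-string metadata built from the keys above.
    metadata := map[string]string{
        "content-type": "text/plain",
        "mtime":        "2024-05-01T10:30:00.000Z",
        "btime":        "2024-05-01T10:30:00.000Z",
        "description":  "Contract for signing",
        "starred":      "false",
        "owner":        "user@example.com", // only read/written with --drive-metadata-owner
        "permissions":  "{}",               // JSON dump, only with --drive-metadata-permissions
        "labels":       "[]",               // JSON dump, only with --drive-metadata-labels
    }
    fmt.Println(metadata)
}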
// Extra fields we need to fetch to implement the system metadata above
var metadataFields = googleapi.Field(strings.Join([]string{
"copyRequiresWriterPermission",
"description",
"folderColorRgb",
"hasAugmentedPermissions",
"owners",
"permissionIds",
"permissions",
"properties",
"starred",
"viewedByMe",
"viewedByMeTime",
"writersCanShare",
}, ","))
// Fields we need to read from permissions
var permissionsFields = googleapi.Field(strings.Join([]string{
"*",
"permissionDetails/*",
}, ","))
// getPermission returns permissions for the fileID and permissionID passed in
func (f *Fs) getPermission(ctx context.Context, fileID, permissionID string, useCache bool) (perm *drive.Permission, inherited bool, err error) {
f.permissionsMu.Lock()
defer f.permissionsMu.Unlock()
if useCache {
perm = f.permissions[permissionID]
if perm != nil {
return perm, false, nil
}
}
fs.Debugf(f, "Fetching permission %q", permissionID)
err = f.pacer.Call(func() (bool, error) {
perm, err = f.svc.Permissions.Get(fileID, permissionID).
Fields(permissionsFields).
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, false, err
}
inherited = len(perm.PermissionDetails) > 0 && perm.PermissionDetails[0].Inherited
cleanPermission(perm)
// cache the permission
f.permissions[permissionID] = perm
return perm, inherited, err
}
// Set the permissions on the info
func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions []*drive.Permission) (err error) {
errs := errcount.New()
for _, perm := range permissions {
if perm.Role == "owner" {
// ignore owner permissions - these are set with owner
continue
}
cleanPermissionForWrite(perm)
err := f.pacer.Call(func() (bool, error) {
_, err := f.svc.Permissions.Create(info.Id, perm).
SupportsAllDrives(true).
SendNotificationEmail(false).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
fs.Errorf(f, "Failed to set permission %s for %q: %v", perm.Role, perm.EmailAddress, err)
errs.Add(err)
}
}
err = errs.Err("failed to set permission")
if err != nil {
err = fserrors.NoRetryError(err)
}
return err
}
// Clean attributes from permissions which we can't write
func cleanPermissionForWrite(perm *drive.Permission) {
perm.Deleted = false
perm.DisplayName = ""
perm.Id = ""
perm.Kind = ""
perm.PermissionDetails = nil
perm.TeamDrivePermissionDetails = nil
}
// Clean and cache the permission if not already cached
func (f *Fs) cleanAndCachePermission(perm *drive.Permission) {
f.permissionsMu.Lock()
defer f.permissionsMu.Unlock()
cleanPermission(perm)
if _, found := f.permissions[perm.Id]; !found {
f.permissions[perm.Id] = perm
}
}
// Clean fields we don't need to keep from the permission
func cleanPermission(perm *drive.Permission) {
// DisplayName: Output only. The "pretty" name of the value of the
// permission. The following is a list of examples for each type of
// permission: * `user` - User's full name, as defined for their Google
// account, such as "Joe Smith." * `group` - Name of the Google Group,
// such as "The Company Administrators." * `domain` - String domain
// name, such as "thecompany.com." * `anyone` - No `displayName` is
// present.
perm.DisplayName = ""
// Kind: Output only. Identifies what kind of resource this is. Value:
// the fixed string "drive#permission".
perm.Kind = ""
// PermissionDetails: Output only. Details of whether the permissions on
// this shared drive item are inherited or directly on this item. This
// is an output-only field which is present only for shared drive items.
perm.PermissionDetails = nil
// PhotoLink: Output only. A link to the user's profile photo, if
// available.
perm.PhotoLink = ""
// TeamDrivePermissionDetails: Output only. Deprecated: Output only. Use
// `permissionDetails` instead.
perm.TeamDrivePermissionDetails = nil
}
// Fields we need to read from labels
var labelsFields = googleapi.Field(strings.Join([]string{
"*",
}, ","))
// getLabels returns labels for the fileID passed in
func (f *Fs) getLabels(ctx context.Context, fileID string) (labels []*drive.Label, err error) {
fs.Debugf(f, "Fetching labels for %q", fileID)
listLabels := f.svc.Files.ListLabels(fileID).
Fields(labelsFields).
Context(ctx)
for {
var info *drive.LabelList
err = f.pacer.Call(func() (bool, error) {
info, err = listLabels.Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
labels = append(labels, info.Labels...)
if info.NextPageToken == "" {
break
}
listLabels.PageToken(info.NextPageToken)
}
for _, label := range labels {
cleanLabel(label)
}
return labels, nil
}
// Set the labels on the info
func (f *Fs) setLabels(ctx context.Context, info *drive.File, labels []*drive.Label) (err error) {
if len(labels) == 0 {
return nil
}
req := drive.ModifyLabelsRequest{}
for _, label := range labels {
req.LabelModifications = append(req.LabelModifications, &drive.LabelModification{
FieldModifications: labelFieldsToFieldModifications(label.Fields),
LabelId: label.Id,
})
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Files.ModifyLabels(info.Id, &req).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to set labels: %w", err)
}
return nil
}
// Convert label fields into something which can set the fields
func labelFieldsToFieldModifications(fields map[string]drive.LabelField) (out []*drive.LabelFieldModification) {
for id, field := range fields {
var emails []string
for _, user := range field.User {
emails = append(emails, user.EmailAddress)
}
out = append(out, &drive.LabelFieldModification{
// FieldId: The ID of the field to be modified.
FieldId: id,
// SetDateValues: Replaces the value of a dateString Field with these
// new values. The string must be in the RFC 3339 full-date format:
// YYYY-MM-DD.
SetDateValues: field.DateString,
// SetIntegerValues: Replaces the value of an `integer` field with these
// new values.
SetIntegerValues: field.Integer,
// SetSelectionValues: Replaces a `selection` field with these new
// values.
SetSelectionValues: field.Selection,
// SetTextValues: Sets the value of a `text` field.
SetTextValues: field.Text,
// SetUserValues: Replaces a `user` field with these new values. The
// values must be valid email addresses.
SetUserValues: emails,
})
}
return out
}
// Clean fields we don't need to keep from the label
func cleanLabel(label *drive.Label) {
// Kind: This is always drive#label
label.Kind = ""
for name, field := range label.Fields {
// Kind: This is always drive#labelField.
field.Kind = ""
// Note the fields are copies so we need to write them
// back to the map
label.Fields[name] = field
}
}
// Parse the metadata from drive item
//
// It should return nil if there is no Metadata
func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err error) {
metadata := make(fs.Metadata, 16)
// Dump user metadata first as it overrides system metadata
maps.Copy(metadata, info.Properties)
// System metadata
metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission)
metadata["writers-can-share"] = fmt.Sprint(info.WritersCanShare)
metadata["viewed-by-me"] = fmt.Sprint(info.ViewedByMe)
metadata["content-type"] = info.MimeType
// Owners: Output only. The owner of this file. Only certain legacy
// files may have more than one owner. This field isn't populated for
// items in shared drives.
if o.fs.opt.MetadataOwner.IsSet(rwRead) && len(info.Owners) > 0 {
user := info.Owners[0]
if len(info.Owners) > 1 {
fs.Logf(o, "Ignoring more than 1 owner")
}
if user != nil {
id := user.EmailAddress
if id == "" {
id = user.DisplayName
}
metadata["owner"] = id
}
}
if o.fs.opt.MetadataPermissions.IsSet(rwRead) {
// We only write permissions out if they are not inherited.
//
// On My Drives permissions seem to be attached to every item
// so they will always be written out.
//
// On Shared Drives only non-inherited permissions will be
// written out.
// To read the inherited permissions flag will mean we need to
// read the permissions for each object and the cache will be
// useless. However shared drives don't return permissions
// only permissionIds so will need to fetch them for each
// object. We use HasAugmentedPermissions to see if there are
// special permissions before fetching them to save transactions.
// HasAugmentedPermissions: Output only. Whether there are permissions
// directly on this file. This field is only populated for items in
// shared drives.
if o.fs.isTeamDrive && !info.HasAugmentedPermissions {
// Don't process permissions if there aren't any specifically set
fs.Debugf(o, "Ignoring %d permissions and %d permissionIds as is shared drive with hasAugmentedPermissions false", len(info.Permissions), len(info.PermissionIds))
info.Permissions = nil
info.PermissionIds = nil
}
// PermissionIds: Output only. List of permission IDs for users with
// access to this file.
//
// Only process these if we have no Permissions
if len(info.PermissionIds) > 0 && len(info.Permissions) == 0 {
info.Permissions = make([]*drive.Permission, 0, len(info.PermissionIds))
g, gCtx := errgroup.WithContext(ctx)
g.SetLimit(o.fs.ci.Checkers)
var mu sync.Mutex // protect the info.Permissions from concurrent writes
for _, permissionID := range info.PermissionIds {
permissionID := permissionID
g.Go(func() error {
// must fetch the team drive ones individually to check the inherited flag
perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)
if err != nil {
return fmt.Errorf("failed to read permission: %w", err)
}
// Don't write inherited permissions out
if inherited {
return nil
}
// Don't write owner role out - these are covered by the owner metadata
if perm.Role == "owner" {
return nil
}
mu.Lock()
info.Permissions = append(info.Permissions, perm)
mu.Unlock()
return nil
})
}
err = g.Wait()
if err != nil {
return err
}
} else {
// Clean the fetched permissions
for _, perm := range info.Permissions {
o.fs.cleanAndCachePermission(perm)
}
}
// Permissions: Output only. The full list of permissions for the file.
// This is only available if the requesting user can share the file. Not
// populated for items in shared drives.
if len(info.Permissions) > 0 {
buf, err := json.Marshal(info.Permissions)
if err != nil {
return fmt.Errorf("failed to marshal permissions: %w", err)
}
metadata["permissions"] = string(buf)
}
// Permission propagation
// https://developers.google.com/drive/api/guides/manage-sharing#permission-propagation
// Leads me to believe that in non shared drives, permissions
// are added to each item when you set permissions for a
// folder whereas in shared drives they are inherited and
// placed on the item directly.
}
if info.FolderColorRgb != "" {
metadata["folder-color-rgb"] = info.FolderColorRgb
}
if info.Description != "" {
metadata["description"] = info.Description
}
metadata["starred"] = fmt.Sprint(info.Starred)
metadata["btime"] = info.CreatedTime
metadata["mtime"] = info.ModifiedTime
if o.fs.opt.MetadataLabels.IsSet(rwRead) {
// FIXME would be really nice if we knew if files had labels
// before listing but we need to know all possible label IDs
// to get it in the listing.
labels, err := o.fs.getLabels(ctx, actualID(info.Id))
if err != nil {
return fmt.Errorf("failed to fetch labels: %w", err)
}
buf, err := json.Marshal(labels)
if err != nil {
return fmt.Errorf("failed to marshal labels: %w", err)
}
metadata["labels"] = string(buf)
}
o.metadata = &metadata
return nil
}
// Set the owner on the info
func (f *Fs) setOwner(ctx context.Context, info *drive.File, owner string) (err error) {
perm := drive.Permission{
Role: "owner",
EmailAddress: owner,
// Type: The type of the grantee. Valid values are: * `user` * `group` *
// `domain` * `anyone` When creating a permission, if `type` is `user`
// or `group`, you must provide an `emailAddress` for the user or group.
// When `type` is `domain`, you must provide a `domain`. There isn't
// extra information required for an `anyone` type.
Type: "user",
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Permissions.Create(info.Id, &perm).
SupportsAllDrives(true).
TransferOwnership(true).
// SendNotificationEmail(false). - required apparently!
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to set owner: %w", err)
}
return nil
}
// Call back to set metadata that can't be set on the upload/update
//
// The *drive.File passed in holds the current state of the drive.File
// and this should update it with any modifications.
type updateMetadataFn func(context.Context, *drive.File) error
// read the metadata from meta and write it into updateInfo
//
// update should be true if this is being used to create metadata for
// an update/PATCH call as the rules on what can be updated are
// slightly different there.
//
// It returns a callback which should be called to finish the updates
// after the data is uploaded.
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update, isFolder bool) (callback updateMetadataFn, err error) {
callbackFns := []updateMetadataFn{}
callback = func(ctx context.Context, info *drive.File) error {
for _, fn := range callbackFns {
err := fn(ctx, info)
if err != nil {
return err
}
}
return nil
}
// merge metadata into request and user metadata
for k, v := range meta {
k, v := k, v
// parse a boolean from v and write into out
parseBool := func(out *bool) error {
b, err := strconv.ParseBool(v)
if err != nil {
return fmt.Errorf("can't parse metadata %q = %q: %w", k, v, err)
}
*out = b
return nil
}
switch k {
case "copy-requires-writer-permission":
if isFolder {
fs.Debugf(f, "Ignoring %s=%s as can't set on folders", k, v)
} else if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
return nil, err
}
case "writers-can-share":
if !f.isTeamDrive {
if err := parseBool(&updateInfo.WritersCanShare); err != nil {
return nil, err
}
} else {
fs.Debugf(f, "Ignoring %s=%s as can't set on shared drives", k, v)
}
case "viewed-by-me":
// Can't write this
case "content-type":
updateInfo.MimeType = v
case "owner":
if !f.opt.MetadataOwner.IsSet(rwWrite) {
continue
}
// Can't set Owner on upload so need to set afterwards
callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
err := f.setOwner(ctx, info, v)
if err != nil && f.opt.MetadataOwner.IsSet(rwFailOK) {
fs.Errorf(f, "Ignoring error as failok is set: %v", err)
return nil
}
return err
})
case "permissions":
if !f.opt.MetadataPermissions.IsSet(rwWrite) {
continue
}
var perms []*drive.Permission
err := json.Unmarshal([]byte(v), &perms)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal permissions: %w", err)
}
// Can't set Permissions on upload so need to set afterwards
callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
err := f.setPermissions(ctx, info, perms)
if err != nil && f.opt.MetadataPermissions.IsSet(rwFailOK) {
// We've already logged the permissions errors individually here
fs.Debugf(f, "Ignoring error as failok is set: %v", err)
return nil
}
return err
})
case "labels":
if !f.opt.MetadataLabels.IsSet(rwWrite) {
continue
}
var labels []*drive.Label
err := json.Unmarshal([]byte(v), &labels)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal labels: %w", err)
}
// Can't set Labels on upload so need to set afterwards
callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
err := f.setLabels(ctx, info, labels)
if err != nil && f.opt.MetadataLabels.IsSet(rwFailOK) {
fs.Errorf(f, "Ignoring error as failok is set: %v", err)
return nil
}
return err
})
case "folder-color-rgb":
updateInfo.FolderColorRgb = v
case "description":
updateInfo.Description = v
case "starred":
if err := parseBool(&updateInfo.Starred); err != nil {
return nil, err
}
case "btime":
if update {
fs.Debugf(f, "Skipping btime metadata as can't update it on an existing file: %v", v)
} else {
updateInfo.CreatedTime = v
}
case "mtime":
updateInfo.ModifiedTime = v
default:
if updateInfo.Properties == nil {
updateInfo.Properties = make(map[string]string, 1)
}
updateInfo.Properties[k] = v
}
}
return callback, nil
}
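
Owner, permissions and labels can only be applied once the file exists, which is why updateMetadata returns a callback to run after the upload rather than doing everything up front. A stripped-down sketch of that deferred-callback pattern follows; the names are invented and no Drive API is involved:

package main

import (
    "context"
    "fmt"
)

type updateFn func(ctx context.Context, fileID string) error

// prepare splits metadata into what can be set at upload time and
// callbacks for what must wait until the file exists.
func prepare(meta map[string]string) (upfront map[string]string, callbacks []updateFn) {
    upfront = map[string]string{}
    for k, v := range meta {
        k, v := k, v
        switch k {
        case "owner": // can't be set on upload, defer it
            callbacks = append(callbacks, func(ctx context.Context, fileID string) error {
                fmt.Printf("set owner of %s to %s\n", fileID, v)
                return nil
            })
        default:
            upfront[k] = v
        }
    }
    return upfront, callbacks
}

func main() {
    ctx := context.Background()
    upfront, callbacks := prepare(map[string]string{"description": "demo", "owner": "user@example.com"})
    fileID := "file-123" // pretend the upload returned this ID
    fmt.Println("uploaded with:", upfront)
    for _, fn := range callbacks {
        if err := fn(ctx, fileID); err != nil {
            fmt.Println("post-upload update failed:", err)
        }
    }
}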
// Fetch metadata and update updateInfo if --metadata is in use
func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, updateInfo *drive.File, update bool) (callback updateMetadataFn, err error) {
meta, err := fs.GetMetadataOptions(ctx, f, src, options)
if err != nil {
return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
}
callback, err = f.updateMetadata(ctx, updateInfo, meta, update, false)
if err != nil {
return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
}
return callback, nil
}

View File

@@ -177,7 +177,10 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
if start >= rx.ContentLength {
break
}
reqSize = min(rx.ContentLength-start, int64(rx.f.opt.ChunkSize))
reqSize = rx.ContentLength - start
if reqSize >= int64(rx.f.opt.ChunkSize) {
reqSize = int64(rx.f.opt.ChunkSize)
}
chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
} else {
// If size unknown read into buffer

View File

@@ -8,22 +8,129 @@ package dropbox
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/atexit"
)
const (
maxBatchSize = 1000 // max size the batch can be
defaultTimeoutSync = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
defaultTimeoutAsync = 10 * time.Second // kick off the batch if nothing added for this long (async)
defaultBatchSizeAsync = 100 // default batch size if async
)
// batcher holds info about the current items waiting for upload
type batcher struct {
f *Fs // Fs this batch is part of
mode string // configured batch mode
size int // maximum size for batch
timeout time.Duration // idle timeout for batch
async bool // whether we are using async batching
in chan batcherRequest // incoming items to batch
closed chan struct{} // close to indicate batcher shut down
atexit atexit.FnHandle // atexit handle
shutOnce sync.Once // make sure we shutdown once only
wg sync.WaitGroup // wait for shutdown
}
// batcherRequest holds an incoming request with a place for a reply
type batcherRequest struct {
commitInfo *files.UploadSessionFinishArg
result chan<- batcherResponse
}
// Return true if batcherRequest is the quit request
func (br *batcherRequest) isQuit() bool {
return br.commitInfo == nil
}
// Send this to get the engine to quit
var quitRequest = batcherRequest{}
// batcherResponse holds a response to be delivered to clients waiting
// for a batch to complete.
type batcherResponse struct {
err error
entry *files.FileMetadata
}
// newBatcher creates a new batcher structure
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
if size > maxBatchSize || size < 0 {
return nil, fmt.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
}
async := false
switch mode {
case "sync":
if size <= 0 {
ci := fs.GetConfig(ctx)
size = ci.Transfers
}
if timeout <= 0 {
timeout = defaultTimeoutSync
}
case "async":
if size <= 0 {
size = defaultBatchSizeAsync
}
if timeout <= 0 {
timeout = defaultTimeoutAsync
}
async = true
case "off":
size = 0
default:
return nil, fmt.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
}
b := &batcher{
f: f,
mode: mode,
size: size,
timeout: timeout,
async: async,
in: make(chan batcherRequest, size),
closed: make(chan struct{}),
}
if b.Batching() {
b.atexit = atexit.Register(b.Shutdown)
b.wg.Add(1)
go b.commitLoop(context.Background())
}
return b, nil
}
// Batching returns true if batching is active
func (b *batcher) Batching() bool {
return b.size > 0
}
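
As a side note, here is a minimal sketch of the request/reply-channel handoff that batcherRequest and batcherResponse implement above: each caller sends its payload together with a private response channel and then blocks on that channel until the background worker answers. The request and worker names are illustrative only.

package main

import "fmt"

type request struct {
	payload string
	result  chan<- string
}

func worker(in <-chan request) {
	for req := range in {
		// in the real batcher this is where requests are collected into
		// a batch and committed together
		req.result <- "committed " + req.payload
	}
}

func main() {
	in := make(chan request)
	go worker(in)

	resp := make(chan string, 1) // buffered so the worker never blocks on reply
	in <- request{payload: "file.txt", result: resp}
	fmt.Println(<-resp)
}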
// finishBatch commits the batch, returning a batch status to poll or maybe complete
func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
var arg = &files.UploadSessionFinishBatchArg{
Entries: items,
}
err = f.pacer.Call(func() (bool, error) {
complete, err = f.srv.UploadSessionFinishBatchV2(arg)
if retry, err := shouldRetryExclude(ctx, err); !retry {
return retry, err
err = b.f.pacer.Call(func() (bool, error) {
complete, err = b.f.srv.UploadSessionFinishBatchV2(arg)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
}
// after the first chunk is uploaded, we retry everything except the excluded errors
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {
@@ -32,10 +139,23 @@ func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinish
return complete, nil
}
// Called by the batcher to commit a batch
func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []*files.FileMetadata, errors []error) (err error) {
// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && !signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}
}
}
}()
desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
fs.Debugf(b.f, "Committing %s", desc)
// finalise the batch getting either a result or a job id to poll
complete, err := f.finishBatch(ctx, items)
complete, err := b.finishBatch(ctx, items)
if err != nil {
return err
}
@@ -46,13 +166,19 @@ func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinish
return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
}
// Format results for return
// Report results to clients
var (
errorTag = ""
errorCount = 0
)
for i := range results {
item := entries[i]
resp := batcherResponse{}
if item.Tag == "success" {
results[i] = item.Success
resp.entry = item.Success
} else {
errorTag := item.Tag
errorCount++
errorTag = item.Tag
if item.Failure != nil {
errorTag = item.Failure.Tag
if item.Failure.LookupFailed != nil {
@@ -65,9 +191,112 @@ func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinish
errorTag += "/" + item.Failure.PropertiesError.Tag
}
}
errors[i] = fmt.Errorf("upload failed: %s", errorTag)
resp.err = fmt.Errorf("batch upload failed: %s", errorTag)
}
if !b.async {
results[i] <- resp
}
}
// Show signalled so no need to report error to clients from now on
signalled = true
// Report an error if any failed in the batch
if errorTag != "" {
return fmt.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
}
fs.Debugf(b.f, "Committed %s", desc)
return nil
}
// commitLoop runs the commit engine in the background
func (b *batcher) commitLoop(ctx context.Context) {
var (
items []*files.UploadSessionFinishArg // current batch of uncommitted files
results []chan<- batcherResponse // current batch of clients awaiting results
idleTimer = time.NewTimer(b.timeout)
commit = func() {
err := b.commitBatch(ctx, items, results)
if err != nil {
fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
}
items, results = nil, nil
}
)
defer b.wg.Done()
defer idleTimer.Stop()
idleTimer.Stop()
outer:
for {
select {
case req := <-b.in:
if req.isQuit() {
break outer
}
items = append(items, req.commitInfo)
results = append(results, req.result)
idleTimer.Stop()
if len(items) >= b.size {
commit()
} else {
idleTimer.Reset(b.timeout)
}
case <-idleTimer.C:
if len(items) > 0 {
fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
commit()
}
}
}
// commit any remaining items
if len(items) > 0 {
commit()
}
}
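
The commit engine above follows a common "flush when the batch is full or has been idle too long" pattern. Below is a minimal standalone sketch of the same structure, using plain strings instead of upload commits; the names are illustrative and the timer is only armed while items are pending, mirroring the Stop/Reset handling in commitLoop.

package main

import (
	"fmt"
	"time"
)

func batchLoop(in <-chan string, size int, idle time.Duration, flush func([]string)) {
	var items []string
	timer := time.NewTimer(idle)
	timer.Stop()
	for {
		select {
		case item, ok := <-in:
			if !ok { // input closed - flush the remainder and stop
				if len(items) > 0 {
					flush(items)
				}
				return
			}
			items = append(items, item)
			timer.Stop()
			if len(items) >= size {
				flush(items)
				items = nil
			} else {
				timer.Reset(idle)
			}
		case <-timer.C:
			if len(items) > 0 {
				flush(items)
				items = nil
			}
		}
	}
}

func main() {
	in := make(chan string)
	done := make(chan struct{})
	go func() {
		batchLoop(in, 3, 100*time.Millisecond, func(batch []string) {
			fmt.Println("flushing", batch)
		})
		close(done)
	}()
	for _, s := range []string{"a", "b", "c", "d"} {
		in <- s
	}
	close(in)
	<-done
}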
// Shutdown finishes any pending batches then shuts everything down
//
// Can be called from atexit handler
func (b *batcher) Shutdown() {
if !b.Batching() {
return
}
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Committing uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message
//
// Note that we don't close b.in because that will
// cause write to closed channel in Commit when we are
// exiting due to a signal.
b.in <- quitRequest
b.wg.Wait()
})
}
// Commit commits the file using a batch call, first adding it to the
// batch and then waiting for the batch to complete in a synchronous
// way if async is not set.
func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
select {
case <-b.closed:
return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
default:
}
fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
resp := make(chan batcherResponse, 1)
b.in <- batcherRequest{
commitInfo: commitInfo,
result: resp,
}
// If running async then don't wait for the result
if b.async {
return nil, nil
}
result := <-resp
return result.entry, result.err
}

View File

@@ -55,7 +55,10 @@ func (d *digest) Write(p []byte) (n int, err error) {
n = len(p)
for len(p) > 0 {
d.writtenMore = true
toWrite := min(bytesPerBlock-d.n, len(p))
toWrite := bytesPerBlock - d.n
if toWrite > len(p) {
toWrite = len(p)
}
_, err = d.blockHash.Write(p[:toWrite])
if err != nil {
panic(hashReturnedError)
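
The digest above accumulates writes into fixed-size blocks. As a rough sketch (assuming the 4 MiB block size that bytesPerBlock represents and Dropbox's published content_hash scheme), the final hash is the SHA-256 of the concatenated per-block SHA-256 digests:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

const blockSize = 4 * 1024 * 1024 // assumed to match bytesPerBlock

func contentHash(data []byte) string {
	overall := sha256.New()
	for len(data) > 0 {
		n := min(blockSize, len(data))
		blockSum := sha256.Sum256(data[:n]) // hash each block separately
		overall.Write(blockSum[:])          // then hash the block digests
		data = data[n:]
	}
	return hex.EncodeToString(overall.Sum(nil))
}

func main() {
	fmt.Println(contentHash([]byte("hello world")))
}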

View File

@@ -11,7 +11,7 @@ import (
func testChunk(t *testing.T, chunk int) {
data := make([]byte, chunk)
for i := range chunk {
for i := 0; i < chunk; i++ {
data[i] = 'A'
}
for _, test := range []struct {

View File

@@ -47,8 +47,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/batcher"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
@@ -92,12 +90,9 @@ const (
maxFileNameLength = 255
)
type exportAPIFormat string
type exportExtension string // dotless
var (
// Description of how to auth for this app
dropboxConfig = &oauthutil.Config{
dropboxConfig = &oauth2.Config{
Scopes: []string{
"files.metadata.write",
"files.content.write",
@@ -112,8 +107,7 @@ var (
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
// },
AuthURL: dropbox.OAuthEndpoint("").AuthURL,
TokenURL: dropbox.OAuthEndpoint("").TokenURL,
Endpoint: dropbox.OAuthEndpoint(""),
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -127,28 +121,10 @@ var (
// Errors
errNotSupportedInSharedMode = fserrors.NoRetryError(errors.New("not supported in shared files mode"))
// Configure the batcher
defaultBatcherOptions = batcher.Options{
MaxBatchSize: 1000,
DefaultTimeoutSync: 500 * time.Millisecond,
DefaultTimeoutAsync: 10 * time.Second,
DefaultBatchSizeAsync: 100,
}
exportKnownAPIFormats = map[exportAPIFormat]exportExtension{
"markdown": "md",
"html": "html",
}
// Populated based on exportKnownAPIFormats
exportKnownExtensions = map[exportExtension]exportAPIFormat{}
paperExtension = ".paper"
paperTemplateExtension = ".papert"
)
// Gets an oauth config with the right scopes
func getOauthConfig(m configmap.Mapper) *oauthutil.Config {
func getOauthConfig(m configmap.Mapper) *oauth2.Config {
// If not impersonating, use standard scopes
if impersonate, _ := m.Get("impersonate"); impersonate == "" {
return dropboxConfig
@@ -176,7 +152,7 @@ func init() {
},
})
},
Options: append(append(oauthutil.SharedOptions, []fs.Option{{
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "chunk_size",
Help: fmt.Sprintf(`Upload chunk size (< %v).
@@ -231,12 +207,71 @@ are supported.
Note that we don't unmount the shared folder afterwards so the
--dropbox-shared-folders can be omitted after the first use of a particular
shared folder.
See also --dropbox-root-namespace for an alternative way to work with shared
folders.`,
shared folder.`,
Default: false,
Advanced: true,
}, {
Name: "batch_mode",
Help: `Upload file batching sync|async|off.
This sets the batch mode used by rclone.
For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)
This has 3 possible values
- off - no batching
- sync - batch uploads and check completion (default)
- async - batch upload and don't check completion
Rclone will close any outstanding batches when it exits which may cause
a delay on quit.
`,
Default: "sync",
Advanced: true,
}, {
Name: "batch_size",
Help: `Max number of files in upload batch.
This sets the batch size of files to upload. It has to be less than 1000.
By default this is 0 which means rclone will calculate the batch size
depending on the setting of batch_mode.
- batch_mode: async - default batch_size is 100
- batch_mode: sync - default batch_size is the same as --transfers
- batch_mode: off - not in use
Rclone will close any outstanding batches when it exits which may cause
a delay on quit.
Setting this is a great idea if you are uploading lots of small files
as it will make them upload a lot quicker. You can use --transfers 32 to
maximise throughput.
`,
Default: 0,
Advanced: true,
}, {
Name: "batch_timeout",
Help: `Max time to allow an idle upload batch before uploading.
If an upload batch is idle for more than this long then it will be
uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.
- batch_mode: async - default batch_timeout is 10s
- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: off - not in use
`,
Default: fs.Duration(0),
Advanced: true,
}, {
Name: "batch_commit_timeout",
Help: `Max time to wait for a batch to finish committing`,
Default: fs.Duration(10 * time.Minute),
Advanced: true,
}, {
Name: "pacer_min_sleep",
Default: defaultMinSleep,
@@ -255,66 +290,23 @@ folders.`,
encoder.EncodeDel |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8,
}, {
Name: "root_namespace",
Help: "Specify a different Dropbox namespace ID to use as the root for all paths.",
Default: "",
Advanced: true,
}, {
Name: "export_formats",
Help: `Comma separated list of preferred formats for exporting files
Certain Dropbox files can only be accessed by exporting them to another format.
These include Dropbox Paper documents.
For each such file, rclone will choose the first format on this list that Dropbox
considers valid. If none is valid, it will choose Dropbox's default format.
Known formats include: "html", "md" (markdown)`,
Default: fs.CommaSepList{"html", "md"},
Advanced: true,
}, {
Name: "skip_exports",
Help: "Skip exportable files in all listings.\n\nIf given, exportable files practically become invisible to rclone.",
Default: false,
Advanced: true,
}, {
Name: "show_all_exports",
Default: false,
Help: `Show all exportable files in listings.
Adding this flag will allow all exportable files to be server side copied.
Note that rclone doesn't add extensions to the exportable file names in this mode.
Do **not** use this flag when trying to download exportable files - rclone
will fail to download them.
`,
Advanced: true,
},
}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
}}...),
})
for apiFormat, ext := range exportKnownAPIFormats {
exportKnownExtensions[ext] = apiFormat
}
}
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
AsyncBatch bool `config:"async_batch"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"`
RootNsid string `config:"root_namespace"`
ExportFormats fs.CommaSepList `config:"export_formats"`
SkipExports bool `config:"skip_exports"`
ShowAllExports bool `config:"show_all_exports"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
AsyncBatch bool `config:"async_batch"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote dropbox server
@@ -333,19 +325,9 @@ type Fs struct {
slashRootSlash string // root with "/" prefix and postfix, lowercase
pacer *fs.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none
batcher *batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata]
exportExts []exportExtension
batcher *batcher // batch builder
}
type exportType int
const (
notExport exportType = iota // a regular file
exportHide // should be hidden
exportListOnly // listable, but can't export
exportExportable // can export
)
// Object describes a dropbox object
//
// Dropbox Objects always have full metadata
@@ -357,9 +339,6 @@ type Object struct {
bytes int64 // size of the object
modTime time.Time // time it was last modified
hash string // content_hash of the object
exportType exportType
exportAPIFormat exportAPIFormat
}
// Name of the remote (as passed into NewFs)
@@ -382,46 +361,32 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// Some specific errors which should be excluded from retries
func shouldRetryExclude(ctx context.Context, err error) (bool, error) {
if err == nil {
return false, err
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
// First check for specific errors
//
// These come back from the SDK in a whole host of different
// error types, but there doesn't seem to be a consistent way
// of reading the error cause, so here we just check using the
// error string which isn't perfect but does the job.
if err == nil {
return false, err
}
errString := err.Error()
// First check for specific errors
if strings.Contains(errString, "insufficient_space") {
return false, fserrors.FatalError(err)
} else if strings.Contains(errString, "malformed_path") {
return false, fserrors.NoRetryError(err)
}
return true, err
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if retry, err := shouldRetryExclude(ctx, err); !retry {
return retry, err
}
// Then handle any official Retry-After header from Dropbox's SDK
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
fs.Logf(nil, "Error %v. Too many requests or write operations. Trying again in %d seconds.", err, e.RateLimitError.RetryAfter)
fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
}
return true, err
}
// Keep old behavior for backward compatibility
errString := err.Error()
if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
return true, err
}
@@ -466,7 +431,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
oldToken = strings.TrimSpace(oldToken)
if ok && oldToken != "" && oldToken[0] != '{' {
fs.Infof(name, "Converting token to new format")
newToken := fmt.Sprintf(`{"access_token":%q,"token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
err := config.SetValueAndSave(name, config.ConfigToken, newToken)
if err != nil {
return nil, fmt.Errorf("NewFS convert token: %w", err)
@@ -486,11 +451,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
batcherOptions := defaultBatcherOptions
batcherOptions.Mode = f.opt.BatchMode
batcherOptions.Size = f.opt.BatchSize
batcherOptions.Timeout = time.Duration(f.opt.BatchTimeout)
f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions)
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
if err != nil {
return nil, err
}
@@ -500,14 +461,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
HeaderGenerator: f.headerGenerator,
}
for _, e := range opt.ExportFormats {
ext := exportExtension(e)
if exportKnownExtensions[ext] == "" {
return nil, fmt.Errorf("dropbox: unknown export format '%s'", e)
}
f.exportExts = append(f.exportExts, ext)
}
// unauthorized config for endpoints that fail with auth
ucfg := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
@@ -525,15 +478,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
members := []*team.UserSelectorArg{&user}
args := team.NewMembersGetInfoArgs(members)
memberIDs, err := f.team.MembersGetInfo(args)
memberIds, err := f.team.MembersGetInfo(args)
if err != nil {
return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
}
if len(memberIDs) == 0 || memberIDs[0].MemberInfo == nil || memberIDs[0].MemberInfo.Profile == nil {
if len(memberIds) == 0 || memberIds[0].MemberInfo == nil || memberIds[0].MemberInfo.Profile == nil {
return nil, fmt.Errorf("dropbox team member not found: %q", opt.Impersonate)
}
cfg.AsMemberID = memberIDs[0].MemberInfo.Profile.MemberProfile.TeamMemberId
cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
}
f.srv = files.New(cfg)
@@ -599,11 +552,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.features.Fill(ctx, f)
if f.opt.RootNsid != "" {
f.ns = f.opt.RootNsid
fs.Debugf(f, "Overriding root namespace to %q", f.ns)
} else if strings.HasPrefix(root, "/") {
// If root starts with / then use the actual root
// If root starts with / then use the actual root
if strings.HasPrefix(root, "/") {
var acc *users.FullAccount
err = f.pacer.Call(func() (bool, error) {
acc, err = f.users.GetCurrentAccount()
@@ -660,126 +610,38 @@ func (f *Fs) setRoot(root string) {
}
}
type getMetadataResult struct {
entry files.IsMetadata
notFound bool
err error
}
// getMetadata gets the metadata for a file or directory
func (f *Fs) getMetadata(ctx context.Context, objPath string) (res getMetadataResult) {
res.err = f.pacer.Call(func() (bool, error) {
res.entry, res.err = f.srv.GetMetadata(&files.GetMetadataArg{
func (f *Fs) getMetadata(ctx context.Context, objPath string) (entry files.IsMetadata, notFound bool, err error) {
err = f.pacer.Call(func() (bool, error) {
entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
Path: f.opt.Enc.FromStandardPath(objPath),
})
return shouldRetry(ctx, res.err)
return shouldRetry(ctx, err)
})
if res.err != nil {
switch e := res.err.(type) {
if err != nil {
switch e := err.(type) {
case files.GetMetadataAPIError:
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
res.notFound = true
res.err = nil
notFound = true
err = nil
}
}
}
return
}
// Get metadata such that the result would be exported with the given extension
// Return a channel that will eventually receive the metadata
func (f *Fs) getMetadataForExt(ctx context.Context, filePath string, wantExportExtension exportExtension) chan getMetadataResult {
ch := make(chan getMetadataResult, 1)
wantDownloadable := (wantExportExtension == "")
go func() {
defer close(ch)
res := f.getMetadata(ctx, filePath)
info, ok := res.entry.(*files.FileMetadata)
if !ok { // Can't check anything about file, just return what we have
ch <- res
return
}
// Return notFound if downloadability or extension doesn't match
if wantDownloadable != info.IsDownloadable {
ch <- getMetadataResult{notFound: true}
return
}
if !info.IsDownloadable {
_, ext := f.chooseExportFormat(info)
if ext != wantExportExtension {
ch <- getMetadataResult{notFound: true}
return
}
}
// Return our real result or error
ch <- res
}()
return ch
}
// For a given rclone-path, figure out what the Dropbox-path may be, in order of preference.
// Multiple paths might be plausible, due to export path munging.
func (f *Fs) possibleMetadatas(ctx context.Context, filePath string) (ret []<-chan getMetadataResult) {
ret = []<-chan getMetadataResult{}
// Prefer an exact match
ret = append(ret, f.getMetadataForExt(ctx, filePath, ""))
// Check if we're plausibly an export path, otherwise we're done
if f.opt.SkipExports || f.opt.ShowAllExports {
return
}
dotted := path.Ext(filePath)
if dotted == "" {
return
}
ext := exportExtension(dotted[1:])
if exportKnownExtensions[ext] == "" {
return
}
// We might be an export path! Try all possibilities
base := strings.TrimSuffix(filePath, dotted)
// `foo.papert.md` will only come from `foo.papert`. Never check something like `foo.papert.paper`
if strings.HasSuffix(base, paperTemplateExtension) {
ret = append(ret, f.getMetadataForExt(ctx, base, ext))
return
}
// Otherwise, try both `foo.md` coming from `foo`, or from `foo.paper`
ret = append(ret, f.getMetadataForExt(ctx, base, ext))
ret = append(ret, f.getMetadataForExt(ctx, base+paperExtension, ext))
return
}
// getFileMetadata gets the metadata for a file
func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (*files.FileMetadata, error) {
var res getMetadataResult
// Try all possible metadatas
possibleMetadatas := f.possibleMetadatas(ctx, filePath)
for _, ch := range possibleMetadatas {
res = <-ch
if res.err != nil {
return nil, res.err
}
if !res.notFound {
break
}
func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *files.FileMetadata, err error) {
entry, notFound, err := f.getMetadata(ctx, filePath)
if err != nil {
return nil, err
}
if res.notFound {
if notFound {
return nil, fs.ErrorObjectNotFound
}
fileInfo, ok := res.entry.(*files.FileMetadata)
fileInfo, ok := entry.(*files.FileMetadata)
if !ok {
if _, ok = res.entry.(*files.FolderMetadata); ok {
if _, ok = entry.(*files.FolderMetadata); ok {
return nil, fs.ErrorIsDir
}
return nil, fs.ErrorNotAFile
@@ -788,15 +650,15 @@ func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (*files.FileM
}
// getDirMetadata gets the metadata for a directory
func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (*files.FolderMetadata, error) {
res := f.getMetadata(ctx, dirPath)
if res.err != nil {
return nil, res.err
func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (dirInfo *files.FolderMetadata, err error) {
entry, notFound, err := f.getMetadata(ctx, dirPath)
if err != nil {
return nil, err
}
if res.notFound {
if notFound {
return nil, fs.ErrorDirNotFound
}
dirInfo, ok := res.entry.(*files.FolderMetadata)
dirInfo, ok := entry.(*files.FolderMetadata)
if !ok {
return nil, fs.ErrorIsFile
}
@@ -832,7 +694,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
}
// listSharedFolders lists all available shared folders mounted and not mounted
// listSharedFoldersApi lists all available shared folders mounted and not mounted
// we'll need the id later so we have to return them in original format
func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err error) {
started := false
@@ -996,15 +858,16 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
var res *files.ListFolderResult
for {
if !started {
arg := files.NewListFolderArg(f.opt.Enc.FromStandardPath(root))
arg.Recursive = false
arg.Limit = 1000
arg := files.ListFolderArg{
Path: f.opt.Enc.FromStandardPath(root),
Recursive: false,
Limit: 1000,
}
if root == "/" {
arg.Path = "" // Specify root folder as empty string
}
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(arg)
res, err = f.srv.ListFolder(&arg)
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1057,9 +920,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if err != nil {
return nil, err
}
if o.(*Object).exportType.listable() {
entries = append(entries, o)
}
entries = append(entries, o)
}
}
if !res.HasMore {
@@ -1135,7 +996,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
if root == "/" {
return errors.New("can't remove root directory")
}
encRoot := f.opt.Enc.FromStandardPath(root)
if check {
// check directory exists
@@ -1144,15 +1004,18 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return fmt.Errorf("Rmdir: %w", err)
}
root = f.opt.Enc.FromStandardPath(root)
// check directory empty
arg := files.NewListFolderArg(encRoot)
arg.Recursive = false
arg := files.ListFolderArg{
Path: root,
Recursive: false,
}
if root == "/" {
arg.Path = "" // Specify root folder as empty string
}
var res *files.ListFolderResult
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(arg)
res, err = f.srv.ListFolder(&arg)
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1165,7 +1028,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
// remove it
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.DeleteV2(&files.DeleteArg{Path: encRoot})
_, err = f.srv.DeleteV2(&files.DeleteArg{Path: root})
return shouldRetry(ctx, err)
})
return err
@@ -1195,20 +1058,13 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
// Find and remove existing object
cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
if err != nil {
return nil, err
}
defer cleanup(&err)
// Temporary Object under construction
dstObj := &Object{
fs: f,
@@ -1222,6 +1078,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()),
},
}
var err error
var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) {
result, err = f.srv.CopyV2(&arg)
@@ -1333,16 +1190,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return shouldRetry(ctx, err)
})
if err != nil && createArg.Settings.Expires != nil && strings.Contains(err.Error(), sharing.SharedLinkSettingsErrorNotAuthorized) {
// Some plans can't create links with expiry
fs.Debugf(absPath, "can't create link with expiry, trying without")
createArg.Settings.Expires = nil
err = f.pacer.Call(func() (bool, error) {
linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
return shouldRetry(ctx, err)
})
}
if err != nil && strings.Contains(err.Error(),
sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
@@ -1434,21 +1281,18 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return nil, err
}
var total uint64
used := q.Used
if q.Allocation != nil {
if q.Allocation.Individual != nil {
total += q.Allocation.Individual.Allocated
}
if q.Allocation.Team != nil {
total += q.Allocation.Team.Allocated
// Override used with Team.Used as this includes q.Used already
used = q.Allocation.Team.Used
}
}
usage = &fs.Usage{
Total: fs.NewUsageValue(int64(total)), // quota of bytes that can be used
Used: fs.NewUsageValue(int64(used)), // bytes in use
Free: fs.NewUsageValue(int64(total - used)), // bytes which can be uploaded before reaching the quota
Total: fs.NewUsageValue(int64(total)), // quota of bytes that can be used
Used: fs.NewUsageValue(int64(q.Used)), // bytes in use
Free: fs.NewUsageValue(int64(total - q.Used)), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}
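
The About change above is easy to miss: a team allocation reports a team-wide Used figure that already includes the member's own usage, so it replaces q.Used rather than being added to it. A minimal sketch of the arithmetic (function and parameter names are illustrative):

package main

import "fmt"

func usage(individualAllocated, teamAllocated, memberUsed, teamUsed uint64, hasTeam bool) (total, used, free uint64) {
	total = individualAllocated + teamAllocated
	used = memberUsed
	if hasTeam {
		used = teamUsed // already includes memberUsed, so override rather than add
	}
	return total, used, total - used
}

func main() {
	// total/used/free in bytes (5 TiB, 2 TiB, 3 TiB)
	fmt.Println(usage(0, 5<<40, 1<<30, 2<<40, true))
}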
@@ -1507,14 +1351,16 @@ func (f *Fs) changeNotifyCursor(ctx context.Context) (cursor string, err error)
var startCursor *files.ListFolderGetLatestCursorResult
err = f.pacer.Call(func() (bool, error) {
arg := files.NewListFolderArg(f.opt.Enc.FromStandardPath(f.slashRoot))
arg.Recursive = true
arg := files.ListFolderArg{
Path: f.opt.Enc.FromStandardPath(f.slashRoot),
Recursive: true,
}
if arg.Path == "/" {
arg.Path = ""
}
startCursor, err = f.srv.ListFolderGetLatestCursor(arg)
startCursor, err = f.srv.ListFolderGetLatestCursor(&arg)
return shouldRetry(ctx, err)
})
@@ -1618,50 +1464,8 @@ func (f *Fs) Shutdown(ctx context.Context) error {
return nil
}
func (f *Fs) chooseExportFormat(info *files.FileMetadata) (exportAPIFormat, exportExtension) {
// Find API export formats Dropbox supports for this file
// Sometimes Dropbox lists a format in ExportAs but not ExportOptions, so check both
ei := info.ExportInfo
dropboxFormatStrings := append([]string{ei.ExportAs}, ei.ExportOptions...)
// Find which extensions these correspond to
exportExtensions := map[exportExtension]exportAPIFormat{}
var dropboxPreferredAPIFormat exportAPIFormat
var dropboxPreferredExtension exportExtension
for _, format := range dropboxFormatStrings {
apiFormat := exportAPIFormat(format)
// Only consider formats we know about
if ext, ok := exportKnownAPIFormats[apiFormat]; ok {
if dropboxPreferredAPIFormat == "" {
dropboxPreferredAPIFormat = apiFormat
dropboxPreferredExtension = ext
}
exportExtensions[ext] = apiFormat
}
}
// See if the user picked a valid extension
for _, ext := range f.exportExts {
if apiFormat, ok := exportExtensions[ext]; ok {
return apiFormat, ext
}
}
// If no matches, prefer the first valid format Dropbox lists
return dropboxPreferredAPIFormat, dropboxPreferredExtension
}
// ------------------------------------------------------------
func (et exportType) listable() bool {
return et != exportHide
}
// something we should _try_ to export
func (et exportType) exportable() bool {
return et == exportExportable || et == exportListOnly
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
@@ -1705,32 +1509,6 @@ func (o *Object) Size() int64 {
return o.bytes
}
func (o *Object) setMetadataForExport(info *files.FileMetadata) {
o.bytes = -1
o.hash = ""
if o.fs.opt.SkipExports {
o.exportType = exportHide
return
}
if o.fs.opt.ShowAllExports {
o.exportType = exportListOnly
return
}
var exportExt exportExtension
o.exportAPIFormat, exportExt = o.fs.chooseExportFormat(info)
if o.exportAPIFormat == "" {
o.exportType = exportHide
} else {
o.exportType = exportExportable
// get rid of any paper extension, if present
o.remote = strings.TrimSuffix(o.remote, paperExtension)
// add the export extension
o.remote += "." + string(exportExt)
}
}
// setMetadataFromEntry sets the fs data from a files.FileMetadata
//
// This isn't a complete set of metadata and has an inaccurate date
@@ -1739,10 +1517,6 @@ func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
o.bytes = int64(info.Size)
o.modTime = info.ClientModified
o.hash = info.ContentHash
if !info.IsDownloadable {
o.setMetadataForExport(info)
}
return nil
}
@@ -1806,27 +1580,6 @@ func (o *Object) Storable() bool {
return true
}
func (o *Object) export(ctx context.Context) (in io.ReadCloser, err error) {
if o.exportType == exportListOnly || o.exportAPIFormat == "" {
fs.Debugf(o.remote, "No export format found")
return nil, fs.ErrorObjectNotFound
}
arg := files.ExportArg{Path: o.id, ExportFormat: string(o.exportAPIFormat)}
var exportResult *files.ExportResult
err = o.fs.pacer.Call(func() (bool, error) {
exportResult, in, err = o.fs.srv.Export(&arg)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
o.bytes = int64(exportResult.ExportMetadata.Size)
o.hash = exportResult.ExportMetadata.ExportHash
return
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.fs.opt.SharedFiles {
@@ -1846,10 +1599,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return
}
if o.exportType.exportable() {
return o.export(ctx)
}
fs.FixRangeOption(options, o.bytes)
headers := fs.OpenOptionHeaders(options)
arg := files.DownloadArg{
@@ -1973,15 +1722,19 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
// If we are batching then we should have written all the data now
// store the commit info now for a batch commit
if o.fs.batcher.Batching() {
return o.fs.batcher.Commit(ctx, o.remote, args)
return o.fs.batcher.Commit(ctx, args)
}
err = o.fs.pacer.Call(func() (bool, error) {
entry, err = o.fs.srv.UploadSessionFinish(args, nil)
if retry, err := shouldRetryExclude(ctx, err); !retry {
return retry, err
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
}
// after the first chunk is uploaded, we retry everything except the excluded errors
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {

View File

@@ -1,16 +1,9 @@
package dropbox
import (
"context"
"io"
"strings"
"testing"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestInternalCheckPathLength(t *testing.T) {
@@ -49,54 +42,3 @@ func TestInternalCheckPathLength(t *testing.T) {
assert.Equal(t, test.ok, err == nil, test.in)
}
}
func (f *Fs) importPaperForTest(t *testing.T) {
content := `# test doc
Lorem ipsum __dolor__ sit amet
[link](http://google.com)
`
arg := files.PaperCreateArg{
Path: f.slashRootSlash + "export.paper",
ImportFormat: &files.ImportFormat{Tagged: dropbox.Tagged{Tag: files.ImportFormatMarkdown}},
}
var err error
err = f.pacer.Call(func() (bool, error) {
reader := strings.NewReader(content)
_, err = f.srv.PaperCreate(&arg, reader)
return shouldRetry(context.Background(), err)
})
require.NoError(t, err)
}
func (f *Fs) InternalTestPaperExport(t *testing.T) {
ctx := context.Background()
f.importPaperForTest(t)
f.exportExts = []exportExtension{"html"}
obj, err := f.NewObject(ctx, "export.html")
require.NoError(t, err)
rc, err := obj.Open(ctx)
require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }()
buf, err := io.ReadAll(rc)
require.NoError(t, err)
text := string(buf)
for _, excerpt := range []string{
"Lorem ipsum",
"<b>dolor</b>",
`href="http://google.com"`,
} {
require.Contains(t, text, excerpt)
}
}
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PaperExport", f.InternalTestPaperExport)
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -28,14 +28,14 @@ var retryErrorCodes = []int{
509, // Bandwidth Limit Exceeded
}
var errorRegex = regexp.MustCompile(`#(\d{1,3})`)
var errorRegex = regexp.MustCompile(`#\d{1,3}`)
func parseFichierError(err error) int {
matches := errorRegex.FindStringSubmatch(err.Error())
if len(matches) == 0 {
return 0
}
code, err := strconv.Atoi(matches[1])
code, err := strconv.Atoi(matches[0])
if err != nil {
fs.Debugf(nil, "failed parsing fichier error: %v", err)
return 0
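
The change above adds a capture group so the numeric code can be read without the leading '#'. A minimal sketch of the difference between matches[0] and matches[1] (the error message text is illustrative):

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var errorRegex = regexp.MustCompile(`#(\d{1,3})`)

func main() {
	matches := errorRegex.FindStringSubmatch("Flood detected #412")
	fmt.Println(matches[0]) // "#412" - the full match, including '#'
	code, _ := strconv.Atoi(matches[1])
	fmt.Println(code) // 412 - just the captured digits
}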
@@ -61,7 +61,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
return false, err // No such user
case 186:
return false, err // IP blocked?
case 374, 412: // Flood detected seems to be #412 now
case 374:
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
time.Sleep(30 * time.Second)
default:
@@ -408,32 +408,6 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
return response, nil
}
func (f *Fs) moveDir(ctx context.Context, folderID int, newLeaf string, destinationFolderID int) (response *MoveDirResponse, err error) {
request := &MoveDirRequest{
FolderID: folderID,
DestinationFolderID: destinationFolderID,
Rename: newLeaf,
// DestinationUser: destinationUser,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/mv.cgi",
}
response = &MoveDirResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't move dir: %w", err)
}
return response, nil
}
func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
request := &CopyFileRequest{
URLs: []string{url},

View File

@@ -441,28 +441,23 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
srcFs := srcObj.fs
// Find current directory ID
srcLeaf, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcObj.remote, false)
_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil {
return nil, err
}
// Create temporary object
dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote)
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}
// If it is in the correct directory, just rename it
var url string
if srcDirectoryID == dstDirectoryID {
// No rename needed
if srcLeaf == dstLeaf {
return src, nil
}
resp, err := f.renameFile(ctx, srcObj.file.URL, dstLeaf)
if currentDirectoryID == directoryID {
resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
if err != nil {
return nil, fmt.Errorf("couldn't rename file: %w", err)
}
@@ -471,16 +466,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
url = resp.URLs[0].URL
} else {
dstFolderID, err := strconv.Atoi(dstDirectoryID)
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
rename := dstLeaf
// No rename needed
if srcLeaf == dstLeaf {
rename = ""
}
resp, err := f.moveFile(ctx, srcObj.file.URL, dstFolderID, rename)
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, fmt.Errorf("couldn't move file: %w", err)
}
@@ -498,51 +488,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove.
//
// If destination exists then return fs.ErrorDirExists.
//
// This is complicated by the fact that we can't use moveDir to move
// to a different directory AND rename at the same time as it can
// overwrite files in the source directory.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
if err != nil {
return err
}
srcIDnumeric, err := strconv.Atoi(srcID)
if err != nil {
return err
}
dstDirectoryIDnumeric, err := strconv.Atoi(dstDirectoryID)
if err != nil {
return err
}
var resp *MoveDirResponse
resp, err = f.moveDir(ctx, srcIDnumeric, dstLeaf, dstDirectoryIDnumeric)
if err != nil {
return fmt.Errorf("couldn't rename leaf: %w", err)
}
if resp.Status != "OK" {
return fmt.Errorf("couldn't rename leaf: %s", resp.Message)
}
srcFs.dirCache.FlushDir(srcRemote)
return nil
}
// Copy src to this remote using server side move operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
@@ -616,7 +561,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)

View File

@@ -70,22 +70,6 @@ type MoveFileResponse struct {
URLs []string `json:"urls"`
}
// MoveDirRequest is the request structure of the corresponding request
type MoveDirRequest struct {
FolderID int `json:"folder_id"`
DestinationFolderID int `json:"destination_folder_id,omitempty"`
DestinationUser string `json:"destination_user"`
Rename string `json:"rename,omitempty"`
}
// MoveDirResponse is the response structure of the corresponding request
type MoveDirResponse struct {
Status string `json:"status"`
Message string `json:"message"`
OldName string `json:"old_name"`
NewName string `json:"new_name"`
}
// CopyFileRequest is the request structure of the corresponding request
type CopyFileRequest struct {
URLs []string `json:"urls"`

View File

@@ -216,11 +216,11 @@ var ItemFields = mustFields(Item{})
// fields returns the JSON fields in use by opt as a | separated
// string.
func fields(opt any) (pipeTags string, err error) {
func fields(opt interface{}) (pipeTags string, err error) {
var tags []string
def := reflect.ValueOf(opt)
defType := def.Type()
for i := range def.NumField() {
for i := 0; i < def.NumField(); i++ {
field := defType.Field(i)
tag, ok := field.Tag.Lookup("json")
if !ok {
@@ -239,7 +239,7 @@ func fields(opt any) (pipeTags string, err error) {
// mustFields returns the JSON fields in use by opt as a | separated
// string. It panics on failure.
func mustFields(opt any) string {
func mustFields(opt interface{}) string {
tags, err := fields(opt)
if err != nil {
panic(err)
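
fields() above relies on reflecting the `json` struct tags. A minimal standalone sketch of that pattern (the Item type here is a stand-in, not the backend's real Item):

package main

import (
	"fmt"
	"reflect"
	"strings"
)

type Item struct {
	Name string `json:"fi_name"`
	Size int64  `json:"fi_size"`
}

// jsonTags collects the json tag of every field and joins them with "|".
func jsonTags(v any) string {
	var tags []string
	t := reflect.TypeOf(v)
	for i := range t.NumField() {
		if tag, ok := t.Field(i).Tag.Lookup("json"); ok {
			tags = append(tags, tag)
		}
	}
	return strings.Join(tags, "|")
}

func main() {
	fmt.Println(jsonTags(Item{})) // fi_name|fi_size
}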
@@ -351,12 +351,12 @@ type SpaceInfo struct {
// DeleteResponse is returned from doDeleteFile
type DeleteResponse struct {
Status
Deleted []string `json:"deleted"`
Errors []any `json:"errors"`
ID string `json:"fi_id"`
BackgroundTask int `json:"backgroundtask"`
UsSize string `json:"us_size"`
PaSize string `json:"pa_size"`
Deleted []string `json:"deleted"`
Errors []interface{} `json:"errors"`
ID string `json:"fi_id"`
BackgroundTask int `json:"backgroundtask"`
UsSize string `json:"us_size"`
PaSize string `json:"pa_size"`
//SpaceInfo SpaceInfo `json:"spaceinfo"`
}

View File

@@ -158,9 +158,9 @@ type Fs struct {
tokenMu sync.Mutex // hold when reading the token
token string // current access token
tokenExpiry time.Time // time the current token expires
tokenExpired atomic.Int32
canCopyWithName bool // set if detected that can use fi_name in copy
precision time.Duration // precision reported
tokenExpired int32 // read and written with atomic
canCopyWithName bool // set if detected that can use fi_name in copy
precision time.Duration // precision reported
}
// Object describes a filefabric object
@@ -243,7 +243,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
err = status // return the error from the RPC
code := status.GetCode()
if code == "login_token_expired" {
f.tokenExpired.Add(1)
atomic.AddInt32(&f.tokenExpired, 1)
} else {
for _, retryCode := range retryStatusCodes {
if code == retryCode.code {
@@ -323,12 +323,12 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
var refreshed = false
defer func() {
if refreshed {
f.tokenExpired.Store(0)
atomic.StoreInt32(&f.tokenExpired, 0)
}
f.tokenMu.Unlock()
}()
expired := f.tokenExpired.Load() != 0
expired := atomic.LoadInt32(&f.tokenExpired) != 0
if expired {
fs.Debugf(f, "Token invalid - refreshing")
}
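
The diff above moves tokenExpired to the typed atomic.Int32 from sync/atomic (available since Go 1.19), replacing the bare int32 plus package-level atomic.AddInt32/LoadInt32/StoreInt32 calls. A minimal sketch of the equivalent operations:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var tokenExpired atomic.Int32

	tokenExpired.Add(1)                 // mark the token as expired
	expired := tokenExpired.Load() != 0 // check it
	fmt.Println("expired:", expired)    // expired: true

	tokenExpired.Store(0) // reset after refreshing the token
	fmt.Println("expired:", tokenExpired.Load() != 0)
}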
@@ -371,7 +371,7 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
}
// params for rpc
type params map[string]any
type params map[string]interface{}
// rpc calls the rpc.php method of the SME file fabric
//

View File

@@ -1,900 +0,0 @@
// Package filescom provides an interface to the Files.com
// object storage system.
package filescom
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"path"
"slices"
"strings"
"time"
files_sdk "github.com/Files-com/files-sdk-go/v3"
"github.com/Files-com/files-sdk-go/v3/bundle"
"github.com/Files-com/files-sdk-go/v3/file"
file_migration "github.com/Files-com/files-sdk-go/v3/filemigration"
"github.com/Files-com/files-sdk-go/v3/folder"
"github.com/Files-com/files-sdk-go/v3/session"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
)
/*
Run of rclone info
stringNeedsEscaping = []rune{
'/', '\x00'
}
maxFileLength = 512 // for 1 byte unicode characters
maxFileLength = 512 // for 2 byte unicode characters
maxFileLength = 512 // for 3 byte unicode characters
maxFileLength = 512 // for 4 byte unicode characters
canWriteUnnormalized = true
canReadUnnormalized = true
canReadRenormalized = true
canStream = true
*/
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
folderNotEmpty = "processing-failure/folder-not-empty"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "filescom",
Description: "Files.com",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "site",
Help: "Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com).",
}, {
Name: "username",
Help: "The username used to authenticate with Files.com.",
}, {
Name: "password",
Help: "The password used to authenticate with Files.com.",
IsPassword: true,
}, {
Name: "api_key",
Help: "The API key used to authenticate with Files.com.",
Advanced: true,
Sensitive: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeRightSpace |
encoder.EncodeRightCrLfHtVt |
encoder.EncodeInvalidUtf8),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Site string `config:"site"`
Username string `config:"username"`
Password string `config:"password"`
APIKey string `config:"api_key"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote files.com server
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
fileClient *file.Client // the connection to the file API
folderClient *folder.Client // the connection to the folder API
migrationClient *file_migration.Client // the connection to the file migration API
bundleClient *bundle.Client // the connection to the bundle API
pacer *fs.Pacer // pacer for API calls
}
// Object describes a files object
//
// Will definitely have info but maybe not meta
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
size int64 // size of the object
crc32 string // CRC32 of the object content
md5 string // MD5 of the object content
mimeType string // Content-Type of the object
modTime time.Time // modification time of the object
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("files root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Encode remote and turn it into an absolute path in the share
func (f *Fs) absPath(remote string) string {
return f.opt.Enc.FromStandardPath(path.Join(f.root, remote))
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if apiErr, ok := err.(files_sdk.ResponseError); ok {
if slices.Contains(retryErrorCodes, apiErr.HttpCode) {
fs.Debugf(nil, "Retrying API error %v", err)
return true, err
}
}
return fserrors.ShouldRetry(err), err
}
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *files_sdk.File, err error) {
params := files_sdk.FileFindParams{
Path: f.absPath(path),
}
var file files_sdk.File
err = f.pacer.Call(func() (bool, error) {
file, err = f.fileClient.Find(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
return &file, nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
root = strings.Trim(root, "/")
config, err := newClientConfig(ctx, opt)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: root,
opt: *opt,
fileClient: &file.Client{Config: config},
folderClient: &folder.Client{Config: config},
migrationClient: &file_migration.Client{Config: config},
bundleClient: &bundle.Client{Config: config},
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CaseInsensitive: true,
CanHaveEmptyDirectories: true,
ReadMimeType: true,
DirModTimeUpdatesOnWrite: true,
}).Fill(ctx, f)
if f.root != "" {
info, err := f.readMetaDataForPath(ctx, "")
if err == nil && !info.IsDir() {
f.root = path.Dir(f.root)
if f.root == "." {
f.root = ""
}
return f, fs.ErrorIsFile
}
}
return f, err
}
func newClientConfig(ctx context.Context, opt *Options) (config files_sdk.Config, err error) {
if opt.Site != "" {
if strings.Contains(opt.Site, ".") {
config.EndpointOverride = opt.Site
} else {
config.Subdomain = opt.Site
}
_, err = url.ParseRequestURI(config.Endpoint())
if err != nil {
err = fmt.Errorf("invalid domain or subdomain: %v", opt.Site)
return
}
}
config = config.Init().SetCustomClient(fshttp.NewClient(ctx))
if opt.APIKey != "" {
config.APIKey = opt.APIKey
return
}
if opt.Username == "" {
err = errors.New("username not found")
return
}
if opt.Password == "" {
err = errors.New("password not found")
return
}
opt.Password, err = obscure.Reveal(opt.Password)
if err != nil {
return
}
sessionClient := session.Client{Config: config}
params := files_sdk.SessionCreateParams{
Username: opt.Username,
Password: opt.Password,
}
thisSession, err := sessionClient.Create(params, files_sdk.WithContext(ctx))
if err != nil {
err = fmt.Errorf("couldn't create session: %w", err)
return
}
config.SessionId = thisSession.Id
return
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *files_sdk.File) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
var err error
if file != nil {
err = o.setMetaData(file)
} else {
err = o.readMetaData(ctx) // reads info and meta, returning an error
}
if err != nil {
return nil, err
}
return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
var it *folder.Iter
params := files_sdk.FolderListForParams{
Path: f.absPath(dir),
}
err = f.pacer.Call(func() (bool, error) {
it, err = f.folderClient.ListFor(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't list files: %w", err)
}
for it.Next() {
item := ptr(it.File())
remote := f.opt.Enc.ToStandardPath(item.DisplayName)
remote = path.Join(dir, remote)
if remote == dir {
continue
}
if item.IsDir() {
d := fs.NewDir(remote, item.ModTime())
entries = append(entries, d)
} else {
o, err := f.newObjectWithInfo(ctx, remote, item)
if err != nil {
return nil, err
}
entries = append(entries, o)
}
}
err = it.Err()
if files_sdk.IsNotExist(err) {
return nil, fs.ErrorDirNotFound
}
return
}
// Creates from the parameters passed in a half-finished Object which
// must have setMetaData called on it before use
//
// Returns the object and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, err error) {
// Create the directory for the object if it doesn't exist
err = f.mkParentDir(ctx, remote)
if err != nil {
return
}
// Temporary Object under construction
o = &Object{
fs: f,
remote: remote,
}
return o, nil
}
// Put the object
//
// Copy the reader into the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
fs := &Object{
fs: f,
remote: src.Remote(),
}
return fs, fs.Update(ctx, in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
func (f *Fs) mkdir(ctx context.Context, path string) error {
if path == "" || path == "." {
return nil
}
params := files_sdk.FolderCreateParams{
Path: path,
MkdirParents: ptr(true),
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.folderClient.Create(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
if files_sdk.IsExist(err) {
return nil
}
return err
}
// Make the parent directory of remote
func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
return f.mkdir(ctx, path.Dir(f.absPath(remote)))
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.mkdir(ctx, f.absPath(dir))
}
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
o := Object{
fs: f,
remote: dir,
}
return o.SetModTime(ctx, modTime)
}
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in it
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
path := f.absPath(dir)
if path == "" || path == "." {
return errors.New("can't purge root directory")
}
params := files_sdk.FileDeleteParams{
Path: path,
Recursive: ptr(!check),
}
err := f.pacer.Call(func() (bool, error) {
err := f.fileClient.Delete(params, files_sdk.WithContext(ctx))
// Allow for eventual consistency deletion of child objects.
if isFolderNotEmpty(err) {
return true, err
}
return shouldRetry(ctx, err)
})
if err != nil {
if files_sdk.IsNotExist(err) {
return fs.ErrorDirNotFound
} else if isFolderNotEmpty(err) {
return fs.ErrorDirectoryNotEmpty
}
return fmt.Errorf("rmdir failed: %w", err)
}
return nil
}
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
}
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dstObj fs.Object, err error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
err = srcObj.readMetaData(ctx)
if err != nil {
return
}
srcPath := srcObj.fs.absPath(srcObj.remote)
dstPath := f.absPath(remote)
if strings.EqualFold(srcPath, dstPath) {
return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
// Create temporary object
dstObj, err = f.createObject(ctx, remote)
if err != nil {
return
}
// Copy the object
params := files_sdk.FileCopyParams{
Path: srcPath,
Destination: dstPath,
Overwrite: ptr(true),
}
var action files_sdk.FileAction
err = f.pacer.Call(func() (bool, error) {
action, err = f.fileClient.Copy(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
if err != nil {
return
}
err = f.waitForAction(ctx, action, "copy")
if err != nil {
return
}
err = dstObj.SetModTime(ctx, srcObj.modTime)
return
}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
}
// move a file or folder
func (f *Fs) move(ctx context.Context, src *Fs, srcRemote string, dstRemote string) (info *files_sdk.File, err error) {
// Move the object
params := files_sdk.FileMoveParams{
Path: src.absPath(srcRemote),
Destination: f.absPath(dstRemote),
}
var action files_sdk.FileAction
err = f.pacer.Call(func() (bool, error) {
action, err = f.fileClient.Move(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
err = f.waitForAction(ctx, action, "move")
if err != nil {
return nil, err
}
info, err = f.readMetaDataForPath(ctx, dstRemote)
return
}
func (f *Fs) waitForAction(ctx context.Context, action files_sdk.FileAction, operation string) (err error) {
var migration files_sdk.FileMigration
err = f.pacer.Call(func() (bool, error) {
migration, err = f.migrationClient.Wait(action, func(migration files_sdk.FileMigration) {
// noop
}, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
if err == nil && migration.Status != "completed" {
return fmt.Errorf("%v did not complete successfully: %v", operation, migration.Status)
}
return
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Create temporary object
dstObj, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}
// Do the move
info, err := f.move(ctx, srcObj.fs, srcObj.remote, dstObj.remote)
if err != nil {
return nil, err
}
err = dstObj.setMetaData(info)
if err != nil {
return nil, err
}
return dstObj, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
// Check if destination exists
_, err = f.readMetaDataForPath(ctx, dstRemote)
if err == nil {
return fs.ErrorDirExists
}
// Create temporary object
dstObj, err := f.createObject(ctx, dstRemote)
if err != nil {
return
}
// Do the move
_, err = f.move(ctx, srcFs, srcRemote, dstObj.remote)
return
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (url string, err error) {
params := files_sdk.BundleCreateParams{
Paths: []string{f.absPath(remote)},
}
if expire < fs.DurationOff {
params.ExpiresAt = ptr(time.Now().Add(time.Duration(expire)))
}
var bundle files_sdk.Bundle
err = f.pacer.Call(func() (bool, error) {
bundle, err = f.bundleClient.Create(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
url = bundle.Url
return
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.NewHashSet(hash.CRC32, hash.MD5)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the requested hash (CRC-32 or MD5) of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
switch t {
case hash.CRC32:
if o.crc32 == "" {
return "", nil
}
return fmt.Sprintf("%08s", o.crc32), nil
case hash.MD5:
return o.md5, nil
}
return "", hash.ErrUnsupported
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
}
// setMetaData sets the metadata from info
func (o *Object) setMetaData(file *files_sdk.File) error {
o.modTime = file.ModTime()
if !file.IsDir() {
o.size = file.Size
o.crc32 = file.Crc32
o.md5 = file.Md5
o.mimeType = file.MimeType
}
return nil
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
file, err := o.fs.readMetaDataForPath(ctx, o.remote)
if err != nil {
if files_sdk.IsNotExist(err) {
return fs.ErrorObjectNotFound
}
return err
}
if file.IsDir() {
return fs.ErrorIsDir
}
return o.setMetaData(file)
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
params := files_sdk.FileUpdateParams{
Path: o.fs.absPath(o.remote),
ProvidedMtime: &modTime,
}
var file files_sdk.File
err = o.fs.pacer.Call(func() (bool, error) {
file, err = o.fs.fileClient.Update(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
if err != nil {
return err
}
return o.setMetaData(&file)
}
// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
return true
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Offset and Count for range download
var offset, count int64
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, count = x.Decode(o.size)
if count < 0 {
count = o.size - offset
}
case *fs.SeekOption:
offset = x.Offset
count = o.size - offset
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
params := files_sdk.FileDownloadParams{
Path: o.fs.absPath(o.remote),
}
headers := &http.Header{}
headers.Set("Range", fmt.Sprintf("bytes=%v-%v", offset, offset+count-1))
err = o.fs.pacer.Call(func() (bool, error) {
_, err = o.fs.fileClient.Download(
params,
files_sdk.WithContext(ctx),
files_sdk.RequestHeadersOption(headers),
files_sdk.ResponseBodyOption(func(closer io.ReadCloser) error {
in = closer
return err
}),
)
return shouldRetry(ctx, err)
})
return
}
// Returns a pointer to t - useful for returning pointers to constants
func ptr[T any](t T) *T {
return &t
}
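// isFolderNotEmpty reports whether err is a files.com response error of
// type folderNotEmpty (i.e. the directory still has entries).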
func isFolderNotEmpty(err error) bool {
var re files_sdk.ResponseError
ok := errors.As(err, &re)
return ok && re.Type == folderNotEmpty
}
// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
uploadOpts := []file.UploadOption{
file.UploadWithContext(ctx),
file.UploadWithReader(in),
file.UploadWithDestinationPath(o.fs.absPath(o.remote)),
file.UploadWithProvidedMtime(src.ModTime(ctx)),
}
err := o.fs.pacer.Call(func() (bool, error) {
err := o.fs.fileClient.Upload(uploadOpts...)
return shouldRetry(ctx, err)
})
if err != nil {
return err
}
return o.readMetaData(ctx)
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
params := files_sdk.FileDeleteParams{
Path: o.fs.absPath(o.remote),
}
return o.fs.pacer.Call(func() (bool, error) {
err := o.fs.fileClient.Delete(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.mimeType
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)
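For orientation, the interface assertions above are exactly what rclone's generic layer calls into. A minimal sketch of exercising them from a small program, assuming a remote named "TestFilesCom:" is already configured (the remote name and the panic-style error handling are illustrative only):

// Sketch: open a configured files.com remote via the generic fs layer
// and list its root. "TestFilesCom:" is an assumed remote name.
package main

import (
	"context"
	"fmt"

	_ "github.com/rclone/rclone/backend/filescom" // register the backend
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configfile"
)

func main() {
	ctx := context.Background()
	configfile.Install() // load the rclone config file so the remote is known
	f, err := fs.NewFs(ctx, "TestFilesCom:")
	if err != nil {
		panic(err)
	}
	entries, err := f.List(ctx, "")
	if err != nil {
		panic(err)
	}
	for _, entry := range entries {
		fmt.Println(entry.Remote())
	}
}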

View File

@@ -1,17 +0,0 @@
// Test Files filesystem interface
package filescom_test
import (
"testing"
"github.com/rclone/rclone/backend/filescom"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFilesCom:",
NilObject: (*filescom.Object)(nil),
})
}

View File

@@ -15,7 +15,7 @@ import (
"sync"
"time"
"github.com/jlaffaye/ftp"
"github.com/rclone/ftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
@@ -28,7 +28,6 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/proxy"
"github.com/rclone/rclone/lib/readers"
)
@@ -85,7 +84,7 @@ to an encrypted one. Cannot be used in combination with implicit FTPS.`,
Default: false,
}, {
Name: "concurrency",
Help: strings.ReplaceAll(`Maximum number of FTP simultaneous connections, 0 for unlimited.
Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.
Note that setting this is very likely to cause deadlocks so it should
be used with care.
@@ -99,7 +98,7 @@ maximum of |--checkers| and |--transfers|.
So for |concurrency 3| you'd use |--checkers 2 --transfers 2
--check-first| or |--checkers 1 --transfers 1|.
`, "|", "`"),
`, "|", "`", -1),
Default: 0,
Advanced: true,
}, {
@@ -173,34 +172,6 @@ Enabled by default. Use 0 to disable.`,
Help: `Allow asking for FTP password when needed.
If this is set and no password is supplied then rclone will ask for a password
`,
Advanced: true,
}, {
Name: "socks_proxy",
Default: "",
Help: `Socks 5 proxy host.
Supports the format user:pass@host:port, user@host:port, host:port.
Example:
myUser:myPass@localhost:9005
`,
Advanced: true,
}, {
Name: "no_check_upload",
Default: false,
Help: `Don't check the upload is OK
Normally rclone will try to check the upload exists after it has
uploaded a file to make sure the size and modification time are as
expected.
This flag stops rclone doing these checks. This enables uploading to
folders which are write only.
You will likely need to use the --inplace flag also if uploading to
a write only folder.
`,
Advanced: true,
}, {
@@ -247,8 +218,6 @@ type Options struct {
ShutTimeout fs.Duration `config:"shut_timeout"`
AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"`
SocksProxy string `config:"socks_proxy"`
NoCheckUpload bool `config:"no_check_upload"`
}
// Fs represents a remote FTP server
@@ -266,6 +235,7 @@ type Fs struct {
pool []*ftp.ServerConn
drain *time.Timer // used to drain the pool when we stop using the connections
tokens *pacer.TokenDispenser
tlsConf *tls.Config
pacer *fs.Pacer // pacer for FTP connections
fGetTime bool // true if the ftp library accepts GetTime
fSetTime bool // true if the ftp library accepts SetTime
@@ -378,36 +348,10 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
return fserrors.ShouldRetry(err), err
}
// Get a TLS config with a unique session cache.
//
// We can't share session caches between connections.
//
// See: https://github.com/rclone/rclone/issues/7234
func (f *Fs) tlsConfig() *tls.Config {
var tlsConfig *tls.Config
if f.opt.TLS || f.opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
if f.opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
}
if f.opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
return tlsConfig
}
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server")
// tls.Config for this connection only. Will be used for data
// and control connections.
tlsConfig := f.tlsConfig()
// Make ftp library dial with fshttp dialer optionally using TLS
initialConnection := true
dial := func(network, address string) (conn net.Conn, err error) {
@@ -415,17 +359,12 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
defer func() {
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
}()
baseDialer := fshttp.NewDialer(ctx)
if f.opt.SocksProxy != "" {
conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
} else {
conn, err = baseDialer.Dial(network, address)
}
conn, err = fshttp.NewDialer(ctx).Dial(network, address)
if err != nil {
return nil, err
}
// Connect using cleartext only for non TLS
if tlsConfig == nil {
if f.tlsConf == nil {
return conn, nil
}
// Initial connection only needs to be cleartext for explicit TLS
@@ -434,7 +373,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
return conn, nil
}
// Upgrade connection to TLS
tlsConn := tls.Client(conn, tlsConfig)
tlsConn := tls.Client(conn, f.tlsConf)
// Do the initial handshake - tls.Client doesn't do it for us
// If we do this then connections to proftpd/pureftpd lock up
// See: https://github.com/rclone/rclone/issues/6426
@@ -456,9 +395,9 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf
// as a trigger for sending PSBZ and PROT options to server.
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
} else if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig))
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
@@ -613,6 +552,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
if opt.TLS && opt.ExplicitTLS {
return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
}
var tlsConfig *tls.Config
if opt.TLS || opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: opt.Host,
InsecureSkipVerify: opt.SkipVerifyTLSCert,
}
if opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
}
if opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
u := protocol + path.Join(dialAddr+"/", root)
ci := fs.GetConfig(ctx)
f := &Fs{
@@ -625,6 +577,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
pass: pass,
dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency),
tlsConf: tlsConfig,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
@@ -987,8 +940,6 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
f.putFtpConnection(&c, err)
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusRequestedFileActionOK: // some ftp servers apparently return 250 instead of 257
err = nil // see: https://forum.rclone.org/t/rclone-pop-up-an-i-o-error-when-creating-a-folder-in-a-mounted-ftp-drive/44368/
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
err = nil
case 521: // dir already exists: error number according to RFC 959: issue #2363
@@ -1320,16 +1271,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return fmt.Errorf("update stor: %w", err)
}
o.fs.putFtpConnection(&c, nil)
if o.fs.opt.NoCheckUpload {
o.info = &FileInfo{
Name: o.remote,
Size: uint64(src.Size()),
ModTime: src.ModTime(ctx),
precise: true,
IsDir: false,
}
return nil
}
if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
return fmt.Errorf("SetModTime: %w", err)
}
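The TLS hunks above come down to where the tls.Config lives: a single f.tlsConf shared by the whole connection pool versus a fresh config, and therefore a fresh session cache, built per connection (the concern behind issue #7234). A standalone sketch of the per-connection variant, with the host name, cache size and skip-verify flag as placeholder parameters:

// Sketch: give every connection its own tls.Config so TLS session
// caches are never shared between connections (values are placeholders).
package ftpsketch

import (
	"crypto/tls"
	"net"
)

func newTLSConfig(host string, cacheSize int, insecureSkipVerify bool) *tls.Config {
	cfg := &tls.Config{
		ServerName:         host,
		InsecureSkipVerify: insecureSkipVerify,
	}
	if cacheSize > 0 {
		// A per-connection LRU session cache, not one shared by the pool.
		cfg.ClientSessionCache = tls.NewLRUClientSessionCache(cacheSize)
	}
	return cfg
}

// dialTLS dials address and upgrades the connection to TLS immediately,
// doing the handshake explicitly, much as the backend does for implicit FTPS.
func dialTLS(network, address, host string) (net.Conn, error) {
	conn, err := net.Dial(network, address)
	if err != nil {
		return nil, err
	}
	tlsConn := tls.Client(conn, newTLSConfig(host, 32, false))
	if err := tlsConn.Handshake(); err != nil {
		_ = conn.Close()
		return nil, err
	}
	return tlsConn, nil
}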

View File

@@ -17,7 +17,7 @@ import (
"github.com/stretchr/testify/require"
)
type settings map[string]any
type settings map[string]interface{}
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
fsName := strings.Split(f.Name(), "{")[0] // strip off hash

View File

@@ -1,311 +0,0 @@
// Package api has type definitions for gofile
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api
import (
"fmt"
"time"
)
const (
// 2017-05-03T07:26:10-07:00
timeFormat = `"` + time.RFC3339 + `"`
)
// Time represents date and time information for the
// gofile API, by using RFC3339
type Time time.Time
// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
timeString := (*time.Time)(t).Format(timeFormat)
return []byte(timeString), nil
}
// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
newT, err := time.Parse(timeFormat, string(data))
if err != nil {
return err
}
*t = Time(newT)
return nil
}
// Error is returned from gofile when things go wrong
type Error struct {
Status string `json:"status"`
}
// Error returns a string for the error and satisfies the error interface
func (e Error) Error() string {
out := fmt.Sprintf("Error %q", e.Status)
return out
}
// IsError returns true if there is an error
func (e Error) IsError() bool {
return e.Status != "ok"
}
// Err returns err if not nil, or e if IsError or nil
func (e Error) Err(err error) error {
if err != nil {
return err
}
if e.IsError() {
return e
}
return nil
}
// Check Error satisfies the error interface
var _ error = (*Error)(nil)
// Types of things in Item
const (
ItemTypeFolder = "folder"
ItemTypeFile = "file"
)
// Item describes a folder or a file as returned by /contents
type Item struct {
ID string `json:"id"`
ParentFolder string `json:"parentFolder"`
Type string `json:"type"`
Name string `json:"name"`
Size int64 `json:"size"`
Code string `json:"code"`
CreateTime int64 `json:"createTime"`
ModTime int64 `json:"modTime"`
Link string `json:"link"`
MD5 string `json:"md5"`
MimeType string `json:"mimetype"`
ChildrenCount int `json:"childrenCount"`
DirectLinks map[string]*DirectLink `json:"directLinks"`
//Public bool `json:"public"`
//ServerSelected string `json:"serverSelected"`
//Thumbnail string `json:"thumbnail"`
//DownloadCount int `json:"downloadCount"`
//TotalDownloadCount int64 `json:"totalDownloadCount"`
//TotalSize int64 `json:"totalSize"`
//ChildrenIDs []string `json:"childrenIds"`
Children map[string]*Item `json:"children"`
}
// ToNativeTime converts a go time to a native time
func ToNativeTime(t time.Time) int64 {
return t.Unix()
}
// FromNativeTime converts native time to a go time
func FromNativeTime(t int64) time.Time {
return time.Unix(t, 0)
}
// DirectLink describes a direct link to a file so it can be
// downloaded by third parties.
type DirectLink struct {
ExpireTime int64 `json:"expireTime"`
SourceIpsAllowed []any `json:"sourceIpsAllowed"`
DomainsAllowed []any `json:"domainsAllowed"`
Auth []any `json:"auth"`
IsReqLink bool `json:"isReqLink"`
DirectLink string `json:"directLink"`
}
// Contents is returned from the /contents call
type Contents struct {
Error
Data struct {
Item
} `json:"data"`
Metadata Metadata `json:"metadata"`
}
// Metadata is returned when paging is in use
type Metadata struct {
TotalCount int `json:"totalCount"`
TotalPages int `json:"totalPages"`
Page int `json:"page"`
PageSize int `json:"pageSize"`
HasNextPage bool `json:"hasNextPage"`
}
// AccountsGetID is the result of /accounts/getid
type AccountsGetID struct {
Error
Data struct {
ID string `json:"id"`
} `json:"data"`
}
// Stats of storage and traffic
type Stats struct {
FolderCount int64 `json:"folderCount"`
FileCount int64 `json:"fileCount"`
Storage int64 `json:"storage"`
TrafficDirectGenerated int64 `json:"trafficDirectGenerated"`
TrafficReqDownloaded int64 `json:"trafficReqDownloaded"`
TrafficWebDownloaded int64 `json:"trafficWebDownloaded"`
}
// AccountsGet is the result of /accounts/{id}
type AccountsGet struct {
Error
Data struct {
ID string `json:"id"`
Email string `json:"email"`
Tier string `json:"tier"`
PremiumType string `json:"premiumType"`
Token string `json:"token"`
RootFolder string `json:"rootFolder"`
SubscriptionProvider string `json:"subscriptionProvider"`
SubscriptionEndDate int `json:"subscriptionEndDate"`
SubscriptionLimitDirectTraffic int64 `json:"subscriptionLimitDirectTraffic"`
SubscriptionLimitStorage int64 `json:"subscriptionLimitStorage"`
StatsCurrent Stats `json:"statsCurrent"`
// StatsHistory map[int]map[int]map[int]Stats `json:"statsHistory"`
} `json:"data"`
}
// CreateFolderRequest is the input to /contents/createFolder
type CreateFolderRequest struct {
ParentFolderID string `json:"parentFolderId"`
FolderName string `json:"folderName"`
ModTime int64 `json:"modTime,omitempty"`
}
// CreateFolderResponse is the output from /contents/createFolder
type CreateFolderResponse struct {
Error
Data Item `json:"data"`
}
// DeleteRequest is the input to DELETE /contents
type DeleteRequest struct {
ContentsID string `json:"contentsId"` // comma separated list of IDs
}
// DeleteResponse is the input to DELETE /contents
type DeleteResponse struct {
Error
Data map[string]Error
}
// Server is an upload server
type Server struct {
Name string `json:"name"`
Zone string `json:"zone"`
}
// String returns a string representation of the Server
func (s *Server) String() string {
return fmt.Sprintf("%s (%s)", s.Name, s.Zone)
}
// Root returns the root URL for the server
func (s *Server) Root() string {
return fmt.Sprintf("https://%s.gofile.io/", s.Name)
}
// URL returns the upload URL for the server
func (s *Server) URL() string {
return fmt.Sprintf("https://%s.gofile.io/contents/uploadfile", s.Name)
}
// ServersResponse is the output from /servers
type ServersResponse struct {
Error
Data struct {
Servers []Server `json:"servers"`
} `json:"data"`
}
// UploadResponse is returned by POST /contents/uploadfile
type UploadResponse struct {
Error
Data Item `json:"data"`
}
// DirectLinksRequest specifies the parameters for the direct link
type DirectLinksRequest struct {
ExpireTime int64 `json:"expireTime,omitempty"`
SourceIpsAllowed []any `json:"sourceIpsAllowed,omitempty"`
DomainsAllowed []any `json:"domainsAllowed,omitempty"`
Auth []any `json:"auth,omitempty"`
}
// DirectLinksResult is returned from POST /contents/{id}/directlinks
type DirectLinksResult struct {
Error
Data struct {
ExpireTime int64 `json:"expireTime"`
SourceIpsAllowed []any `json:"sourceIpsAllowed"`
DomainsAllowed []any `json:"domainsAllowed"`
Auth []any `json:"auth"`
IsReqLink bool `json:"isReqLink"`
ID string `json:"id"`
DirectLink string `json:"directLink"`
} `json:"data"`
}
// UpdateItemRequest describes the updates to be done to an item for PUT /contents/{id}/update
//
// The value of the attribute to define:
// For Attribute "name": The name of the content (file or folder)
// For Attribute "description": The description displayed on the download page (folder only)
// For Attribute "tags": A comma-separated list of tags (folder only)
// For Attribute "public": either true or false (folder only)
// For Attribute "expiry": A unix timestamp of the expiration date (folder only)
// For Attribute "password": The password to set (folder only)
type UpdateItemRequest struct {
Attribute string `json:"attribute"`
Value any `json:"attributeValue"`
}
// UpdateItemResponse is returned by PUT /contents/{id}/update
type UpdateItemResponse struct {
Error
Data Item `json:"data"`
}
// MoveRequest is the input to /contents/move
type MoveRequest struct {
FolderID string `json:"folderId"`
ContentsID string `json:"contentsId"` // comma separated list of IDs
}
// MoveResponse is returned by POST /contents/move
type MoveResponse struct {
Error
Data map[string]struct {
Error
Item `json:"data"`
} `json:"data"`
}
// CopyRequest is the input to /contents/copy
type CopyRequest struct {
FolderID string `json:"folderId"`
ContentsID string `json:"contentsId"` // comma separated list of IDs
}
// CopyResponse is returned by POST /contents/copy
type CopyResponse struct {
Error
Data map[string]struct {
Error
Item `json:"data"`
} `json:"data"`
}
// UploadServerStatus is returned when fetching the root of an upload server
type UploadServerStatus struct {
Error
Data struct {
Server string `json:"server"`
Test string `json:"test"`
} `json:"data"`
}
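These types are plain JSON mappings, so a short decoding sketch may help; the JSON body is fabricated and the import path github.com/rclone/rclone/backend/gofile/api is assumed:

// Sketch: decode a fabricated /contents response into api.Contents and
// use the embedded Error helper to turn a bad status into a Go error.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/backend/gofile/api"
)

func main() {
	body := []byte(`{
	  "status": "ok",
	  "data": {"id": "abc123", "type": "folder", "name": "root", "childrenCount": 2},
	  "metadata": {"totalCount": 2, "totalPages": 1, "page": 1, "pageSize": 1000, "hasNextPage": false}
	}`)
	var contents api.Contents
	if err := json.Unmarshal(body, &contents); err != nil {
		panic(err)
	}
	if err := contents.Err(nil); err != nil { // a non-"ok" status becomes an error
		panic(err)
	}
	fmt.Println(contents.Data.Name, contents.Metadata.TotalCount) // root 2
}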

File diff suppressed because it is too large

View File

@@ -1,17 +0,0 @@
// Test Gofile filesystem interface
package gofile_test
import (
"testing"
"github.com/rclone/rclone/backend/gofile"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestGoFile:",
NilObject: (*gofile.Object)(nil),
})
}

View File

@@ -35,7 +35,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
@@ -62,10 +62,9 @@ const (
var (
// Description of how to auth for this app
storageConfig = &oauthutil.Config{
storageConfig = &oauth2.Config{
Scopes: []string{storage.DevstorageReadWriteScope},
AuthURL: google.Endpoint.AuthURL,
TokenURL: google.Endpoint.TokenURL,
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
@@ -107,12 +106,6 @@ func init() {
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
Sensitive: true,
}, {
Name: "access_token",
Help: "Short-lived access token.\n\nLeave blank normally.\nNeeded only if you want use short-lived access token instead of interactive login.",
Hide: fs.OptionHideConfigurator,
Sensitive: true,
Advanced: true,
}, {
Name: "anonymous",
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
@@ -386,7 +379,6 @@ type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
DirectoryMarkers bool `config:"directory_markers"`
AccessToken string `config:"access_token"`
}
// Fs represents a remote storage server
@@ -543,9 +535,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
}
} else if opt.AccessToken != "" {
ts := oauth2.Token{AccessToken: opt.AccessToken}
oAuthClient = oauth2.NewClient(ctx, oauth2.StaticTokenSource(&ts))
} else {
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
if err != nil {
@@ -708,7 +697,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
// is this a directory marker?
if isDirectory {
// Don't insert the root directory
if remote == f.opt.Enc.ToStandardPath(directory) {
if remote == directory {
continue
}
// process directory markers as directories
@@ -845,7 +834,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -955,6 +944,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
return e
}
return f.createDirectoryMarker(ctx, bucket, dir)
}
// mkdirParent creates the parent bucket/directory if it doesn't exist
@@ -1320,11 +1310,10 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
url := o.url
if o.fs.opt.UserProject != "" {
url += "&userProject=" + o.fs.opt.UserProject
o.url = o.url + "&userProject=" + o.fs.opt.UserProject
}
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
if err != nil {
return nil, err
}
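Two variants of the userProject handling appear in the last hunk: one appends &userProject= to the stored o.url on every Open, so the URL grows with each call, while the other builds the full URL in a local variable. A tiny sketch with made-up URLs of why the local copy matters:

// Sketch: appending to the stored URL accumulates the parameter on
// every call; building a local copy keeps the stored URL stable.
package main

import "fmt"

type object struct{ url string }

func (o *object) openMutating(userProject string) string {
	o.url = o.url + "&userProject=" + userProject // variant that mutates o.url
	return o.url
}

func (o *object) openLocal(userProject string) string {
	url := o.url + "&userProject=" + userProject // variant with a local copy
	return url
}

func main() {
	a := &object{url: "https://storage.example/obj?alt=media"}
	fmt.Println(a.openMutating("proj")) // ...&userProject=proj
	fmt.Println(a.openMutating("proj")) // ...&userProject=proj&userProject=proj

	b := &object{url: "https://storage.example/obj?alt=media"}
	fmt.Println(b.openLocal("proj")) // identical on every call
	fmt.Println(b.openLocal("proj"))
}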

View File

@@ -4,7 +4,6 @@ package googlephotos
import (
"path"
"slices"
"strings"
"sync"
@@ -120,7 +119,7 @@ func (as *albums) _del(album *api.Album) {
dirs := as.path[dir]
for i, dir := range dirs {
if dir == leaf {
dirs = slices.Delete(dirs, i, i+1)
dirs = append(dirs[:i], dirs[i+1:]...)
break
}
}
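The only change in this hunk is swapping the classic append-based element removal for slices.Delete from the Go 1.21 standard library; both forms yield the same slice, as this small sketch shows:

// Sketch: removing the element at index i with slices.Delete versus the
// append idiom used by the other side of the diff.
package main

import (
	"fmt"
	"slices"
)

func main() {
	a := []string{"2020", "2021", "2022"}
	b := slices.Clone(a)
	i := 1
	a = slices.Delete(a, i, i+1)  // stdlib helper
	b = append(b[:i], b[i+1:]...) // classic idiom
	fmt.Println(a) // [2020 2022]
	fmt.Println(b) // [2020 2022]
}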

View File

@@ -56,7 +56,8 @@ type MediaItem struct {
CreationTime time.Time `json:"creationTime"`
Width string `json:"width"`
Height string `json:"height"`
Photo struct{} `json:"photo"`
Photo struct {
} `json:"photo"`
} `json:"mediaMetadata"`
Filename string `json:"filename"`
}
@@ -67,7 +68,7 @@ type MediaItems struct {
NextPageToken string `json:"nextPageToken"`
}
// Content categories
//Content categories
// NONE Default content category. This category is ignored when any other category is used in the filter.
// LANDSCAPES Media items containing landscapes.
// RECEIPTS Media items containing receipts.
@@ -186,5 +187,5 @@ type BatchCreateResponse struct {
// BatchRemoveItems is for removing items from an album
type BatchRemoveItems struct {
MediaItemIDs []string `json:"mediaItemIds"`
MediaItemIds []string `json:"mediaItemIds"`
}
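The BatchRemoveItems hunk is a Go-side rename only (MediaItemIDs versus MediaItemIds); because the json tag stays mediaItemIds, both versions marshal to the same request body:

// Sketch: the struct field rename does not change the JSON wire format.
package main

import (
	"encoding/json"
	"fmt"
)

type withIDs struct {
	MediaItemIDs []string `json:"mediaItemIds"`
}

type withIds struct {
	MediaItemIds []string `json:"mediaItemIds"`
}

func main() {
	a, _ := json.Marshal(withIDs{MediaItemIDs: []string{"id1", "id2"}})
	b, _ := json.Marshal(withIds{MediaItemIds: []string{"id1", "id2"}})
	fmt.Println(string(a)) // {"mediaItemIds":["id1","id2"]}
	fmt.Println(string(b)) // {"mediaItemIds":["id1","id2"]}
}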

View File

@@ -28,11 +28,12 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/batcher"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
)
@@ -43,7 +44,6 @@ var (
errAlbumDelete = errors.New("google photos API does not implement deleting albums")
errRemove = errors.New("google photos API only implements removing files from albums")
errOwnAlbums = errors.New("google photos API only allows uploading to albums rclone created")
errReadOnly = errors.New("can't upload files in read only mode")
)
const (
@@ -53,45 +53,24 @@ const (
listChunks = 100 // chunk size to read directory listings
albumChunks = 50 // chunk size to read album listings
minSleep = 10 * time.Millisecond
scopeAppendOnly = "https://www.googleapis.com/auth/photoslibrary.appendonly"
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly.appcreateddata"
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary.edit.appcreateddata"
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly"
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
scopeAccess = 2 // position of access scope in list
)
var (
// scopes needed for read write access
scopesReadWrite = []string{
"openid",
"profile",
scopeAppendOnly,
scopeReadOnly,
scopeReadWrite,
}
// scopes needed for read only access
scopesReadOnly = []string{
"openid",
"profile",
scopeReadOnly,
}
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
Scopes: scopesReadWrite,
AuthURL: google.Endpoint.AuthURL,
TokenURL: google.Endpoint.TokenURL,
oauthConfig = &oauth2.Config{
Scopes: []string{
"openid",
"profile",
scopeReadWrite, // this must be at position scopeAccess
},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
}
// Configure the batcher
defaultBatcherOptions = batcher.Options{
MaxBatchSize: 50,
DefaultTimeoutSync: 1000 * time.Millisecond,
DefaultTimeoutAsync: 10 * time.Second,
DefaultBatchSizeAsync: 50,
}
)
// Register with Fs
@@ -113,9 +92,9 @@ func init() {
case "":
// Fill in the scopes
if opt.ReadOnly {
oauthConfig.Scopes = scopesReadOnly
oauthConfig.Scopes[scopeAccess] = scopeReadOnly
} else {
oauthConfig.Scopes = scopesReadWrite
oauthConfig.Scopes[scopeAccess] = scopeReadWrite
}
return oauthutil.ConfigOut("warning", &oauthutil.Options{
OAuth2Config: oauthConfig,
@@ -132,7 +111,7 @@ will count towards storage in your Google Account.`)
}
return nil, fmt.Errorf("unknown state %q", config.State)
},
Options: append(append(oauthutil.SharedOptions, []fs.Option{{
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "read_only",
Default: false,
Help: `Set to make the Google Photos backend read only.
@@ -172,34 +151,6 @@ listings and transferred.
Without this flag, archived media will not be visible in directory
listings and won't be transferred.`,
Advanced: true,
}, {
Name: "proxy",
Default: "",
Help: strings.ReplaceAll(`Use the gphotosdl proxy for downloading the full resolution images
The Google API will deliver images and video which aren't full
resolution, and/or have EXIF data missing.
However if you use the gphotosdl proxy then you can download original,
unchanged images.
This runs a headless browser in the background.
Download the software from [gphotosdl](https://github.com/rclone/gphotosdl)
First run with
gphotosdl -login
Then once you have logged into google photos close the browser window
and run
gphotosdl
Then supply the parameter |--gphotos-proxy "http://localhost:8282"| to make
rclone use the proxy.
`, "|", "`"),
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -207,7 +158,7 @@ rclone use the proxy.
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}}...), defaultBatcherOptions.FsOptions("")...),
}}...),
})
}
@@ -218,10 +169,6 @@ type Options struct {
StartYear int `config:"start_year"`
IncludeArchived bool `config:"include_archived"`
Enc encoder.MultiEncoder `config:"encoding"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
Proxy string `config:"proxy"`
}
// Fs represents a remote storage server
@@ -240,7 +187,6 @@ type Fs struct {
uploadedMu sync.Mutex // to protect the below
uploaded dirtree.DirTree // record of uploaded items
createMu sync.Mutex // held when creating albums to prevent dupes
batcher *batcher.Batcher[uploadedItem, *api.MediaItem]
}
// Object describes a storage object
@@ -321,7 +267,7 @@ func errorHandler(resp *http.Response) error {
if strings.HasPrefix(resp.Header.Get("Content-Type"), "image/") {
body = []byte("Image not found or broken")
}
e := api.Error{
var e = api.Error{
Details: api.ErrorDetails{
Code: resp.StatusCode,
Message: string(body),
@@ -366,14 +312,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
albums: map[bool]*albums{},
uploaded: dirtree.New(),
}
batcherOptions := defaultBatcherOptions
batcherOptions.Mode = f.opt.BatchMode
batcherOptions.Size = f.opt.BatchSize
batcherOptions.Timeout = time.Duration(f.opt.BatchTimeout)
f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions)
if err != nil {
return nil, err
}
f.features = (&fs.Features{
ReadMimeType: true,
}).Fill(ctx, f)
@@ -401,7 +339,7 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
Method: "GET",
RootURL: "https://accounts.google.com/.well-known/openid-configuration",
}
var openIDconfig map[string]any
var openIDconfig map[string]interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig)
return shouldRetry(ctx, resp, err)
@@ -461,7 +399,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
"token_type_hint": []string{"access_token"},
},
}
var res any
var res interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &res)
return shouldRetry(ctx, resp, err)
@@ -495,7 +433,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Med
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// defer log.Trace(f, "remote=%q", remote)("")
defer log.Trace(f, "remote=%q", remote)("")
return f.newObjectWithInfo(ctx, remote, nil)
}
@@ -661,7 +599,9 @@ func (f *Fs) listDir(ctx context.Context, prefix string, filter api.SearchFilter
if err != nil {
return err
}
entries = append(entries, entry)
if entry != nil {
entries = append(entries, entry)
}
return nil
})
if err != nil {
@@ -708,7 +648,7 @@ func (f *Fs) listUploads(ctx context.Context, dir string) (entries fs.DirEntries
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
match, prefix, pattern := patterns.match(f.root, dir, false)
if pattern == nil || pattern.isFile {
return nil, fs.ErrorDirNotFound
@@ -725,7 +665,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// defer log.Trace(f, "src=%+v", src)("")
defer log.Trace(f, "src=%+v", src)("")
// Temporary Object under construction
o := &Object{
fs: f,
@@ -741,7 +681,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
Path: "/albums",
Parameters: url.Values{},
}
request := api.CreateAlbum{
var request = api.CreateAlbum{
Album: &api.Album{
Title: albumTitle,
},
@@ -778,7 +718,7 @@ func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *ap
// Mkdir creates the album if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
match, prefix, pattern := patterns.match(f.root, dir, false)
if pattern == nil {
return fs.ErrorDirNotFound
@@ -802,7 +742,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
// defer log.Trace(f, "dir=%q")("err=%v", &err)
defer log.Trace(f, "dir=%q")("err=%v", &err)
match, _, pattern := patterns.match(f.root, dir, false)
if pattern == nil {
return fs.ErrorDirNotFound
@@ -841,13 +781,6 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
f.batcher.Shutdown()
return nil
}
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -875,7 +808,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
// defer log.Trace(o, "")("")
defer log.Trace(o, "")("")
if !o.fs.opt.ReadSize || o.bytes >= 0 {
return o.bytes
}
@@ -976,7 +909,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
// defer log.Trace(o, "")("")
defer log.Trace(o, "")("")
err := o.readMetaData(ctx)
if err != nil {
fs.Debugf(o, "ModTime: Failed to read metadata: %v", err)
@@ -1006,20 +939,16 @@ func (o *Object) downloadURL() string {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
// defer log.Trace(o, "")("")
defer log.Trace(o, "")("")
err = o.readMetaData(ctx)
if err != nil {
fs.Debugf(o, "Open: Failed to read metadata: %v", err)
return nil, err
}
url := o.downloadURL()
if o.fs.opt.Proxy != "" {
url = strings.TrimRight(o.fs.opt.Proxy, "/") + "/id/" + o.id
}
var resp *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: url,
RootURL: o.downloadURL(),
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1032,87 +961,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return resp.Body, err
}
// input to the batcher
type uploadedItem struct {
AlbumID string // desired album
UploadToken string // upload ID
}
// Commit a batch of items to albumID returning the errors in errors
func (f *Fs) commitBatchAlbumID(ctx context.Context, items []uploadedItem, results []*api.MediaItem, errors []error, albumID string) {
// Create the media item from an UploadToken, optionally adding to an album
opts := rest.Opts{
Method: "POST",
Path: "/mediaItems:batchCreate",
}
request := api.BatchCreateRequest{
AlbumID: albumID,
}
itemsInBatch := 0
for i := range items {
if items[i].AlbumID == albumID {
request.NewMediaItems = append(request.NewMediaItems, api.NewMediaItem{
SimpleMediaItem: api.SimpleMediaItem{
UploadToken: items[i].UploadToken,
},
})
itemsInBatch++
}
}
var result api.BatchCreateResponse
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
err = fmt.Errorf("failed to create media item: %w", err)
}
if err == nil && len(result.NewMediaItemResults) != itemsInBatch {
err = fmt.Errorf("bad response to BatchCreate expecting %d items but got %d", itemsInBatch, len(result.NewMediaItemResults))
}
j := 0
for i := range items {
if items[i].AlbumID == albumID {
if err == nil {
media := &result.NewMediaItemResults[j]
if media.Status.Code != 0 {
errors[i] = fmt.Errorf("upload failed: %s (%d)", media.Status.Message, media.Status.Code)
} else {
results[i] = &media.MediaItem
}
} else {
errors[i] = err
}
j++
}
}
}
// Called by the batcher to commit a batch
func (f *Fs) commitBatch(ctx context.Context, items []uploadedItem, results []*api.MediaItem, errors []error) (err error) {
// Discover all the AlbumIDs as we have to upload these separately
//
// Should maybe have one batcher per AlbumID
albumIDs := map[string]struct{}{}
for i := range items {
albumIDs[items[i].AlbumID] = struct{}{}
}
// batch the albums
for albumID := range albumIDs {
// errors returned in errors
f.commitBatchAlbumID(ctx, items, results, errors, albumID)
}
return nil
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
// defer log.Trace(o, "src=%+v", src)("err=%v", &err)
defer log.Trace(o, "src=%+v", src)("err=%v", &err)
match, _, pattern := patterns.match(o.fs.root, o.remote, true)
if pattern == nil || !pattern.isFile || !pattern.canUpload {
return errCantUpload
@@ -1133,9 +986,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
if !album.IsWriteable {
if o.fs.opt.ReadOnly {
return errReadOnly
}
return errOwnAlbums
}
@@ -1171,29 +1021,37 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errors.New("empty upload token")
}
uploaded := uploadedItem{
AlbumID: albumID,
UploadToken: uploadToken,
// Create the media item from an UploadToken, optionally adding to an album
opts = rest.Opts{
Method: "POST",
Path: "/mediaItems:batchCreate",
}
// Save the upload into an album
var info *api.MediaItem
if o.fs.batcher.Batching() {
info, err = o.fs.batcher.Commit(ctx, o.remote, uploaded)
} else {
errors := make([]error, 1)
results := make([]*api.MediaItem, 1)
err = o.fs.commitBatch(ctx, []uploadedItem{uploaded}, results, errors)
if err == nil {
err = errors[0]
info = results[0]
}
var request = api.BatchCreateRequest{
AlbumID: albumID,
NewMediaItems: []api.NewMediaItem{
{
SimpleMediaItem: api.SimpleMediaItem{
UploadToken: uploadToken,
},
},
},
}
var result api.BatchCreateResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return fmt.Errorf("failed to commit batch: %w", err)
return fmt.Errorf("failed to create media item: %w", err)
}
o.setMetaData(info)
if len(result.NewMediaItemResults) != 1 {
return errors.New("bad response to BatchCreate wrong number of items")
}
mediaItemResult := result.NewMediaItemResults[0]
if mediaItemResult.Status.Code != 0 {
return fmt.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
}
o.setMetaData(&mediaItemResult.MediaItem)
// Add upload to internal storage
if pattern.isUpload {
@@ -1220,8 +1078,8 @@ func (o *Object) Remove(ctx context.Context) (err error) {
Path: "/albums/" + album.ID + ":batchRemoveMediaItems",
NoResponse: true,
}
request := api.BatchRemoveItems{
MediaItemIDs: []string{o.id},
var request = api.BatchRemoveItems{
MediaItemIds: []string{o.id},
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
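The upload hunks trade a one-item mediaItems:batchCreate per upload for a batcher that queues upload tokens and commits them grouped by destination album. Purely as an illustration of that grouping step, with simplified placeholder types rather than the real lib/batcher API:

// Sketch: group queued upload tokens by album so each album gets one
// batchCreate call; item and result types are simplified placeholders.
package main

import "fmt"

type uploaded struct {
	AlbumID     string
	UploadToken string
}

func groupByAlbum(items []uploaded) map[string][]string {
	groups := make(map[string][]string)
	for _, it := range items {
		groups[it.AlbumID] = append(groups[it.AlbumID], it.UploadToken)
	}
	return groups
}

func main() {
	items := []uploaded{
		{AlbumID: "album1", UploadToken: "t1"},
		{AlbumID: "album2", UploadToken: "t2"},
		{AlbumID: "album1", UploadToken: "t3"},
	}
	for albumID, tokens := range groupByAlbum(items) {
		fmt.Printf("batchCreate album=%s tokens=%v\n", albumID, tokens)
	}
}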

View File

@@ -2,7 +2,6 @@ package googlephotos
import (
"context"
"errors"
"fmt"
"io"
"net/http"
@@ -36,7 +35,7 @@ func TestIntegration(t *testing.T) {
*fstest.RemoteName = "TestGooglePhotos:"
}
f, err := fs.NewFs(ctx, *fstest.RemoteName)
if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
if err == fs.ErrorNotFoundInConfigFile {
t.Skipf("Couldn't create google photos backend - skipping tests: %v", err)
}
require.NoError(t, err)

View File

@@ -38,7 +38,7 @@ type dirPattern struct {
toEntries func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error)
}
// dirPatterns is a slice of all the directory patterns
// dirPatters is a slice of all the directory patterns
type dirPatterns []dirPattern
// patterns describes the layout of the google photos backend file system.

View File

@@ -24,7 +24,7 @@ import (
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "drop":
return nil, f.db.Stop(true)
@@ -80,14 +80,6 @@ func (f *Fs) dbDump(ctx context.Context, full bool, root string) error {
}
root = fspath.JoinRootPath(remoteFs.Root(), f.Root())
}
if f.db == nil {
if f.opt.MaxAge == 0 {
fs.Errorf(f, "db not found. (disabled with max_age = 0)")
} else {
fs.Errorf(f, "db not found.")
}
return kv.ErrInactive
}
op := &kvDump{
full: full,
root: root,

View File

@@ -18,7 +18,6 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/kv"
)
@@ -115,13 +114,6 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
root: rpath,
opt: opt,
}
// Correct root if definitely pointing to a file
if err == fs.ErrorIsFile {
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
baseFeatures := baseFs.Features()
f.fpTime = baseFs.Precision() != fs.ModTimeNotSupported
@@ -165,27 +157,19 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
}
stubFeatures := &fs.Features{
CanHaveEmptyDirectories: true,
IsLocal: true,
ReadMimeType: true,
WriteMimeType: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
ReadDirMetadata: true,
WriteDirMetadata: true,
WriteDirSetModTime: true,
UserDirMetadata: true,
DirModTimeUpdatesOnWrite: true,
PartialUploads: true,
CanHaveEmptyDirectories: true,
IsLocal: true,
ReadMimeType: true,
WriteMimeType: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
// Enable ListP always
f.features.ListP = f.ListP
cache.PinUntilFinalized(f.Fs, f)
return f, err
}
@@ -241,39 +225,10 @@ func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries,
// List the objects and directories in dir into entries.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := f.wrapEntries(entries)
if err != nil {
return err
}
return callback(entries)
if entries, err = f.Fs.List(ctx, dir); err != nil {
return nil, err
}
listP := f.Fs.Features().ListP
if listP == nil {
entries, err := f.Fs.List(ctx, dir)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, dir, wrappedCallback)
return f.wrapEntries(entries)
}
// ListR lists the objects and directories recursively into out.
@@ -379,22 +334,6 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return errors.New("MergeDirs not supported")
}
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
if do := f.Fs.Features().DirSetModTime; do != nil {
return do(ctx, dir, modTime)
}
return fs.ErrorNotImplemented
}
// MkdirMetadata makes the root directory of the Fs object
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
if do := f.Fs.Features().MkdirMetadata; do != nil {
return do(ctx, dir, metadata)
}
return nil, fs.ErrorNotImplemented
}
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
@@ -472,9 +411,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Shutdown the backend, closing any background tasks and any cached connections.
func (f *Fs) Shutdown(ctx context.Context) (err error) {
if f.db != nil && !f.db.IsStopped() {
err = f.db.Stop(false)
}
err = f.db.Stop(false)
if do := f.Fs.Features().Shutdown; do != nil {
if err2 := do(ctx); err2 != nil {
err = err2
@@ -568,17 +505,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
return do.Metadata(ctx)
}
// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
do, ok := o.Object.(fs.SetMetadataer)
if !ok {
return fs.ErrorNotImplemented
}
return do.SetMetadata(ctx, metadata)
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -595,8 +521,6 @@ var (
_ fs.Abouter = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)

View File

@@ -60,11 +60,9 @@ func (f *Fs) testUploadFromCrypt(t *testing.T) {
assert.NotNil(t, dst)
// check that hash was created
if f.opt.MaxAge > 0 {
hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
assert.NoError(t, err)
assert.NotEmpty(t, hash)
}
hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
assert.NoError(t, err)
assert.NotEmpty(t, hash)
//t.Logf("hash is %q", hash)
_ = operations.Purge(ctx, f, dirName)
}

View File

@@ -23,7 +23,6 @@ func TestIntegration(t *testing.T) {
NilObject: (*hasher.Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"OpenChunkWriter",
},
UnimplementableObjectMethods: []string{},
}
@@ -37,9 +36,4 @@ func TestIntegration(t *testing.T) {
opt.QuickTestOK = true
}
fstests.Run(t, &opt)
// test again with MaxAge = 0
if *fstest.RemoteName == "" {
opt.ExtraConfig = append(opt.ExtraConfig, fstests.ExtraConfigItem{Name: "TestHasher", Key: "max_age", Value: "0"})
fstests.Run(t, &opt)
}
}

View File

@@ -6,7 +6,6 @@ import (
"encoding/gob"
"errors"
"fmt"
"maps"
"strings"
"time"
@@ -196,7 +195,9 @@ func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) {
r.Fp = op.fp
}
maps.Copy(r.Hashes, op.hashes)
for hashType, hashVal := range op.hashes {
r.Hashes[hashType] = hashVal
}
if data, err = r.encode(op.key); err != nil {
return fmt.Errorf("marshal failed: %w", err)
}
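As with the slices change earlier, this final hunk only swaps a manual copy loop for the Go 1.21 standard library helper; maps.Copy(dst, src) behaves like ranging over src and assigning each key into dst:

// Sketch: maps.Copy is equivalent to the explicit range-and-assign loop.
package main

import (
	"fmt"
	"maps"
)

func main() {
	dst := map[string]string{"md5": "old"}
	src := map[string]string{"md5": "new", "sha1": "abc"}
	maps.Copy(dst, src) // same effect as: for k, v := range src { dst[k] = v }
	fmt.Println(dst)    // map[md5:new sha1:abc]
}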

Some files were not shown because too many files have changed in this diff