mirror of https://github.com/rclone/rclone.git synced 2025-12-06 00:03:32 +00:00

Compare commits


4 Commits

Author SHA1 Message Date
Nick Craig-Wood
a2f55eccbc zoho: attempt to fix large file upload - not working DO NOT MERGE 2022-09-17 17:17:58 +01:00
Nick Craig-Wood
3933b1e7f5 compress: add extra debugging in case we have a repeat of #6434 2022-09-17 16:40:45 +01:00
Nick Craig-Wood
151224d3f8 compress: fix error handling to not use or return nil objects #6434 2022-09-17 16:37:45 +01:00
Nick Craig-Wood
1f28f4d05d compress: fix crash due to nil metadata #6434
Before this fix, if an error occurred reading the metadata, it could be
set as nil and then used, causing a crash.

This fix changes the readMetadata function so it returns an error, and
the error is always set if the metadata returned is nil.
2022-09-17 16:37:26 +01:00
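The pattern that last commit message describes — never returning a nil metadata object without also returning an error — can be sketched in Go roughly as follows. The names here (Metadata, loadMetadataObject) are illustrative stand-ins, not the actual code from rclone's compress backend:

package main

import (
	"errors"
	"fmt"
)

// Metadata is an illustrative stand-in for the compress backend's
// per-object metadata.
type Metadata struct {
	Size int64
}

// loadMetadataObject simulates the underlying read, which can fail.
func loadMetadataObject() (*Metadata, error) {
	return nil, errors.New("read failed")
}

// readMetadata guarantees the invariant from the commit message:
// if the returned *Metadata is nil, err is always non-nil, so callers
// that check the error can never dereference nil metadata.
func readMetadata() (*Metadata, error) {
	meta, err := loadMetadataObject()
	if err != nil {
		return nil, fmt.Errorf("failed to read metadata: %w", err)
	}
	if meta == nil {
		return nil, errors.New("metadata missing or corrupted")
	}
	return meta, nil
}

func main() {
	meta, err := readMetadata()
	if err != nil {
		fmt.Println("error:", err) // the caller bails out instead of crashing
		return
	}
	fmt.Println("size:", meta.Size)
}

Callers that previously could see (nil, nil) and crash now get an explicit error to handle.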
1835 changed files with 87573 additions and 335566 deletions

.gitattributes vendored

@@ -1,7 +1,3 @@
-# Go writes go.mod and go.sum with lf even on windows
-go.mod text eol=lf
-go.sum text eol=lf
 # Ignore generated files in GitHub language statistics and diffs
 /MANUAL.* linguist-generated=true
 /rclone.1 linguist-generated=true

.github/FUNDING.yml vendored Normal file

@@ -0,0 +1,4 @@
+github: [ncw]
+patreon: njcw
+liberapay: ncw
+custom: ["https://rclone.org/donate/"]


@@ -1,6 +0,0 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"


@@ -8,33 +8,29 @@ name: build
 on:
   push:
     branches:
-      - '**'
+      - '*'
     tags:
-      - '**'
+      - '*'
   pull_request:
   workflow_dispatch:
     inputs:
       manual:
-        description: Manual run (bypass default conditions)
-        type: boolean
+        required: true
         default: true
 jobs:
   build:
-    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
+    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
     timeout-minutes: 60
-    defaults:
-      run:
-        shell: bash
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.23']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.17', 'go1.18']
         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '>=1.24.0-rc.1'
+            go: '1.19.x'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -45,14 +41,14 @@ jobs:
           - job_name: linux_386
             os: ubuntu-latest
-            go: '>=1.24.0-rc.1'
+            go: '1.19.x'
             goarch: 386
             gotags: cmount
             quicktest: true
           - job_name: mac_amd64
-            os: macos-latest
-            go: '>=1.24.0-rc.1'
+            os: macos-11
+            go: '1.19.x'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -60,15 +56,15 @@ jobs:
             deploy: true
           - job_name: mac_arm64
-            os: macos-latest
-            go: '>=1.24.0-rc.1'
+            os: macos-11
+            go: '1.19.x'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true
           - job_name: windows
             os: windows-latest
-            go: '>=1.24.0-rc.1'
+            go: '1.19.x'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -78,14 +74,20 @@ jobs:
           - job_name: other_os
             os: ubuntu-latest
-            go: '>=1.24.0-rc.1'
+            go: '1.19.x'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true
-          - job_name: go1.23
+          - job_name: go1.17
             os: ubuntu-latest
-            go: '1.23'
+            go: '1.17.x'
+            quicktest: true
+            racequicktest: true
+          - job_name: go1.18
+            os: ubuntu-latest
+            go: '1.18.x'
             quicktest: true
             racequicktest: true
@@ -95,17 +97,19 @@ jobs:
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
        with:
           fetch-depth: 0
       - name: Install Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v2
         with:
+          stable: 'false'
           go-version: ${{ matrix.go }}
           check-latest: true
       - name: Set environment variables
+        shell: bash
         run: |
           echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
           echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
@@ -114,25 +118,20 @@ jobs:
           if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
       - name: Install Libraries on Linux
+        shell: bash
         run: |
           sudo modprobe fuse
           sudo chmod 666 /dev/fuse
           sudo chown root:$USER /etc/fuse.conf
-          sudo apt-get update
-          sudo apt-get install -y fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
+          sudo apt-get install fuse libfuse-dev rpm pkg-config
         if: matrix.os == 'ubuntu-latest'
       - name: Install Libraries on macOS
+        shell: bash
         run: |
-          # https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
-          # https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
-          unset HOMEBREW_NO_INSTALL_FROM_API
-          brew untap --force homebrew/core
-          brew untap --force homebrew/cask
           brew update
           brew install --cask macfuse
-          brew install git-annex git-annex-remote-rclone
-        if: matrix.os == 'macos-latest'
+        if: matrix.os == 'macos-11'
       - name: Install Libraries on Windows
         shell: powershell
@@ -151,6 +150,7 @@ jobs:
         if: matrix.os == 'windows-latest'
       - name: Print Go version and environment
+        shell: bash
         run: |
           printf "Using go at: $(which go)\n"
           printf "Go version: $(go version)\n"
@@ -161,25 +161,38 @@ jobs:
           printf "\n\nSystem environment:\n\n"
           env
+      - name: Go module cache
+        uses: actions/cache@v2
+        with:
+          path: ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
       - name: Build rclone
+        shell: bash
         run: |
           make
       - name: Rclone version
+        shell: bash
         run: |
           rclone version
       - name: Run tests
+        shell: bash
         run: |
           make quicktest
         if: matrix.quicktest
       - name: Race test
+        shell: bash
         run: |
           make racequicktest
         if: matrix.racequicktest
       - name: Run librclone tests
+        shell: bash
         run: |
           make -C librclone/ctest test
           make -C librclone/ctest clean
@@ -187,124 +200,68 @@ jobs:
         if: matrix.librclonetest
       - name: Compile all architectures test
+        shell: bash
         run: |
           make
           make compile_all
         if: matrix.compile_all
       - name: Deploy built binaries
+        shell: bash
         run: |
           if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
+          if [[ "${{ matrix.os }}" == "windows-latest" ]]; then make release_dep_windows ; fi
           make ci_beta
         env:
           RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
         # working-directory: '$(modulePath)'
         # Deploy binaries if enabled in config && not a PR && not a fork
-        if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
+        if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
   lint:
-    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
+    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
     timeout-minutes: 30
     name: "lint"
     runs-on: ubuntu-latest
     steps:
-      - name: Get runner parameters
-        id: get-runner-parameters
-        run: |
-          echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
-          echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
       - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-      - name: Install Go
-        id: setup-go
-        uses: actions/setup-go@v5
-        with:
-          go-version: '>=1.23.0-rc.1'
-          check-latest: true
-          cache: false
-      - name: Cache
-        uses: actions/cache@v4
-        with:
-          path: |
-            ~/go/pkg/mod
-            ~/.cache/go-build
-            ~/.cache/golangci-lint
-          key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
-          restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-
-      - name: Code quality test (Linux)
-        uses: golangci/golangci-lint-action@v6
+        uses: actions/checkout@v2
+      - name: Code quality test
+        uses: golangci/golangci-lint-action@v3
         with:
+          # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
           version: latest
-          skip-cache: true
-      - name: Code quality test (Windows)
-        uses: golangci/golangci-lint-action@v6
-        env:
-          GOOS: "windows"
-        with:
-          version: latest
-          skip-cache: true
-      - name: Code quality test (macOS)
-        uses: golangci/golangci-lint-action@v6
-        env:
-          GOOS: "darwin"
-        with:
-          version: latest
-          skip-cache: true
-      - name: Code quality test (FreeBSD)
-        uses: golangci/golangci-lint-action@v6
-        env:
-          GOOS: "freebsd"
-        with:
-          version: latest
-          skip-cache: true
-      - name: Code quality test (OpenBSD)
-        uses: golangci/golangci-lint-action@v6
-        env:
-          GOOS: "openbsd"
-        with:
-          version: latest
-          skip-cache: true
-      - name: Install govulncheck
-        run: go install golang.org/x/vuln/cmd/govulncheck@latest
-      - name: Scan for vulnerabilities
-        run: govulncheck ./...
-      - name: Scan edits of autogenerated files
-        run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
-        if: github.event_name == 'pull_request'
   android:
-    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
+    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
     timeout-minutes: 30
     name: "android-all"
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
         with:
           fetch-depth: 0
       # Upgrade together with NDK version
       - name: Set up Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v1
         with:
-          go-version: '>=1.24.0-rc.1'
+          go-version: 1.19.x
+      - name: Go module cache
+        uses: actions/cache@v2
+        with:
+          path: ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
       - name: Set global environment variables
+        shell: bash
         run: |
           echo "VERSION=$(make version)" >> $GITHUB_ENV
@@ -323,6 +280,7 @@ jobs:
         run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
       - name: arm-v7a Set environment variables
+        shell: bash
         run: |
           echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -336,6 +294,7 @@ jobs:
         run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
       - name: arm64-v8a Set environment variables
+        shell: bash
         run: |
           echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -348,6 +307,7 @@ jobs:
         run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
       - name: x86 Set environment variables
+        shell: bash
         run: |
           echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -360,6 +320,7 @@ jobs:
         run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
       - name: x64 Set environment variables
+        shell: bash
         run: |
           echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -377,4 +338,4 @@ jobs:
         env:
           RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
         # Upload artifacts if not a PR && not a fork
-        if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
+        if: github.head_ref == '' && github.repository == 'rclone/rclone'


@@ -1,212 +0,0 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable build_android.yml" -*-
name: Build & Push Android Builds
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
cancel-in-progress: true
# Trigger the workflow on push or pull request
on:
push:
branches:
- '**'
tags:
- '**'
pull_request:
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
default: true
jobs:
android:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
include:
- job_name: android-all
platform: linux/amd64/android/go1.24
os: ubuntu-latest
go: '>=1.24.0-rc.1'
name: ${{ matrix.job_name }}
runs-on: ${{ matrix.os }}
steps:
- name: Checkout Repository
uses: actions/checkout@v4
with:
fetch-depth: 0
# Upgrade together with NDK version
- name: Install Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
check-latest: true
cache: false
- name: Set Environment Variables
shell: bash
run: |
echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_ENV
echo "GOCACHE=$(go env GOCACHE)" >> $GITHUB_ENV
echo "VERSION=$(make version)" >> $GITHUB_ENV
- name: Set PLATFORM Variable
shell: bash
run: |
platform=${{ matrix.platform }}
echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV
- name: Get ImageOS
# There's no way around this, because "ImageOS" is only available to
# processes, but the setup-go action uses it in its key.
id: imageos
uses: actions/github-script@v7
with:
result-encoding: string
script: |
return process.env.ImageOS
- name: Set CACHE_PREFIX Variable
shell: bash
run: |
cache_prefix=${{ runner.os }}-${{ steps.imageos.outputs.result }}-${{ env.PLATFORM }}
echo "CACHE_PREFIX=${cache_prefix}" >> $GITHUB_ENV
- name: Load Go Module Cache
uses: actions/cache@v4
with:
path: |
${{ env.GOMODCACHE }}
key: ${{ env.CACHE_PREFIX }}-modcache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ env.CACHE_PREFIX }}-modcache
# Both load & update the cache when on default branch
- name: Load Go Build & Test Cache
id: go-cache
uses: actions/cache@v4
if: github.ref_name == github.event.repository.default_branch && github.event_name != 'pull_request'
with:
path: |
${{ env.GOCACHE }}
key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: |
${{ env.CACHE_PREFIX }}-cache
# Only load the cache when not on default branch
- name: Load Go Build & Test Cache
id: go-cache-restore
uses: actions/cache/restore@v4
if: github.ref_name != github.event.repository.default_branch || github.event_name == 'pull_request'
with:
path: |
${{ env.GOCACHE }}
key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: |
${{ env.CACHE_PREFIX }}-cache
- name: Build Native rclone
shell: bash
run: |
make
- name: Install gomobile
shell: bash
run: |
go install golang.org/x/mobile/cmd/gobind@latest
go install golang.org/x/mobile/cmd/gomobile@latest
env PATH=$PATH:~/go/bin gomobile init
echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV
- name: arm-v7a - gomobile build
shell: bash
run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
- name: arm-v7a - Set Environment Variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
echo 'GOARM=7' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm-v7a - Build
shell: bash
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
- name: arm64-v8a - Set Environment Variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm64-v8a - Build
shell: bash
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
- name: x86 - Set Environment Variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x86 - Build
shell: bash
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
- name: x64 - Set Environment Variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x64 - Build
shell: bash
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .
- name: Delete Existing Cache
continue-on-error: true
shell: bash
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
cache_ids=($(gh cache list --key "${{ env.CACHE_PREFIX }}-cache" --json id | jq '.[].id'))
for cache_id in "${cache_ids[@]}"; do
echo "Deleting Cache: $cache_id"
gh cache delete "$cache_id"
done
if: github.ref_name == github.event.repository.default_branch && github.event_name != 'pull_request' && steps.go-cache.outputs.cache-hit != 'true'
- name: Deploy Built Binaries
shell: bash
run: |
make ci_upload
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'


@@ -1,329 +1,26 @@
----
-# Github Actions release for rclone
-# -*- compile-command: "yamllint -f parsable build_publish_docker_image.yml" -*-
-name: Build & Push Docker Images
-concurrency:
-  group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
-  cancel-in-progress: true
-# Trigger the workflow on push or pull request
+name: Docker beta build
 on:
   push:
     branches:
-      - '**'
-    tags:
-      - '**'
-  workflow_dispatch:
-    inputs:
-      manual:
-        description: Manual run (bypass default conditions)
-        type: boolean
-        default: true
+      - master
 jobs:
-  build-image:
-    if: inputs.manual || (github.repository == 'rclone/rclone' && github.event_name != 'pull_request')
-    timeout-minutes: 60
-    strategy:
-      fail-fast: false
-      matrix:
-        include:
-          - platform: linux/amd64
-            runs-on: ubuntu-24.04
-          - platform: linux/386
-            runs-on: ubuntu-24.04
-          - platform: linux/arm64
-            runs-on: ubuntu-24.04-arm
-          - platform: linux/arm/v7
-            runs-on: ubuntu-24.04-arm
-          - platform: linux/arm/v6
-            runs-on: ubuntu-24.04-arm
-    name: Build Docker Image for ${{ matrix.platform }}
-    runs-on: ${{ matrix.runs-on }}
+  build:
+    if: github.repository == 'rclone/rclone'
+    runs-on: ubuntu-latest
+    name: Build image job
     steps:
-      - name: Checkout Repository
-        uses: actions/checkout@v4
+      - name: Checkout master
+        uses: actions/checkout@v2
         with:
           fetch-depth: 0
-      - name: Set REPO_NAME Variable
-        shell: bash
-        run: |
-          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
-      - name: Set PLATFORM Variable
-        shell: bash
-        run: |
-          platform=${{ matrix.platform }}
-          echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV
-      - name: Set CACHE_NAME Variable
-        shell: python
-        env:
-          GITHUB_EVENT_REPOSITORY_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
-        run: |
-          import os, re
-          def slugify(input_string, max_length=63):
-              slug = input_string.lower()
-              slug = re.sub(r'[^a-z0-9 -]', ' ', slug)
-              slug = slug.strip()
-              slug = re.sub(r'\s+', '-', slug)
-              slug = re.sub(r'-+', '-', slug)
-              slug = slug[:max_length]
-              slug = re.sub(r'[-]+$', '', slug)
-              return slug
-          ref_name_slug = "cache"
-          if os.environ.get("GITHUB_REF_NAME"):
-              if os.environ['GITHUB_EVENT_NAME'] == "pull_request":
-                  ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])
-              elif os.environ['GITHUB_REF_NAME'] != os.environ['GITHUB_EVENT_REPOSITORY_DEFAULT_BRANCH']:
-                  ref_name_slug += "-ref-" + slugify(os.environ['GITHUB_REF_NAME'])
-          with open(os.environ['GITHUB_ENV'], 'a') as env:
-              env.write(f"CACHE_NAME={ref_name_slug}\n")
-      - name: Get ImageOS
-        # There's no way around this, because "ImageOS" is only available to
-        # processes, but the setup-go action uses it in its key.
-        id: imageos
-        uses: actions/github-script@v7
-        with:
-          result-encoding: string
-          script: |
-            return process.env.ImageOS
-      - name: Set CACHE_PREFIX Variable
-        shell: bash
-        run: |
-          cache_prefix=${{ runner.os }}-${{ steps.imageos.outputs.result }}-${{ env.PLATFORM }}-docker-go
-          echo "CACHE_PREFIX=${cache_prefix}" >> $GITHUB_ENV
-      - name: Extract Metadata (tags, labels) for Docker
-        id: meta
-        uses: docker/metadata-action@v5
-        env:
-          DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,manifest-descriptor # Important for digest annotation (used by Github packages)
-        with:
-          images: |
-            ghcr.io/${{ env.REPO_NAME }}
-          labels: |
-            org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
-            org.opencontainers.image.vendor=${{ github.repository_owner }}
-            org.opencontainers.image.authors=rclone <https://github.com/rclone>
-            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
-            org.opencontainers.image.revision=${{ github.sha }}
-          tags: |
-            type=sha
-            type=ref,event=pr
-            type=ref,event=branch
-            type=semver,pattern={{version}}
-            type=semver,pattern={{major}}
-            type=semver,pattern={{major}}.{{minor}}
-            type=raw,value=beta,enable={{is_default_branch}}
-      - name: Setup QEMU
-        uses: docker/setup-qemu-action@v3
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-      - name: Load Go Build Cache for Docker
-        id: go-cache
-        uses: actions/cache@v4
-        if: github.ref_name == github.event.repository.default_branch
-        with:
-          # Cache only the go builds, the module download is cached via the docker layer caching
-          path: |
-            /tmp/go-build-cache
-          key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
-          restore-keys: |
-            ${{ env.CACHE_PREFIX }}-cache
-      - name: Load Go Build Cache for Docker
-        id: go-cache-restore
-        uses: actions/cache/restore@v4
-        if: github.ref_name != github.event.repository.default_branch
-        with:
-          # Cache only the go builds, the module download is cached via the docker layer caching
-          path: |
-            /tmp/go-build-cache
-          key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
-          restore-keys: |
-            ${{ env.CACHE_PREFIX }}-cache
-      - name: Inject Go Build Cache into Docker
-        uses: reproducible-containers/buildkit-cache-dance@v3
-        with:
-          cache-map: |
-            {
-              "/tmp/go-build-cache": "/root/.cache/go-build"
-            }
-          skip-extraction: ${{ steps.go-cache.outputs.cache-hit || steps.go-cache-restore.outputs.cache-hit }}
-      - name: Login to GitHub Container Registry
-        uses: docker/login-action@v3
-        with:
-          registry: ghcr.io
-          # This is the user that triggered the Workflow. In this case, it will
-          # either be the user whom created the Release or manually triggered
-          # the workflow_dispatch.
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-      - name: Build and Publish Image Digest
-        id: build
-        uses: docker/build-push-action@v6
-        with:
-          file: Dockerfile
-          context: .
-          provenance: false
-          # don't specify 'tags' here (error "get can't push tagged ref by digest")
-          # tags: ${{ steps.meta.outputs.tags }}
-          labels: ${{ steps.meta.outputs.labels }}
-          annotations: ${{ steps.meta.outputs.annotations }}
-          platforms: ${{ matrix.platform }}
-          outputs: |
-            type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
-          cache-from: |
-            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-${{ env.CACHE_NAME }}
-            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-cache
-          cache-to: |
-            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-${{ env.CACHE_NAME }},image-manifest=true,mode=max,compression=zstd
-      - name: Export Image Digest
-        run: |
-          mkdir -p /tmp/digests
-          digest="${{ steps.build.outputs.digest }}"
-          touch "/tmp/digests/${digest#sha256:}"
-      - name: Upload Image Digest
-        uses: actions/upload-artifact@v4
-        with:
-          name: digests-${{ env.PLATFORM }}
-          path: /tmp/digests/*
-          retention-days: 1
-          if-no-files-found: error
-      - name: Delete Existing Cache
-        if: github.ref_name == github.event.repository.default_branch && steps.go-cache.outputs.cache-hit != 'true'
-        continue-on-error: true
-        shell: bash
-        env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          cache_ids=($(gh cache list --key "${{ env.CACHE_PREFIX }}-cache" --json id | jq '.[].id'))
-          for cache_id in "${cache_ids[@]}"; do
-            echo "Deleting Cache: $cache_id"
-            gh cache delete "$cache_id"
-          done
-  merge-image:
-    name: Merge & Push Final Docker Image
-    runs-on: ubuntu-24.04
-    needs:
-      - build-image
-    steps:
-      - name: Download Image Digests
-        uses: actions/download-artifact@v4
-        with:
-          path: /tmp/digests
-          pattern: digests-*
-          merge-multiple: true
-      - name: Set REPO_NAME Variable
-        shell: bash
-        run: |
-          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
-      - name: Extract Metadata (tags, labels) for Docker
-        id: meta
-        uses: docker/metadata-action@v5
-        env:
-          DOCKER_METADATA_ANNOTATIONS_LEVELS: index
-        with:
-          images: |
-            ${{ env.REPO_NAME }}
-            ghcr.io/${{ env.REPO_NAME }}
-          labels: |
-            org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
-            org.opencontainers.image.vendor=${{ github.repository_owner }}
-            org.opencontainers.image.authors=rclone <https://github.com/rclone>
-            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
-            org.opencontainers.image.revision=${{ github.sha }}
-          tags: |
-            type=sha
-            type=ref,event=pr
-            type=ref,event=branch
-            type=semver,pattern={{version}}
-            type=semver,pattern={{major}}
-            type=semver,pattern={{major}}.{{minor}}
-            type=raw,value=beta,enable={{is_default_branch}}
-      - name: Extract Tags
-        shell: python
-        run: |
-          import json, os
-          metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
-          metadata = json.loads(metadata_json)
-          tags = [f"--tag '{tag}'" for tag in metadata["tags"]]
-          tags_string = " ".join(tags)
-          with open(os.environ['GITHUB_ENV'], 'a') as env:
-              env.write(f"TAGS={tags_string}\n")
-      - name: Extract Annotations
-        shell: python
-        run: |
-          import json, os
-          metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
-          metadata = json.loads(metadata_json)
-          annotations = [f"--annotation '{annotation}'" for annotation in metadata["annotations"]]
-          annotations_string = " ".join(annotations)
-          with open(os.environ['GITHUB_ENV'], 'a') as env:
-              env.write(f"ANNOTATIONS={annotations_string}\n")
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-      - name: Login to Docker Hub
-        uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - name: Login to GitHub Container Registry
-        uses: docker/login-action@v3
-        with:
-          registry: ghcr.io
-          # This is the user that triggered the Workflow. In this case, it will
-          # either be the user whom created the Release or manually triggered
-          # the workflow_dispatch.
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-      - name: Create & Push Manifest List
-        working-directory: /tmp/digests
-        run: |
-          docker buildx imagetools create \
-            ${{ env.TAGS }} \
-            ${{ env.ANNOTATIONS }} \
-            $(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *)
-      - name: Inspect and Run Multi-Platform Image
-        run: |
-          docker buildx imagetools inspect --raw ${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
-          docker buildx imagetools inspect --raw ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
-          docker run --rm ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} version
+      - name: Build and publish image
+        uses: ilteoood/docker_buildx@1.1.0
+        with:
+          tag: beta
+          imageName: rclone/rclone
+          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
+          publish: true
+          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
+          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}


@@ -1,49 +0,0 @@
----
-# Github Actions release for rclone
-# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*-
-name: Release Build for Docker Plugin
-on:
-  release:
-    types: [published]
-  workflow_dispatch:
-    inputs:
-      manual:
-        description: Manual run (bypass default conditions)
-        type: boolean
-        default: true
-jobs:
-  build_docker_volume_plugin:
-    if: inputs.manual || github.repository == 'rclone/rclone'
-    name: Build docker plugin job
-    runs-on: ubuntu-latest
-    steps:
-      - name: Free some space
-        shell: bash
-        run: |
-          df -h .
-          # Remove android SDK
-          sudo rm -rf /usr/local/lib/android || true
-          # Remove .net runtime
-          sudo rm -rf /usr/share/dotnet || true
-          df -h .
-      - name: Checkout master
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-      - name: Build and publish docker plugin
-        shell: bash
-        run: |
-          VER=${GITHUB_REF#refs/tags/}
-          PLUGIN_USER=rclone
-          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
-                       --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
-          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
-            export PLUGIN_USER PLUGIN_ARCH
-            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
-            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
-          done
-          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
-          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}


@@ -0,0 +1,59 @@
+name: Docker release build
+on:
+  release:
+    types: [published]
+jobs:
+  build:
+    if: github.repository == 'rclone/rclone'
+    runs-on: ubuntu-latest
+    name: Build image job
+    steps:
+      - name: Checkout master
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+      - name: Get actual patch version
+        id: actual_patch_version
+        run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
+      - name: Get actual minor version
+        id: actual_minor_version
+        run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
+      - name: Get actual major version
+        id: actual_major_version
+        run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
+      - name: Build and publish image
+        uses: ilteoood/docker_buildx@1.1.0
+        with:
+          tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
+          imageName: rclone/rclone
+          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
+          publish: true
+          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
+          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
+  build_docker_volume_plugin:
+    if: github.repository == 'rclone/rclone'
+    needs: build
+    runs-on: ubuntu-latest
+    name: Build docker plugin job
+    steps:
+      - name: Checkout master
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+      - name: Build and publish docker plugin
+        shell: bash
+        run: |
+          VER=${GITHUB_REF#refs/tags/}
+          PLUGIN_USER=rclone
+          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
+                       --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
+          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
+            export PLUGIN_USER PLUGIN_ARCH
+            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
+            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
+          done
+          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
+          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}


@@ -1,104 +0,0 @@
----
-# Github Actions build for rclone
-# -*- compile-command: "yamllint -f parsable lint.yml" -*-
-name: Lint & Vulnerability Check
-concurrency:
-  group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
-  cancel-in-progress: true
-# Trigger the workflow on push or pull request
-on:
-  push:
-    branches:
-      - '**'
-    tags:
-      - '**'
-  pull_request:
-  workflow_dispatch:
-    inputs:
-      manual:
-        description: Manual run (bypass default conditions)
-        type: boolean
-        default: true
-jobs:
-  lint:
-    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
-    timeout-minutes: 30
-    name: "lint"
-    runs-on: ubuntu-latest
-    steps:
-      - name: Get runner parameters
-        id: get-runner-parameters
-        shell: bash
-        run: |
-          echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
-          echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
-      - name: Checkout
-        uses: actions/checkout@v4
-      - name: Install Go
-        id: setup-go
-        uses: actions/setup-go@v5
-        with:
-          go-version: '>=1.23.0-rc.1'
-          check-latest: true
-          cache: false
-      - name: Cache
-        uses: actions/cache@v4
-        with:
-          path: |
-            ~/go/pkg/mod
-            ~/.cache/go-build
-            ~/.cache/golangci-lint
-          key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
-          restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-
-      - name: Code quality test (Linux)
-        uses: golangci/golangci-lint-action@v6
-        with:
-          version: latest
-          skip-cache: true
-      - name: Code quality test (Windows)
-        uses: golangci/golangci-lint-action@v6
-        env:
-          GOOS: "windows"
-        with:
-          version: latest
-          skip-cache: true
-      - name: Code quality test (macOS)
-        uses: golangci/golangci-lint-action@v6
-        env:
-          GOOS: "darwin"
-        with:
-          version: latest
-          skip-cache: true
-      - name: Code quality test (FreeBSD)
-        uses: golangci/golangci-lint-action@v6
-        env:
-          GOOS: "freebsd"
-        with:
-          version: latest
-          skip-cache: true
-      - name: Code quality test (OpenBSD)
-        uses: golangci/golangci-lint-action@v6
-        env:
-          GOOS: "openbsd"
-        with:
-          version: latest
-          skip-cache: true
-      - name: Install govulncheck
-        run: go install golang.org/x/vuln/cmd/govulncheck@latest
-      - name: Scan for vulnerabilities
-        run: govulncheck ./...


@@ -1,15 +0,0 @@
-name: Notify users based on issue labels
-on:
-  issues:
-    types: [labeled]
-jobs:
-  notify:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: jenschelkopf/issue-label-notification-action@1.3
-        with:
-          token: ${{ secrets.NOTIFY_ACTION_TOKEN }}
-          recipients: |
-            Support Contract=@rclone/support


@@ -1,14 +0,0 @@
-name: Publish to Winget
-on:
-  release:
-    types: [released]
-jobs:
-  publish:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: vedantmgoyal2009/winget-releaser@v2
-        with:
-          identifier: Rclone.Rclone
-          installers-regex: '-windows-\w+\.zip$'
-          token: ${{ secrets.WINGET_TOKEN }}

.gitignore vendored

@@ -3,20 +3,15 @@ _junk/
 rclone
 rclone.exe
 build
-/docs/public/
-/docs/.hugo_build.lock
-/docs/static/img/logos/
+docs/public
 rclone.iml
 .idea
 .history
-.vscode
 *.test
-*.log
 *.iml
 fuzz-build.zip
 *.orig
 *.rej
 Thumbs.db
 __pycache__
-.DS_Store
-resource_windows_*.syso
-.devcontainer


@@ -2,18 +2,15 @@
 linters:
   enable:
+    - deadcode
     - errcheck
     - goimports
     - revive
     - ineffassign
+    - structcheck
+    - varcheck
     - govet
     - unconvert
-    - staticcheck
-    - gosimple
-    - stylecheck
-    - unused
-    - misspell
-    - gocritic
     #- prealloc
     #- maligned
   disable-all: true
@@ -28,117 +25,6 @@ issues:
   # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
   max-same-issues: 0
-  exclude-rules:
-    - linters:
-        - staticcheck
-      text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
-  # don't disable the revive messages about comments on exported functions
-  include:
-    - EXC0012
-    - EXC0013
-    - EXC0014
-    - EXC0015
 run:
   # timeout for analysis, e.g. 30s, 5m, default is 1m
   timeout: 10m
-linters-settings:
-  revive:
-    # setting rules seems to disable all the rules, so re-enable them here
-    rules:
-      - name: blank-imports
-        disabled: false
-      - name: context-as-argument
-        disabled: false
-      - name: context-keys-type
-        disabled: false
-      - name: dot-imports
-        disabled: false
-      - name: empty-block
-        disabled: true
-      - name: error-naming
-        disabled: false
-      - name: error-return
-        disabled: false
-      - name: error-strings
-        disabled: false
-      - name: errorf
-        disabled: false
-      - name: exported
-        disabled: false
-      - name: increment-decrement
-        disabled: true
-      - name: indent-error-flow
-        disabled: false
-      - name: package-comments
-        disabled: false
-      - name: range
-        disabled: false
-      - name: receiver-naming
-        disabled: false
-      - name: redefines-builtin-id
-        disabled: true
-      - name: superfluous-else
-        disabled: true
-      - name: time-naming
-        disabled: false
-      - name: unexported-return
-        disabled: false
-      - name: unreachable-code
-        disabled: true
-      - name: unused-parameter
-        disabled: true
-      - name: var-declaration
-        disabled: false
-      - name: var-naming
-        disabled: false
-  stylecheck:
-    # Only enable the checks performed by the staticcheck stand-alone tool,
-    # as documented here: https://staticcheck.io/docs/configuration/options/#checks
-    checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
-  gocritic:
-    # Enable all default checks with some exceptions and some additions (commented).
-    # Cannot use both enabled-checks and disabled-checks, so must specify all to be used.
-    disable-all: true
-    enabled-checks:
-      #- appendAssign # Enabled by default
-      - argOrder
-      - assignOp
-      - badCall
-      - badCond
-      #- captLocal # Enabled by default
-      - caseOrder
-      - codegenComment
-      #- commentFormatting # Enabled by default
-      - defaultCaseOrder
-      - deprecatedComment
-      - dupArg
-      - dupBranchBody
-      - dupCase
-      - dupSubExpr
-      - elseif
-      #- exitAfterDefer # Enabled by default
-      - flagDeref
-      - flagName
-      #- ifElseChain # Enabled by default
-      - mapKey
-      - newDeref
-      - offBy1
-      - regexpMust
-      - ruleguard # Not enabled by default
-      #- singleCaseSwitch # Enabled by default
-      - sloppyLen
-      - sloppyTypeAssert
-      - switchTrue
-      - typeSwitchVar
-      - underef
-      - unlambda
-      - unslice
-      - valSwap
-      - wrapperFunc
-    settings:
-      ruleguard:
-        rules: "${configDir}/bin/rules.go"


@@ -1,8 +1,8 @@
# Contributing to rclone # Contributing to rclone #
This is a short guide on how to contribute things to rclone. This is a short guide on how to contribute things to rclone.
## Reporting a bug ## Reporting a bug ##
If you've just got a question or aren't sure if you've found a bug If you've just got a question or aren't sure if you've found a bug
then please use the [rclone forum](https://forum.rclone.org/) instead then please use the [rclone forum](https://forum.rclone.org/) instead
@@ -12,13 +12,13 @@ When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](https://beta.rclone.org/): with the [latest beta of rclone](https://beta.rclone.org/):
- Rclone version (e.g. output from `rclone version`) * Rclone version (e.g. output from `rclone version`)
- Which OS you are using and how many bits (e.g. Windows 10, 64 bit) * Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
- The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`) * The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
- A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`) * A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
- if the log contains secrets then edit the file with a text editor first to obscure them * if the log contains secrets then edit the file with a text editor first to obscure them
## Submitting a new feature or bug fix ## Submitting a new feature or bug fix ##
If you find a bug that you'd like to fix, or a new feature that you'd If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub. like to implement then please submit a pull request via GitHub.
@@ -73,9 +73,9 @@ This is typically enough if you made a simple bug fix, otherwise please read the
Make sure you Make sure you
- Add [unit tests](#testing) for a new feature. * Add [unit tests](#testing) for a new feature.
- Add [documentation](#writing-documentation) for a new feature. * Add [documentation](#writing-documentation) for a new feature.
- [Commit your changes](#committing-your-changes) using the [commit message guidelines](#commit-messages). * [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
When you are done with that push your changes to GitHub: When you are done with that push your changes to GitHub:
@@ -88,9 +88,9 @@ Your changes will then get reviewed and you might get asked to fix some stuff. I
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits). You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
## Using Git and GitHub ## Using Git and GitHub ##
### Committing your changes ### Committing your changes ###
Follow the guideline for [commit messages](#commit-messages) and then: Follow the guideline for [commit messages](#commit-messages) and then:
@@ -107,7 +107,7 @@ You can modify the message or changes in the latest commit using:
If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits). If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Replacing your previously pushed commits ### Replacing your previously pushed commits ###
Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub. Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
@@ -115,7 +115,7 @@ Your previously pushed commits are replaced by:
git push --force origin my-new-feature git push --force origin my-new-feature
### Basing your changes on the latest master ### Basing your changes on the latest master ###
To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream): To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
@@ -149,21 +149,13 @@ If you squash commits that have been pushed to GitHub, then you will have to [re
Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation. Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
### GitHub Continuous Integration ### GitHub Continuous Integration ###
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository. rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
## Testing ## Testing ##
### Code quality tests ### Quick testing ###
If you install [golangci-lint](https://github.com/golangci/golangci-lint) then you can run the same tests as get run in the CI which can be very helpful.
You can run them with `make check` or with `golangci-lint run ./...`.
Using these tests ensures that the rclone codebase all uses the same coding standards. These tests also check for easy mistakes to make (like forgetting to check an error return).
### Quick testing
rclone's tests are run from the go testing framework, so at the top rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests. level you can run this to run all the tests.
@@ -176,7 +168,7 @@ You can also use `make`, if supported by your platform
The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub. The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
### Backend testing ### Backend testing ###
rclone contains a mixture of unit tests and integration tests. rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud Because it is difficult (and in some respects pointless) to test cloud
@@ -209,9 +201,9 @@ altogether with an HTML report and test retries then from the
project root: project root:
go install github.com/rclone/rclone/fstest/test_all go install github.com/rclone/rclone/fstest/test_all
test_all -backends drive test_all -backend drive
### Full integration testing ### Full integration testing ###
If you want to run all the integration tests against all the remotes, If you want to run all the integration tests against all the remotes,
then change into the project root and run then change into the project root and run
@@ -226,56 +218,55 @@ The commands may require some extra go packages which you can install with
The full integration tests are run daily on the integration test server. You can The full integration tests are run daily on the integration test server. You can
find the results at https://pub.rclone.org/integration-tests/ find the results at https://pub.rclone.org/integration-tests/
## Code Organisation ## Code Organisation ##
Rclone code is organised into a small number of top level directories Rclone code is organised into a small number of top level directories
with modules beneath. with modules beneath.
- backend - the rclone backends for interfacing to cloud providers - * backend - the rclone backends for interfacing to cloud providers -
- all - import this to load all the cloud providers * all - import this to load all the cloud providers
- ...providers * ...providers
- bin - scripts for use while building or maintaining rclone * bin - scripts for use while building or maintaining rclone
- cmd - the rclone commands * cmd - the rclone commands
- all - import this to load all the commands * all - import this to load all the commands
- ...commands * ...commands
- cmdtest - end-to-end tests of commands, flags, environment variables,... * cmdtest - end-to-end tests of commands, flags, environment variables,...
- docs - the documentation and website * docs - the documentation and website
- content - adjust these docs only - everything else is autogenerated * content - adjust these docs only - everything else is autogenerated
- command - these are auto-generated - edit the corresponding .go file * command - these are auto-generated - edit the corresponding .go file
- fs - main rclone definitions - minimal amount of code * fs - main rclone definitions - minimal amount of code
- accounting - bandwidth limiting and statistics * accounting - bandwidth limiting and statistics
- asyncreader - an io.Reader which reads ahead * asyncreader - an io.Reader which reads ahead
- config - manage the config file and flags * config - manage the config file and flags
- driveletter - detect if a name is a drive letter * driveletter - detect if a name is a drive letter
- filter - implements include/exclude filtering * filter - implements include/exclude filtering
- fserrors - rclone specific error handling * fserrors - rclone specific error handling
- fshttp - http handling for rclone * fshttp - http handling for rclone
- fspath - path handling for rclone * fspath - path handling for rclone
- hash - defines rclone's hash types and functions * hash - defines rclone's hash types and functions
- list - list a remote * list - list a remote
- log - logging facilities * log - logging facilities
- march - iterates directories in lock step * march - iterates directories in lock step
- object - in memory Fs objects * object - in memory Fs objects
- operations - primitives for sync, e.g. Copy, Move * operations - primitives for sync, e.g. Copy, Move
- sync - sync directories * sync - sync directories
- walk - walk a directory * walk - walk a directory
- fstest - provides integration test framework * fstest - provides integration test framework
- fstests - integration tests for the backends * fstests - integration tests for the backends
- mockdir - mocks an fs.Directory * mockdir - mocks an fs.Directory
- mockobject - mocks an fs.Object * mockobject - mocks an fs.Object
- test_all - Runs integration tests for everything * test_all - Runs integration tests for everything
- graphics - the images used in the website, etc. * graphics - the images used in the website, etc.
- lib - libraries used by the backend * lib - libraries used by the backend
- atexit - register functions to run when rclone exits * atexit - register functions to run when rclone exits
- dircache - directory ID to name caching * dircache - directory ID to name caching
- oauthutil - helpers for using oauth * oauthutil - helpers for using oauth
- pacer - retries with backoff and paces operations * pacer - retries with backoff and paces operations
- readers - a selection of useful io.Readers * readers - a selection of useful io.Readers
- rest - a thin abstraction over net/http for REST * rest - a thin abstraction over net/http for REST
- librclone - in memory interface to rclone's API for embedding rclone * vfs - Virtual FileSystem layer for implementing rclone mount and similar
- vfs - Virtual FileSystem layer for implementing rclone mount and similar
## Writing Documentation ## Writing Documentation ##
If you are adding a new feature then please update the documentation. If you are adding a new feature then please update the documentation.
@@ -286,22 +277,22 @@ alphabetical order.
If you add a new backend option/flag, then it should be documented in If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field. the source file in the `Help:` field.
- Start with the most important information about the option,
  as a single sentence on a single line.
  - This text will be used for the command-line flag help.
  - It will be combined with other information, such as any default value,
    and the result will look odd if not written as a single sentence.
  - It should end with a period/full stop character, which will be shown
    in docs but automatically removed when producing the flag help.
  - Try to keep it below 80 characters, to reduce text wrapping in the terminal.
- More details can be added in a new paragraph, after an empty line (`"\n\n"`).
  - Like with docs generated from Markdown, a single line break is ignored
    and two line breaks creates a new paragraph.
  - This text will be shown to the user in `rclone config`
    and in the docs (where it will be added by `make backenddocs`,
    normally run some time before next release).
- To create options of enumeration type use the `Examples:` field.
  - Each example value has its own `Help:` field, but they are treated
    a bit differently than the main option help text. They will be shown
    as an unordered list, therefore a single line break is enough to
    create a new list item. Also, for enumeration texts like name of
@@ -321,12 +312,12 @@ combined unmodified with other information (such as any default value).
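To make these rules concrete, here is a sketch of how the `Help:` and
`Examples:` fields fit into a backend's option definitions. The backend
and option names here are invented, and a real backend sets many more
fields (notably `NewFs`):

```
package remote

import "github.com/rclone/rclone/fs"

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "remote",
		Description: "Example backend",
		Options: []fs.Option{{
			Name: "endpoint",
			// First sentence on a single line - used for the flag help.
			Help: "Endpoint for the service.\n\nLeave blank to use the default.",
		}, {
			Name: "acl",
			Help: "Access control list to apply to new objects.",
			// Examples turns this into an enumeration in rclone config.
			Examples: []fs.OptionExample{{
				Value: "private",
				Help:  "Only the owner gets access.",
			}, {
				Value: "public-read",
				Help:  "Everyone can read the object.",
			}},
		}},
	})
}
```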
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy.
## Making a release

There are separate instructions for making a release in the RELEASE.md
file.
## Commit messages

Please make the first line of your commit message a summary of the
change that a user (not a developer) of rclone would like to read, and
@@ -367,7 +358,7 @@ error fixing the hang.
Fixes #1498
```
## Adding a dependency

rclone uses the [go
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
@@ -379,7 +370,7 @@ To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency and add it to
`go.mod` and `go.sum`.
    go get github.com/ncw/new_dependency
You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.
@@ -387,15 +378,15 @@ go docs linked above), but don't unless you really need to.
Please check in the changes generated by `go mod` including `go.mod`
and `go.sum` in the same commit as your other changes.
## Updating a dependency

If you need to update a dependency then run

    go get golang.org/x/crypto

Check in a single commit as above.
## Updating all the dependencies

In order to update all the dependencies then run `make update`. This
just uses the go modules to update all the modules to their latest
@@ -404,7 +395,7 @@ stable release. Check in the changes in a single commit as above.
This should be done early in the release cycle to pick up new versions
of packages in time for them to get some testing.
## Updating a backend

If you update a backend then please run the unit tests and the
integration tests for that backend.
@@ -419,133 +410,82 @@ integration tests.
The next section goes into more detail about the tests.
## Writing a new backend

Choose a name. The docs here will use `remote` as an example.

Note that in rclone terminology a file system backend is called a
remote or an fs.
### Research
- Look at the interfaces defined in `fs/types.go` (the core is excerpted below)
- Study one or more of the existing remotes
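For orientation, the heart of what a backend implements is the `fs.Fs`
interface. This is an abbreviated excerpt - see `fs/types.go` for the
full definitions and doc comments:

```
// Fs is the interface a cloud storage system must provide
type Fs interface {
	Info // Name, Root, String, Precision, Hashes and Features

	// List the objects and directories in dir
	List(ctx context.Context, dir string) (DirEntries, error)
	// NewObject finds the Object at remote
	NewObject(ctx context.Context, remote string) (Object, error)
	// Put uploads the object with the contents of in
	Put(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error)
	// Mkdir makes the directory
	Mkdir(ctx context.Context, dir string) error
	// Rmdir removes an empty directory
	Rmdir(ctx context.Context, dir string) error
}
```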
### Getting going
- Create `backend/remote/remote.go` (copy this from a similar remote)
  - box is a good one to start from if you have a directory-based remote (and shows how to use the directory cache)
  - b2 is a good one to start from if you have a bucket-based remote
- Add your remote to the imports in `backend/all/all.go`
- HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good Go SDK from the provider then use that instead.
- Try to implement as many optional methods as possible as it makes the remote more usable.
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
  - `rclone purge -v TestRemote:rclone-info`
  - `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
  - `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
  - open `remote.csv` in a spreadsheet and examine
### Guidelines for a speedy merge
- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend.
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) if your backend is HTTP based - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
- **Do** follow your example backend exactly - use the same code order, function names, layout, structure. **Don't** move stuff around and **Don't** delete the comments.
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few backends like that - don't follow them!)
- **Do** put your API type definitions in a separate file - by preference `api/types.go`
- **Remember** we have >50 backends to maintain so keeping them as similar as possible to each other is a high priority!
### Unit tests
- Create a config entry called `TestRemote` for the unit tests to use
- Create a `backend/remote/remote_test.go` - copy and adjust your example remote (a minimal version is sketched below)
- Make sure all tests pass with `go test -v`
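The test file is mostly boilerplate. A minimal sketch, assuming your
backend package is called `remote` and exports its `Object` type:

```
// Test Remote filesystem interface
package remote_test

import (
	"testing"

	"github.com/rclone/rclone/backend/remote"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestRemote:",
		NilObject:  (*remote.Object)(nil),
	})
}
```

This drives the full `fstests` suite against the `TestRemote:` config
entry created above.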
### Integration tests
- Add your backend to `fstest/test_all/config.yaml`
- Once you've done that then you can use the integration test framework from the project root:
  - go install ./...
  - test_all -backends remote
Or if you want to run the integration tests manually:
- Make sure integration tests pass with
  - `cd fs/operations`
  - `go test -v -remote TestRemote:`
  - `cd fs/sync`
  - `go test -v -remote TestRemote:`
- If your remote defines `ListR` check with this also
  - `go test -v -remote TestRemote: -fast-list`
See the [testing](#testing) section for more information on integration tests.
### Backend documentation

Add your backend to the docs - you'll need to pick an icon for it from
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
alphabetical order of full name of remote (e.g. `drive` is ordered as
`Google Drive`) but with the local file system last.
- `README.md` - main GitHub page
- `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
  - make sure this has the `autogenerated options` comments in (see your reference backend docs)
  - update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table.
- `docs/content/docs.md` - list of remotes in config section
- `docs/content/_index.md` - front page of rclone.org
- `docs/layouts/chrome/navbar.html` - add it to the website navigation
- `bin/make_manual.py` - add the page to the `docs` constant
Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.
## Adding a new s3 provider
It is quite easy to add a new S3 provider to rclone.
You'll need to modify the following files
- `backend/s3/s3.go`
  - Add the provider to `providerOption` at the top of the file
  - Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
  - Exclude your provider from generic config questions (e.g. `region` and `endpoint`).
  - Add the provider to the `setQuirks` function - see the documentation there.
- `docs/content/s3.md`
  - Add the provider at the top of the page.
  - Add a section about the provider linked from there.
  - Add a transcript of a trial `rclone config` session
    - Edit the transcript to remove things which might change in subsequent versions
  - **Do not** alter or add to the autogenerated parts of `s3.md`
  - **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
- `README.md` - this is the home page in github
  - Add the provider and a link to the section you wrote in `docs/contents/s3.md`
- `docs/content/_index.md` - this is the home page of rclone.org
  - Add the provider and a link to the section you wrote in `docs/contents/s3.md`
When adding the provider, endpoints, quirks, docs etc. keep them in
alphabetical order by `Provider` name, but with `AWS` first and
`Other` last.
Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.
Once you've written the code, test `rclone config` works to your
satisfaction, and check the integration tests work with `go test -v -remote
NewS3Provider:`. You may need to adjust the quirks to get them to
pass. Some providers just can't pass the tests with control characters
in the names, so if these fail and the provider doesn't support
`urlEncodeListings` in the quirks then ignore them. Note that the
`SetTier` test may also fail on non-AWS providers.
For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone/rclone/commit/eb3082a1ebdb76d5625f14cedec3f5154a5e7b10).
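The quirks mechanism is easiest to see in miniature. The following is
not rclone's actual code, just a self-contained sketch of the idea -
`setQuirks` in `backend/s3/s3.go` switches on the provider name and
turns individual features on or off, and `NewS3Provider` here is a
hypothetical name:

```
package main

import "fmt"

// quirks models a few of the feature flags setQuirks controls.
type quirks struct {
	listObjectsV2     bool // provider supports ListObjectsV2
	virtualHostStyle  bool // bucket.domain addressing works
	urlEncodeListings bool // listings can be URL encoded
	useMultipartEtag  bool // multipart ETags can be verified
}

func quirksFor(provider string) quirks {
	switch provider {
	case "AWS":
		// AWS is the reference implementation - everything works.
		return quirks{true, true, true, true}
	case "NewS3Provider":
		// Start pessimistic and enable features one by one as the
		// integration tests prove they work.
		return quirks{}
	default:
		return quirks{}
	}
}

func main() {
	fmt.Printf("%+v\n", quirksFor("NewS3Provider"))
}
```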
## Writing a plugin
New features (backends, commands) can also be added "out-of-tree", through Go plugins.
Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
### Usage
- Naming
  - Plugin names must have the pattern `librcloneplugin_KIND_NAME.so`.
@@ -560,7 +500,7 @@ This is useful if you can't merge your changes upstream or don't want to maintai
  - Plugins must be compiled against the exact version of rclone to work.
    (The rclone used during building the plugin must be the same as the source of rclone)
### Building
To turn your existing additions into a Go plugin, move them to an external repository
and change the top-level package name to `main`.
@@ -571,18 +511,4 @@ Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
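As a sketch, the top-level package of a backend plugin can be this
small (the module path and plugin name here are invented):

```
// Package main is what -buildmode=plugin requires. Build with:
//
//	go build -buildmode=plugin -o librcloneplugin_backend_mybackend.so .
package main

import (
	// A blank import is enough - the backend registers itself with
	// rclone in its init() function.
	_ "example.com/you/rclone-backend-mybackend"
)

// main is never run when the package is loaded as a plugin.
func main() {}
```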
[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
## Keeping a backend or command out of tree
Rclone was designed to be modular so it is very easy to keep a backend
or a command out of the main rclone source tree.
So for example if you had a backend which accessed your proprietary
systems or a command which was specialised for your needs you could
add them out of tree.
This may be easier than using a plugin and is supported on all
platforms, not just macOS and Linux.
This is explained further in https://github.com/rclone/rclone_out_of_tree_example
which has an example of an out of tree backend `ram` (which is a
renamed version of the `memory` backend).
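The out of tree binary is essentially rclone's own `main` with one
extra import. A minimal sketch, assuming your backend lives at
`example.com/you/ram` and registers itself in `init()`:

```
package main

import (
	_ "example.com/you/ram" // your out of tree backend

	_ "github.com/rclone/rclone/backend/all" // the standard backends
	"github.com/rclone/rclone/cmd"
	_ "github.com/rclone/rclone/cmd/all" // the standard commands
)

func main() {
	cmd.Main()
}
```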
Dockerfile
@@ -1,46 +1,17 @@
FROM golang:alpine AS builder

ARG CGO_ENABLED=0

WORKDIR /go/src/github.com/rclone/rclone/

RUN echo "**** Set Go Environment Variables ****" && \
	go env -w GOCACHE=/root/.cache/go-build

RUN echo "**** Install Dependencies ****" && \
	apk add --no-cache \
	make \
	bash \
	gawk \
	git

COPY go.mod .
COPY go.sum .

RUN echo "**** Download Go Dependencies ****" && \
	go mod download -x

RUN echo "**** Verify Go Dependencies ****" && \
	go mod verify

COPY . .

RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \
	echo "**** Build Binary ****" && \
	make

RUN echo "**** Print Version Binary ****" && \
	./rclone version

# Begin final image
FROM alpine:latest

RUN echo "**** Install Dependencies ****" && \
	apk add --no-cache \
	ca-certificates \
	fuse3 \
	tzdata && \
	echo "Enable user_allow_other in fuse" && \
	echo "user_allow_other" >> /etc/fuse.conf

COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
MAINTAINERS.md
@@ -16,13 +16,6 @@ Current active maintainers of rclone are:
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | storj backend |
| wiserain | @wiserain | pikpak backend |
| albertony | @albertony | |
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
| Hideo Aoyama | @boukendesho | snap packaging |
| nielash | @nielash | bisync |
| Dan McArdle | @dmcardle | gitannex |
| Sam Harrison | @childish-sambino | filescom |

**This is a work in progress Draft**
MANUAL.html, MANUAL.md and MANUAL.txt (generated) - diffs suppressed because they are too large

Makefile
@@ -30,37 +30,29 @@ ifdef RELEASE_TAG
TAG := $(RELEASE_TAG)
endif
GO_VERSION := $(shell go version)
GO_OS := $(shell go env GOOS)
ifdef BETA_SUBDIR
	BETA_SUBDIR := /$(BETA_SUBDIR)
endif
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := beta.rclone.org:
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
# Pass in GOTAGS=xyz on the make command line to set build tags
ifdef GOTAGS
BUILDTAGS=-tags "$(GOTAGS)"
LINTTAGS=--build-tags "$(GOTAGS)"
endif
LDFLAGS=--ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)"

.PHONY: rclone test_all vars version

rclone:
ifeq ($(GO_OS),windows)
	go run bin/resource_windows.go -version $(TAG) -syso resource_windows_`go env GOARCH`.syso
endif
	go build -v $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS)
ifeq ($(GO_OS),windows)
	rm resource_windows_`go env GOARCH`.syso
endif
	mkdir -p `go env GOPATH`/bin/
	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
	mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`

test_all:
	go install $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all

vars:
	@echo SHELL="'$(SHELL)'"
@@ -74,10 +66,6 @@ btest:
@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip @echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
@echo "Copied markdown of beta release to clip board" @echo "Copied markdown of beta release to clip board"
btesth:
@echo "<a href="$(BETA_URL)">$(TAG)</a> on branch <a href="https://github.com/rclone/rclone/tree/$(BRANCH)">$(BRANCH)</a> (uploaded in 15-30 mins)" | xclip -r -sel clip -t text/html
@echo "Copied beta release in HTML to clip board"
version: version:
@echo '$(TAG)' @echo '$(TAG)'
@@ -93,9 +81,6 @@ quicktest:
racequicktest:
	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...

compiletest:
	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...

# Do source code quality checks
check: rclone
	@echo "-- START CODE QUALITY REPORT -------------------------------"
@@ -104,11 +89,15 @@ check: rclone
# Get the build dependencies
build_dep:
	go run bin/get-github-release.go -use-api -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'

# Get the release dependencies we only install on linux
release_dep_linux:
	go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest

# Get the release dependencies we only install on Windows
release_dep_windows:
	GOOS="" GOARCH="" go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest

# Update dependencies
showupdates:
@@ -144,21 +133,17 @@ MANUAL.txt: MANUAL.md
	pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt

commanddocs: rclone
	-@rmdir -p '$$HOME/.config/rclone'
	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)

backenddocs: rclone bin/make_backend_docs.py
	-@rmdir -p '$$HOME/.config/rclone'
	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)

rcdocs: rclone
	bin/make_rc_docs.sh

install: rclone
	install -d ${DESTDIR}/usr/bin
	install ${GOPATH}/bin/rclone ${DESTDIR}/usr/bin

clean:
	go clean ./...
@@ -172,7 +157,7 @@ website:
@if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi @if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi
upload_website: website upload_website: website
rclone -v sync docs/public www.rclone.org: rclone -v sync docs/public memstore:www-rclone-org
upload_test_website: website upload_test_website: website
rclone -P sync docs/public test-rclone-org: rclone -P sync docs/public test-rclone-org:
@@ -199,8 +184,8 @@ check_sign:
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
upload: upload:
rclone -P copy build/ downloads.rclone.org:/$(TAG) rclone -P copy build/ memstore:downloads-rclone-org/$(TAG)
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "downloads.rclone.org:/$(TAG)/$$i" "downloads.rclone.org:/$$j"' rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"'
upload_github: upload_github:
./bin/upload-github $(TAG) ./bin/upload-github $(TAG)
@@ -210,7 +195,7 @@ cross: doc
beta:
	go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
	rclone -v copy build/ pub.rclone.org:/$(TAG)
	@echo Beta release ready at https://pub.rclone.org/$(TAG)/

log_since_last_release:
@@ -223,18 +208,18 @@ ci_upload:
	sudo chown -R $$USER build
	find build -type l -delete
	gzip -r9v build
	./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
	./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
	@echo Beta release ready at $(BETA_URL)/testbuilds

ci_beta:
	git log $(LAST_TAG).. > /tmp/git-log.txt
	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
	rclone --no-check-dest --config bin/ci.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
	rclone --no-check-dest --config bin/ci.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
endif
	@echo Beta release ready at $(BETA_URL)
@@ -243,7 +228,7 @@ fetch_binaries:
	rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/

serve: website
	cd docs && hugo server --logLevel info -w --disableFastRender

tag: retag doc
	bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
README.md
@@ -1,5 +1,3 @@
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only) [<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only) [<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
@@ -25,94 +23,65 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
* Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
* FileLu [:page_facing_up:](https://rclone.org/filelu/)
* Files.com [:page_facing_up:](https://rclone.org/filescom/)
* FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GoFile [:page_facing_up:](https://rclone.org/gofile/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
* Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
* iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
* ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
* Linkbox [:page_facing_up:](https://rclone.org/linkbox)
* Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
* Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* MEGA [:page_facing_up:](https://rclone.org/mega/)
* MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega)
* Memory [:page_facing_up:](https://rclone.org/memory/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
* Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
* Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
* Zata.ai [:page_facing_up:](https://rclone.org/s3/#Zata)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
@@ -137,7 +106,6 @@ These backends adapt or modify other storage providers
* Partial syncs supported on a whole file basis
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
* [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync bidirectionally
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, e.g. two different cloud accounts
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
RELEASE.md
@@ -10,7 +10,7 @@ This file describes how to make the various kinds of releases
## Making a release

* git checkout master # see below for stable branch
* git pull # IMPORTANT
* git status - make sure everything is checked in
* Check GitHub actions build for master is Green
* make test # see integration test server or run locally
@@ -21,7 +21,6 @@ This file describes how to make the various kinds of releases
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* make retag
* git push origin # without --follow-tags so it doesn't push the tag if it fails
* git push --follow-tags origin
* # Wait for the GitHub builds to complete then...
* make fetch_binaries
@@ -37,52 +36,16 @@ This file describes how to make the various kinds of releases
## Update dependencies

Early in the next release cycle update the dependencies.

* Review any pinned packages in go.mod and remove if possible
* `make updatedirect`
* `make GOTAGS=cmount`
* `make compiletest`
* Fix anything which doesn't compile at this point and commit changes here
* `git commit -a -v -m "build: update all dependencies"`
If the `make updatedirect` upgrades the version of go in the `go.mod`

    go 1.22.0

then go to manual mode. `go1.22` here is the lowest supported version
in the `go.mod`.
If `make updatedirect` added a `toolchain` directive then remove it.
We don't want to force a toolchain on our users. Linux packagers are
often using a version of Go that is a few versions out of date.
```
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
go get -d $(cat /tmp/potential-upgrades)
go mod tidy -go=1.22 -compat=1.22
```
If the `go mod tidy` fails, use its output to remove the packages
which can't be upgraded from `/tmp/potential-upgrades`. When done
```
git co go.mod go.sum
```
And try again.
Optionally upgrade the direct and indirect dependencies. This is very
likely to fail if the manual method was used above - in that case
ignore it as it is too time consuming to fix.
* `make update`
* `make GOTAGS=cmount`
* `make compiletest`
* roll back any updates which didn't compile
* `git commit -a -v --amend`
* **NB** watch out for this changing the default go version in `go.mod`

Note that `make update` updates all direct and indirect dependencies
and there can occasionally be forwards compatibility problems with
@@ -90,19 +53,6 @@ doing that so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to
build.
Once it compiles locally, push it on a test branch and commit fixes
until the tests pass.
### Major versions
The above procedure will not upgrade major versions, so v2 to v3.
However this tool can show which major versions might need to be
upgraded:
    go run github.com/icholy/gomajor@latest list -major
Expect API breakage when updating major versions.
## Tidy beta

At some point after the release run
@@ -124,69 +74,50 @@ Set vars
First make the release branch. If this is a second point release then
this will be done already.

* git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
* make startstable

Now

* git co ${BASE_TAG}-stable
* git cherry-pick any fixes
* Do the steps as above
* make startstable
* git co master
* `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
* git checkout ${BASE_TAG}-stable docs/content/changelog.md
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* git push
## Sponsor logos

If updating the website note that the sponsor logos have been moved out of the main repository.

You will need to checkout `/docs/static/img/logos` from https://github.com/rclone/third-party-logos
which is a private repo containing artwork from sponsors.

## Update the website between releases

Create an update website branch based off the last release

    git co -b update-website

If the branch already exists, double check there are no commits that need saving.

Now reset the branch to the last release

    git reset --hard v1.64.0

Create the changes, check them in, test with `make serve` then

    make upload_test_website

Check out https://test.rclone.org and when happy

    make upload_website

Cherry pick any changes back to master and the stable branch if it is active.
## Making a manual build of docker

To do a basic build of rclone's docker image to debug builds locally:

```
docker buildx build --load -t rclone/rclone:testing --progress=plain .
docker run --rm rclone/rclone:testing version
```

To test the multiplatform build

```
docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
```

To make a full build then set the tags correctly and add `--push`

Note that you can't only build one architecture - you need to build them all.

```
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
```
VERSION
@@ -1 +1 @@
v1.71.0
backend/alias/alias_internal_test.go
@@ -23,8 +23,8 @@ func prepare(t *testing.T, root string) {
	configfile.Install()
	// Configure the remote
	config.FileSetValue(remoteName, "type", "alias")
	config.FileSetValue(remoteName, "remote", root)
}

func TestNewFS(t *testing.T) {
@@ -81,12 +81,10 @@ func TestNewFS(t *testing.T) {
	for i, gotEntry := range gotEntries {
		what := fmt.Sprintf("%s, entry=%d", what, i)
		wantEntry := test.entries[i]
		_, isDir := gotEntry.(fs.Directory)

		require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
		if !isDir {
			require.Equal(t, wantEntry.size, gotEntry.Size(), what)
		}
		require.Equal(t, wantEntry.isDir, isDir, what)
	}
}
backend/all/all.go
@@ -4,37 +4,30 @@ package all
import (
	// Active file systems
	_ "github.com/rclone/rclone/backend/alias"
	_ "github.com/rclone/rclone/backend/azureblob"
	_ "github.com/rclone/rclone/backend/azurefiles"
	_ "github.com/rclone/rclone/backend/b2"
	_ "github.com/rclone/rclone/backend/box"
	_ "github.com/rclone/rclone/backend/cache"
	_ "github.com/rclone/rclone/backend/chunker"
	_ "github.com/rclone/rclone/backend/cloudinary"
	_ "github.com/rclone/rclone/backend/combine"
	_ "github.com/rclone/rclone/backend/compress"
	_ "github.com/rclone/rclone/backend/crypt"
	_ "github.com/rclone/rclone/backend/doi"
	_ "github.com/rclone/rclone/backend/drive"
	_ "github.com/rclone/rclone/backend/dropbox"
	_ "github.com/rclone/rclone/backend/fichier"
	_ "github.com/rclone/rclone/backend/filefabric"
	_ "github.com/rclone/rclone/backend/filelu"
	_ "github.com/rclone/rclone/backend/filescom"
	_ "github.com/rclone/rclone/backend/ftp"
	_ "github.com/rclone/rclone/backend/gofile"
	_ "github.com/rclone/rclone/backend/googlecloudstorage"
	_ "github.com/rclone/rclone/backend/googlephotos"
	_ "github.com/rclone/rclone/backend/hasher"
	_ "github.com/rclone/rclone/backend/hdfs"
	_ "github.com/rclone/rclone/backend/hidrive"
	_ "github.com/rclone/rclone/backend/http"
	_ "github.com/rclone/rclone/backend/iclouddrive"
	_ "github.com/rclone/rclone/backend/imagekit"
	_ "github.com/rclone/rclone/backend/internetarchive"
	_ "github.com/rclone/rclone/backend/jottacloud"
	_ "github.com/rclone/rclone/backend/koofr"
	_ "github.com/rclone/rclone/backend/linkbox"
	_ "github.com/rclone/rclone/backend/local"
	_ "github.com/rclone/rclone/backend/mailru"
	_ "github.com/rclone/rclone/backend/mega"
@@ -42,25 +35,18 @@ import (
_ "github.com/rclone/rclone/backend/netstorage" _ "github.com/rclone/rclone/backend/netstorage"
_ "github.com/rclone/rclone/backend/onedrive" _ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive" _ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud" _ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak"
_ "github.com/rclone/rclone/backend/pixeldrain"
_ "github.com/rclone/rclone/backend/premiumizeme" _ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/protondrive"
_ "github.com/rclone/rclone/backend/putio" _ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor" _ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/quatrix"
_ "github.com/rclone/rclone/backend/s3" _ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/seafile" _ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp" _ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile" _ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sia" _ "github.com/rclone/rclone/backend/sia"
_ "github.com/rclone/rclone/backend/smb"
_ "github.com/rclone/rclone/backend/storj" _ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync" _ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift" _ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/ulozto"
_ "github.com/rclone/rclone/backend/union" _ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/uptobox" _ "github.com/rclone/rclone/backend/uptobox"
_ "github.com/rclone/rclone/backend/webdav" _ "github.com/rclone/rclone/backend/webdav"
backend/amazonclouddrive/amazonclouddrive.go - diff suppressed because it is too large

backend/amazonclouddrive/amazonclouddrive_test.go
@@ -0,0 +1,21 @@
// Test AmazonCloudDrive filesystem interface
//go:build acd
// +build acd

package amazonclouddrive_test

import (
	"testing"

	"github.com/rclone/rclone/backend/amazonclouddrive"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
	fstests.RemoteName = "TestAmazonCloudDrive:"
	fstests.Run(t)
}
backend/azureblob/azureblob.go - diff suppressed because it is too large

backend/azureblob/azureblob_internal_test.go
@@ -1,151 +1,36 @@
//go:build !plan9 && !solaris && !js //go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob package azureblob
import ( import (
"context"
"encoding/base64"
"strings"
"testing" "testing"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
func TestBlockIDCreator(t *testing.T) { func (f *Fs) InternalTest(t *testing.T) {
// Check creation and random number // Check first feature flags are set on this
bic, err := newBlockIDCreator() // remote
require.NoError(t, err)
bic2, err := newBlockIDCreator()
require.NoError(t, err)
assert.NotEqual(t, bic.random, bic2.random)
assert.NotEqual(t, bic.random, [8]byte{})
// Set random to known value for tests
bic.random = [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
chunkNumber := uint64(0xFEDCBA9876543210)
// Check creation of ID
want := base64.StdEncoding.EncodeToString([]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 1, 2, 3, 4, 5, 6, 7, 8})
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", want)
got := bic.newBlockID(chunkNumber)
assert.Equal(t, want, got)
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", got)
// Test checkID is working
assert.NoError(t, bic.checkID(chunkNumber, got))
assert.ErrorContains(t, bic.checkID(chunkNumber, "$"+got), "illegal base64")
assert.ErrorContains(t, bic.checkID(chunkNumber, "AAAA"+got), "bad block ID length")
assert.ErrorContains(t, bic.checkID(chunkNumber+1, got), "expecting decoded")
assert.ErrorContains(t, bic2.checkID(chunkNumber, got), "random bytes")
}
func (f *Fs) testFeatures(t *testing.T) {
// Check first feature flags are set on this remote
enabled := f.Features().SetTier enabled := f.Features().SetTier
assert.True(t, enabled) assert.True(t, enabled)
enabled = f.Features().GetTier enabled = f.Features().GetTier
assert.True(t, enabled) assert.True(t, enabled)
} }
type ReadSeekCloser struct { func TestIncrement(t *testing.T) {
*strings.Reader for _, test := range []struct {
} in []byte
want []byte
func (r *ReadSeekCloser) Close() error { }{
return nil {[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
} {[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
// Stage a block at remote but don't commit it {[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
func (f *Fs) stageBlockWithoutCommit(ctx context.Context, t *testing.T, remote string) { {[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
var ( {[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
containerName, blobPath = f.split(remote) } {
containerClient = f.cntSVC(containerName) increment(test.in)
blobClient = containerClient.NewBlockBlobClient(blobPath) assert.Equal(t, test.want, test.in)
data = "uncommitted data"
blockID = "1"
blockIDBase64 = base64.StdEncoding.EncodeToString([]byte(blockID))
)
r := &ReadSeekCloser{strings.NewReader(data)}
_, err := blobClient.StageBlock(ctx, blockIDBase64, r, nil)
require.NoError(t, err)
// Verify the block is staged but not committed
blockList, err := blobClient.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
require.NoError(t, err)
found := false
for _, block := range blockList.UncommittedBlocks {
if *block.Name == blockIDBase64 {
found = true
break
} }
}
require.True(t, found, "Block ID not found in uncommitted blocks")
}
// This tests uploading a blob where it has uncommitted blocks with a different ID size.
//
// https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/
//
// TestIntegration/FsMkdir/FsPutFiles/Internal/WriteUncommittedBlocks
func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
var (
ctx = context.Background()
remote = "testBlob"
)
// Multipart copy the blob please
oldUseCopyBlob, oldCopyCutoff := f.opt.UseCopyBlob, f.opt.CopyCutoff
f.opt.UseCopyBlob = false
f.opt.CopyCutoff = f.opt.ChunkSize
defer func() {
f.opt.UseCopyBlob, f.opt.CopyCutoff = oldUseCopyBlob, oldCopyCutoff
}()
// Create a blob with uncommitted blocks
f.stageBlockWithoutCommit(ctx, t, remote)
// Now attempt to overwrite the block with a different sized block ID to provoke this error
// Check the object does not exist
_, err := f.NewObject(ctx, remote)
require.Equal(t, fs.ErrorObjectNotFound, err)
// Upload a multipart file over the block with uncommitted chunks of a different ID size
size := 4*int(f.opt.ChunkSize) - 1
contents := random.String(size)
item := fstest.NewItem(remote, contents, fstest.Time("2001-05-06T04:05:06.499Z"))
o := fstests.PutTestContents(ctx, t, f, &item, contents, true)
// Check size
assert.Equal(t, int64(size), o.Size())
// Create a new blob with uncommitted blocks
newRemote := "testBlob2"
f.stageBlockWithoutCommit(ctx, t, newRemote)
// Copy over that block
dst, err := f.Copy(ctx, o, newRemote)
require.NoError(t, err)
// Check basics
assert.Equal(t, int64(size), dst.Size())
assert.Equal(t, newRemote, dst.Remote())
// Check contents
gotContents := fstests.ReadObject(ctx, t, dst, -1)
assert.Equal(t, contents, gotContents)
// Remove the object
require.NoError(t, dst.Remove(ctx))
}
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Features", f.testFeatures)
t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
} }
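The TestIncrement vectors on the added side above pin down the behaviour of the increment helper: the byte slice is treated as a little-endian counter that carries into the next byte on overflow and wraps to all zeroes at the end. A minimal sketch that satisfies those vectors (the helper itself lives in azureblob.go, whose diff is suppressed above, so this is an illustration rather than the code from this change):

func increment(xs []byte) {
	for i, digit := range xs {
		newDigit := digit + 1
		xs[i] = newDigit
		if newDigit >= digit {
			// exit if no carry into the next byte
			break
		}
	}
}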

backend/azureblob/azureblob_test.go

@@ -1,51 +1,26 @@
 // Test AzureBlob filesystem interface

 //go:build !plan9 && !solaris && !js
+// +build !plan9,!solaris,!js

 package azureblob

 import (
+	"context"
 	"testing"

 	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/fstest/fstests"
 	"github.com/stretchr/testify/assert"
 )

 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
-	name := "TestAzureBlob"
 	fstests.Run(t, &fstests.Opt{
-		RemoteName:  name + ":",
+		RemoteName:  "TestAzureBlob:",
 		NilObject:   (*Object)(nil),
-		TiersToTest: []string{"Hot", "Cool", "Cold"},
-		ChunkedUpload: fstests.ChunkedUploadConfig{
-			MinChunkSize: defaultChunkSize,
-		},
-		ExtraConfig: []fstests.ExtraConfigItem{
-			{Name: name, Key: "use_copy_blob", Value: "false"},
-		},
-	})
-}
-
-// TestIntegration2 runs integration tests against the remote
-func TestIntegration2(t *testing.T) {
-	if *fstest.RemoteName != "" {
-		t.Skip("Skipping as -remote set")
-	}
-	name := "TestAzureBlob"
-	fstests.Run(t, &fstests.Opt{
-		RemoteName:  name + ":",
-		NilObject:   (*Object)(nil),
-		TiersToTest: []string{"Hot", "Cool", "Cold"},
-		ChunkedUpload: fstests.ChunkedUploadConfig{
-			MinChunkSize: defaultChunkSize,
-		},
-		ExtraConfig: []fstests.ExtraConfigItem{
-			{Name: name, Key: "directory_markers", Value: "true"},
-			{Name: name, Key: "use_copy_blob", Value: "false"},
-		},
+		TiersToTest:   []string{"Hot", "Cool"},
+		ChunkedUpload: fstests.ChunkedUploadConfig{},
 	})
 }
@@ -53,15 +28,40 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
 	return f.setUploadChunkSize(cs)
 }

-func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setCopyCutoff(cs)
-}
-
 var (
 	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
-	_ fstests.SetCopyCutoffer     = (*Fs)(nil)
 )

+// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
+func TestServicePrincipalFileSuccess(t *testing.T) {
+	ctx := context.TODO()
+	credentials := `
+{
+	"appId": "my application (client) ID",
+	"password": "my secret",
+	"tenant": "my active directory tenant ID"
+}
+`
+	tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
+	if assert.NoError(t, err) {
+		assert.NotNil(t, tokenRefresher)
+	}
+}
+
+// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
+func TestServicePrincipalFileFailure(t *testing.T) {
+	ctx := context.TODO()
+	credentials := `
+{
+	"appId": "my application (client) ID",
+	"tenant": "my active directory tenant ID"
+}
+`
+	_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
+	assert.Error(t, err)
+	assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
+}
+
 func TestValidateAccessTier(t *testing.T) {
 	tests := map[string]struct {
 		accessTier string
@@ -71,7 +71,6 @@ func TestValidateAccessTier(t *testing.T) {
 		"HOT":     {"HOT", true},
 		"Hot":     {"Hot", true},
 		"cool":    {"cool", true},
-		"cold":    {"cold", true},
 		"archive": {"archive", true},
 		"empty":   {"", false},
 		"unknown": {"unknown", false},

backend/azureblob/azureblob_unsupported.go

@@ -2,6 +2,6 @@
// about "no buildable Go source files " // about "no buildable Go source files "
//go:build plan9 || solaris || js //go:build plan9 || solaris || js
// +build plan9 solaris js
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob package azureblob

backend/azureblob/imds.go (new file, 137 lines)

@@ -0,0 +1,137 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
)
const (
azureResource = "https://storage.azure.com"
imdsAPIVersion = "2018-02-01"
msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
)
// This custom type is used to add the port the test server has bound to
// to the request context.
type testPortKey string
type msiIdentifierType int
const (
msiClientID msiIdentifierType = iota
msiObjectID
msiResourceID
)
type userMSI struct {
Type msiIdentifierType
Value string
}
type httpError struct {
Response *http.Response
}
func (e httpError) Error() string {
return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
}
// GetMSIToken attempts to obtain an MSI token from the Azure Instance
// Metadata Service.
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
// Attempt to get an MSI token; silently continue if unsuccessful.
// This code has been lovingly stolen from azcopy's OAuthTokenManager.
result := adal.Token{}
req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
if err != nil {
fs.Debugf(nil, "Failed to create request: %v", err)
return result, err
}
params := req.URL.Query()
params.Set("resource", azureResource)
params.Set("api-version", imdsAPIVersion)
// Specify user-assigned identity if requested.
if identity != nil {
switch identity.Type {
case msiClientID:
params.Set("client_id", identity.Value)
case msiObjectID:
params.Set("object_id", identity.Value)
case msiResourceID:
params.Set("mi_res_id", identity.Value)
default:
// If this happens, the calling function and this one don't agree on
// what valid ID types exist.
return result, fmt.Errorf("unknown MSI identity type specified")
}
}
req.URL.RawQuery = params.Encode()
// The Metadata header is required by all calls to IMDS.
req.Header.Set("Metadata", "true")
// If this function is run in a test, query the test server instead of IMDS.
testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
if isTest {
req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
req.Host = req.URL.Host
}
// Send request
httpClient := fshttp.NewClient(ctx)
resp, err := httpClient.Do(req)
if err != nil {
return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
}
defer func() { // resp and Body should not be nil
_, err = io.Copy(ioutil.Discard, resp.Body)
if err != nil {
fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
}
err = resp.Body.Close()
if err != nil {
fs.Debugf(nil, "Unable to close IMDS response: %v", err)
}
}()
// Check if the status code indicates success
// The request returns 200 currently, add 201 and 202 as well for possible extension.
switch resp.StatusCode {
case 200, 201, 202:
break
default:
body, _ := ioutil.ReadAll(resp.Body)
fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
return result, httpError{Response: resp}
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, fmt.Errorf("couldn't read IMDS response: %w", err)
}
// Remove BOM, if any. azcopy does this so I'm following along.
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
// This would be a good place to persist the token if a large number of rclone
// invocations are being made in a short amount of time. If the token is
// persisted, the azureblob code will need to check for expiry before every
// storage API call.
err = json.Unmarshal(b, &result)
if err != nil {
return result, fmt.Errorf("couldn't unmarshal IMDS response: %w", err)
}
return result, nil
}
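A hypothetical caller of GetMSIToken, for orientation only; exampleMSIToken and its body are illustrative and not part of this diff:

func exampleMSIToken(ctx context.Context) (string, error) {
	// nil selects the system-assigned identity; pass e.g.
	// &userMSI{Type: msiClientID, Value: "<client id>"} for a user-assigned one.
	token, err := GetMSIToken(ctx, nil)
	if err != nil {
		return "", err
	}
	// The adal.Token carries the bearer token scoped to https://storage.azure.com.
	return token.AccessToken, nil
}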

backend/azureblob/imds_test.go

@@ -0,0 +1,118 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
require.NoError(t, err)
parameters := r.URL.Query()
(*actual)["path"] = r.URL.Path
(*actual)["Metadata"] = r.Header.Get("Metadata")
(*actual)["method"] = r.Method
for paramName := range parameters {
(*actual)[paramName] = parameters.Get(paramName)
}
// Make response.
response := adal.Token{}
responseBytes, err := json.Marshal(response)
require.NoError(t, err)
_, err = w.Write(responseBytes)
require.NoError(t, err)
}
}
func TestManagedIdentity(t *testing.T) {
// test user-assigned identity specifiers to use
testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
tests := []struct {
identity *userMSI
identityParameterName string
expectedAbsent []string
}{
{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
}
alwaysExpected := map[string]string{
"path": "/metadata/identity/oauth2/token",
"resource": "https://storage.azure.com",
"Metadata": "true",
"api-version": "2018-02-01",
"method": "GET",
}
for _, test := range tests {
actual := make(map[string]string, 10)
testServer := httptest.NewServer(handler(t, &actual))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, test.identity)
require.NoError(t, err)
// Validate expected query parameters present
expected := make(map[string]string)
for k, v := range alwaysExpected {
expected[k] = v
}
if test.identity != nil {
expected[test.identityParameterName] = test.identity.Value
}
for key := range expected {
value, exists := actual[key]
if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
test.identityParameterName, key) {
assert.Equalf(t, expected[key], value,
"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
}
}
// Validate unexpected query parameters absent
for _, key := range test.expectedAbsent {
_, exists := actual[key]
assert.Falsef(t, exists, "query parameter %s was unexpectedly passed", key)
}
}
}
func errorHandler(resultCode int) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Test error generated", resultCode)
}
}
func TestIMDSErrors(t *testing.T) {
errorCodes := []int{404, 429, 500}
for _, code := range errorCodes {
testServer := httptest.NewServer(errorHandler(code))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, nil)
require.Error(t, err)
httpErr, ok := err.(httpError)
require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
}
}

File diff suppressed because it is too large

backend/azurefiles/azurefiles_internal_test.go

@@ -1,69 +0,0 @@
//go:build !plan9 && !js
package azurefiles
import (
"context"
"math/rand"
"strings"
"testing"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
)
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Authentication", f.InternalTestAuth)
}
var _ fstests.InternalTester = (*Fs)(nil)
func (f *Fs) InternalTestAuth(t *testing.T) {
t.Skip("skipping since this requires authentication credentials which are not part of repo")
shareName := "test-rclone-oct-2023"
testCases := []struct {
name string
options *Options
}{
{
name: "ConnectionString",
options: &Options{
ShareName: shareName,
ConnectionString: "",
},
},
{
name: "AccountAndKey",
options: &Options{
ShareName: shareName,
Account: "",
Key: "",
}},
{
name: "SASUrl",
options: &Options{
ShareName: shareName,
SASURL: "",
}},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fs, err := newFsFromOptions(context.TODO(), "TestAzureFiles", "", tc.options)
assert.NoError(t, err)
dirName := randomString(10)
assert.NoError(t, fs.Mkdir(context.TODO(), dirName))
})
}
}
const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX"
func randomString(charCount int) string {
strBldr := strings.Builder{}
for range charCount {
randPos := rand.Int63n(52)
strBldr.WriteByte(chars[randPos])
}
return strBldr.String()
}

backend/azurefiles/azurefiles_test.go

@@ -1,17 +0,0 @@
//go:build !plan9 && !js
package azurefiles
import (
"testing"
"github.com/rclone/rclone/fstest/fstests"
)
func TestIntegration(t *testing.T) {
var objPtr *Object
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureFiles:",
NilObject: objPtr,
})
}

backend/azurefiles/azurefiles_unsupported.go

@@ -1,7 +0,0 @@
// Build for azurefiles for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || js
// Package azurefiles provides an interface to Microsoft Azure Files
package azurefiles

backend/b2/api/types.go

@@ -37,15 +37,6 @@ type Bucket struct {
AccountID string `json:"accountId"` AccountID string `json:"accountId"`
Name string `json:"bucketName"` Name string `json:"bucketName"`
Type string `json:"bucketType"` Type string `json:"bucketType"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
}
// LifecycleRule is a single lifecycle rule
type LifecycleRule struct {
DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"`
DaysFromStartingToCancelingUnfinishedLargeFiles *int `json:"daysFromStartingToCancelingUnfinishedLargeFiles"`
FileNamePrefix string `json:"fileNamePrefix"`
} }
// Timestamp is a UTC time when this file was uploaded. It is a base // Timestamp is a UTC time when this file was uploaded. It is a base
@@ -133,7 +124,7 @@ type AuthorizeAccountResponse struct {
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket. BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has. Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
NamePrefix any `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
} `json:"allowed"` } `json:"allowed"`
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files. APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header. AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
@@ -218,7 +209,6 @@ type CreateBucketRequest struct {
AccountID string `json:"accountId"` AccountID string `json:"accountId"`
Name string `json:"bucketName"` Name string `json:"bucketName"`
Type string `json:"bucketType"` Type string `json:"bucketType"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
} }
// DeleteBucketRequest is used to create a bucket // DeleteBucketRequest is used to create a bucket
@@ -341,11 +331,3 @@ type CopyPartRequest struct {
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1) PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied. Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
} }
// UpdateBucketRequest describes a request to modify a B2 bucket
type UpdateBucketRequest struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Type string `json:"bucketType,omitempty"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
}
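For orientation, marshalling an UpdateBucketRequest built from the struct tags above yields JSON of the shape dumped later under the lifecycle command help; this snippet is an illustration, not part of the diff:

days := 1
req := api.UpdateBucketRequest{
	ID:             "bucketID",
	AccountID:      "accountID",
	LifecycleRules: []api.LifecycleRule{{DaysFromHidingToDeleting: &days}},
}
out, _ := json.Marshal(req)
// string(out) ==
// {"bucketId":"bucketID","accountId":"accountID","lifecycleRules":[{"daysFromHidingToDeleting":1,
//  "daysFromUploadingToHiding":null,"daysFromStartingToCancelingUnfinishedLargeFiles":null,"fileNamePrefix":""}]}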

backend/b2/api/types_test.go

@@ -42,11 +42,11 @@ func TestTimestampIsZero(t *testing.T) {
 }

 func TestTimestampEqual(t *testing.T) {
-	assert.False(t, emptyT.Equal(emptyT)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
+	assert.False(t, emptyT.Equal(emptyT))
 	assert.False(t, t0.Equal(emptyT))
 	assert.False(t, emptyT.Equal(t0))
 	assert.False(t, t0.Equal(t1))
 	assert.False(t, t1.Equal(t0))
-	assert.True(t, t0.Equal(t0)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
-	assert.True(t, t1.Equal(t1)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
+	assert.True(t, t0.Equal(t0))
+	assert.True(t, t1.Equal(t1))
 }

backend/b2/b2.go

@@ -9,14 +9,12 @@ import (
"bytes" "bytes"
"context" "context"
"crypto/sha1" "crypto/sha1"
"encoding/json"
"errors" "errors"
"fmt" "fmt"
gohash "hash" gohash "hash"
"io" "io"
"net/http" "net/http"
"path" "path"
"slices"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
@@ -31,11 +29,9 @@ import (
"github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/bucket" "github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool" "github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest" "github.com/rclone/rclone/lib/rest"
@@ -62,7 +58,8 @@ const (
defaultChunkSize = 96 * fs.Mebi defaultChunkSize = 96 * fs.Mebi
defaultUploadCutoff = 200 * fs.Mebi defaultUploadCutoff = 200 * fs.Mebi
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
defaultMaxAge = 24 * time.Hour memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
) )
// Globals // Globals
@@ -77,17 +74,14 @@ func init() {
Name: "b2", Name: "b2",
Description: "Backblaze B2", Description: "Backblaze B2",
NewFs: NewFs, NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "account", Name: "account",
Help: "Account ID or Application Key ID.", Help: "Account ID or Application Key ID.",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "key", Name: "key",
Help: "Application Key.", Help: "Application Key.",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "endpoint", Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.", Help: "Endpoint for the service.\n\nLeave blank normally.",
@@ -104,7 +98,7 @@ below will cause b2 to return specific errors:
* "force_cap_exceeded" * "force_cap_exceeded"
These will be set in the "X-Bz-Test-Mode" header which is documented These will be set in the "X-Bz-Test-Mode" header which is documented
in the [b2 integrations checklist](https://www.backblaze.com/docs/cloud-storage-integration-checklist).`, in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`,
Default: "", Default: "",
Hide: fs.OptionHideConfigurator, Hide: fs.OptionHideConfigurator,
Advanced: true, Advanced: true,
@@ -153,18 +147,6 @@ might a maximum of "--transfers" chunks in progress at once.
5,000,000 Bytes is the minimum size.`, 5,000,000 Bytes is the minimum size.`,
Default: defaultChunkSize, Default: defaultChunkSize,
Advanced: true, Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
Note that chunks are stored in memory and there may be up to
"--transfers" * "--b2-upload-concurrency" chunks stored at once
in memory.`,
Default: 4,
Advanced: true,
}, { }, {
Name: "disable_checksum", Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files. Help: `Disable checksums for large (> upload cutoff) files.
@@ -196,57 +178,29 @@ Example:
Advanced: true, Advanced: true,
}, { }, {
Name: "download_auth_duration", Name: "download_auth_duration",
Help: `Time before the public link authorization token will expire in s or suffix ms|s|m|h|d. Help: `Time before the authorization token will expire in s or suffix ms|s|m|h|d.
This is used in combination with "rclone link" for making files
accessible to the public and sets the duration before the download
authorization token will expire.
The duration before the download authorization token will expire.
The minimum value is 1 second. The maximum value is one week.`, The minimum value is 1 second. The maximum value is one week.`,
Default: fs.Duration(7 * 24 * time.Hour), Default: fs.Duration(7 * 24 * time.Hour),
Advanced: true, Advanced: true,
}, { }, {
Name: "memory_pool_flush_time", Name: "memory_pool_flush_time",
Default: fs.Duration(time.Minute), Default: memoryPoolFlushTime,
Advanced: true, Advanced: true,
Hide: fs.OptionHideBoth, Help: `How often internal memory buffer pools will be flushed.
Help: `How often internal memory buffer pools will be flushed. (no longer used)`, Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
}, { }, {
Name: "memory_pool_use_mmap", Name: "memory_pool_use_mmap",
Default: false, Default: memoryPoolUseMmap,
Advanced: true,
Hide: fs.OptionHideBoth,
Help: `Whether to use mmap buffers in internal memory pool. (no longer used)`,
}, {
Name: "lifecycle",
Help: `Set the number of days deleted files should be kept when creating a bucket.
On bucket creation, this parameter is used to create a lifecycle rule
for the entire bucket.
If lifecycle is 0 (the default) it does not create a lifecycle rule so
the default B2 behaviour applies. This is to create versions of files
on delete and overwrite and to keep them indefinitely.
If lifecycle is >0 then it creates a single rule setting the number of
days before a file that is deleted or overwritten is deleted
permanently. This is known as daysFromHidingToDeleting in the b2 docs.
The minimum value for this parameter is 1 day.
You can also enable hard_delete in the config also which will mean
deletions won't cause versions but overwrites will still cause
versions to be made.
See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket creation.
`,
Default: 0,
Advanced: true, Advanced: true,
Help: `Whether to use mmap buffers in internal memory pool.`,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
Advanced: true, Advanced: true,
// See: https://www.backblaze.com/docs/cloud-storage-files // See: https://www.backblaze.com/b2/docs/files.html
// Encode invalid UTF-8 bytes as json doesn't handle them properly. // Encode invalid UTF-8 bytes as json doesn't handle them properly.
// FIXME: allow /, but not leading, trailing or double // FIXME: allow /, but not leading, trailing or double
Default: (encoder.Display | Default: (encoder.Display |
@@ -268,11 +222,11 @@ type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"` CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"` ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
DisableCheckSum bool `config:"disable_checksum"` DisableCheckSum bool `config:"disable_checksum"`
DownloadURL string `config:"download_url"` DownloadURL string `config:"download_url"`
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"` DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
Lifecycle int `config:"lifecycle"` MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
} }
@@ -297,6 +251,7 @@ type Fs struct {
authMu sync.Mutex // lock for authorizing the account authMu sync.Mutex // lock for authorizing the account
pacer *fs.Pacer // To pace and retry the API calls pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency uploadToken *pacer.TokenDispenser // control concurrency
pool *pool.Pool // memory pool
} }
// Object describes a b2 object // Object describes a b2 object
@@ -365,7 +320,7 @@ var retryErrorCodes = []int{
504, // Gateway Time-out 504, // Gateway Time-out
} }
// shouldRetryNoReauth returns a boolean as to whether this resp and err // shouldRetryNoAuth returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience // deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetryNoReauth(ctx context.Context, resp *http.Response, err error) (bool, error) { func (f *Fs) shouldRetryNoReauth(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) { if fserrors.ContextError(ctx, &err) {
@@ -406,18 +361,11 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
// errorHandler parses a non 2xx error response into an error // errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error { func errorHandler(resp *http.Response) error {
body, err := rest.ReadBody(resp) // Decode error response
if err != nil {
fs.Errorf(nil, "Couldn't read error out of body: %v", err)
body = nil
}
// Decode error response if there was one - they can be blank
errResponse := new(api.Error) errResponse := new(api.Error)
if len(body) > 0 { err := rest.DecodeJSON(resp, &errResponse)
err = json.Unmarshal(body, errResponse)
if err != nil { if err != nil {
fs.Errorf(nil, "Couldn't decode error response: %v", err) fs.Debugf(nil, "Couldn't decode error response: %v", err)
}
} }
if errResponse.Code == "" { if errResponse.Code == "" {
errResponse.Code = "unknown" errResponse.Code = "unknown"
@@ -461,14 +409,6 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
return return
} }
func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs
}
return
}
// setRoot changes the root of the Fs // setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) { func (f *Fs) setRoot(root string) {
f.root = parsePath(root) f.root = parsePath(root)
@@ -516,6 +456,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
uploads: make(map[string][]*api.GetUploadURLResponse), uploads: make(map[string][]*api.GetUploadURLResponse),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers), uploadToken: pacer.NewTokenDispenser(ci.Transfers),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
ci.Transfers,
opt.MemoryPoolUseMmap,
),
} }
f.setRoot(root) f.setRoot(root)
f.features = (&fs.Features{ f.features = (&fs.Features{
@@ -523,7 +469,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
WriteMimeType: true, WriteMimeType: true,
BucketBased: true, BucketBased: true,
BucketBasedRootOK: true, BucketBasedRootOK: true,
ChunkWriterDoesntSeek: true,
}).Fill(ctx, f) }).Fill(ctx, f)
// Set the test flag if required // Set the test flag if required
if opt.TestMode != "" { if opt.TestMode != "" {
@@ -590,7 +535,12 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
// hasPermission returns if the current AuthorizationToken has the selected permission // hasPermission returns if the current AuthorizationToken has the selected permission
func (f *Fs) hasPermission(permission string) bool { func (f *Fs) hasPermission(permission string) bool {
return slices.Contains(f.info.Allowed.Capabilities, permission) for _, capability := range f.info.Allowed.Capabilities {
if capability == permission {
return true
}
}
return false
} }
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken // getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
@@ -645,24 +595,23 @@ func (f *Fs) clearUploadURL(bucketID string) {
f.uploadMu.Unlock() f.uploadMu.Unlock()
} }
// getRW gets a RW buffer and an upload token // getBuf gets a buffer of f.opt.ChunkSize and an upload token
// //
// If noBuf is set then it just gets an upload token // If noBuf is set then it just gets an upload token
func (f *Fs) getRW(noBuf bool) (rw *pool.RW) { func (f *Fs) getBuf(noBuf bool) (buf []byte) {
f.uploadToken.Get() f.uploadToken.Get()
if !noBuf { if !noBuf {
rw = multipart.NewRW() buf = f.pool.Get()
} }
return rw return buf
} }
// putRW returns a RW buffer to the memory pool and returns an upload // putBuf returns a buffer to the memory pool and an upload token
// token
// //
// If buf is nil then it just returns the upload token // If noBuf is set then it just returns the upload token
func (f *Fs) putRW(rw *pool.RW) { func (f *Fs) putBuf(buf []byte, noBuf bool) {
if rw != nil { if !noBuf {
_ = rw.Close() f.pool.Put(buf)
} }
f.uploadToken.Put() f.uploadToken.Put()
} }
@@ -869,7 +818,7 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
// listBuckets returns all the buckets to out // listBuckets returns all the buckets to out
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) { func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
err = f.listBucketsToFn(ctx, "", func(bucket *api.Bucket) error { err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
d := fs.NewDir(bucket.Name, time.Time{}) d := fs.NewDir(bucket.Name, time.Time{})
entries = append(entries, d) entries = append(entries, d)
return nil return nil
@@ -918,7 +867,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively that doing a directory traversal. // of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir) bucket, directory := f.split(dir)
list := list.NewHelper(callback) list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error { listR := func(bucket, directory, prefix string, addBucket bool) error {
last := "" last := ""
return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error { return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
@@ -962,14 +911,11 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
type listBucketFn func(*api.Bucket) error type listBucketFn func(*api.Bucket) error
// listBucketsToFn lists the buckets to the function supplied // listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error { func (f *Fs) listBucketsToFn(ctx context.Context, fn listBucketFn) error {
var account = api.ListBucketsRequest{ var account = api.ListBucketsRequest{
AccountID: f.info.AccountID, AccountID: f.info.AccountID,
BucketID: f.info.Allowed.BucketID, BucketID: f.info.Allowed.BucketID,
} }
if bucketName != "" && account.BucketID == "" {
account.BucketName = f.opt.Enc.FromStandardName(bucketName)
}
var response api.ListBucketsResponse var response api.ListBucketsResponse
opts := rest.Opts{ opts := rest.Opts{
@@ -1015,7 +961,7 @@ func (f *Fs) getbucketType(ctx context.Context, bucket string) (bucketType strin
if bucketType != "" { if bucketType != "" {
return bucketType, nil return bucketType, nil
} }
err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error { err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
// listBucketsToFn reads bucket Types // listBucketsToFn reads bucket Types
return nil return nil
}) })
@@ -1050,7 +996,7 @@ func (f *Fs) getBucketID(ctx context.Context, bucket string) (bucketID string, e
if bucketID != "" { if bucketID != "" {
return bucketID, nil return bucketID, nil
} }
err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error { err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
// listBucketsToFn sets IDs // listBucketsToFn sets IDs
return nil return nil
}) })
@@ -1114,11 +1060,6 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
Name: f.opt.Enc.FromStandardName(bucket), Name: f.opt.Enc.FromStandardName(bucket),
Type: "allPrivate", Type: "allPrivate",
} }
if f.opt.Lifecycle > 0 {
request.LifecycleRules = []api.LifecycleRule{{
DaysFromHidingToDeleting: &f.opt.Lifecycle,
}}
}
var response api.Bucket var response api.Bucket
err := f.pacer.Call(func() (bool, error) { err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response) resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
@@ -1246,7 +1187,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
// if oldOnly is true then it deletes only non current files. // if oldOnly is true then it deletes only non current files.
// //
// Implemented here so we can make sure we delete old versions. // Implemented here so we can make sure we delete old versions.
func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden bool, deleteUnfinished bool, maxAge time.Duration) error { func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
bucket, directory := f.split(dir) bucket, directory := f.split(dir)
if bucket == "" { if bucket == "" {
return errors.New("can't purge from root") return errors.New("can't purge from root")
@@ -1264,14 +1205,14 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
} }
} }
var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool { var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
return time.Since(time.Time(timestamp)) > maxAge return time.Since(time.Time(timestamp)).Hours() > 24
} }
// Delete Config.Transfers in parallel // Delete Config.Transfers in parallel
toBeDeleted := make(chan *api.File, f.ci.Transfers) toBeDeleted := make(chan *api.File, f.ci.Transfers)
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(f.ci.Transfers) wg.Add(f.ci.Transfers)
for range f.ci.Transfers { for i := 0; i < f.ci.Transfers; i++ {
go func() { go func() {
defer wg.Done() defer wg.Done()
for object := range toBeDeleted { for object := range toBeDeleted {
@@ -1280,28 +1221,13 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
fs.Errorf(object.Name, "Can't create object %v", err) fs.Errorf(object.Name, "Can't create object %v", err)
continue continue
} }
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "deleting") tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
err = f.deleteByID(ctx, object.ID, object.Name) err = f.deleteByID(ctx, object.ID, object.Name)
checkErr(err) checkErr(err)
tr.Done(ctx, err) tr.Done(ctx, err)
} }
}() }()
} }
if oldOnly {
if deleteHidden && deleteUnfinished {
fs.Infof(f, "cleaning bucket %q of all hidden files, and pending multipart uploads older than %v", bucket, maxAge)
} else if deleteHidden {
fs.Infof(f, "cleaning bucket %q of all hidden files", bucket)
} else if deleteUnfinished {
fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than %v", bucket, maxAge)
} else {
fs.Errorf(f, "cleaning bucket %q of nothing. This should never happen!", bucket)
return nil
}
} else {
fs.Infof(f, "cleaning bucket %q of all files", bucket)
}
last := "" last := ""
checkErr(f.list(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error { checkErr(f.list(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
if !isDirectory { if !isDirectory {
@@ -1309,28 +1235,22 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
if err != nil { if err != nil {
fs.Errorf(object, "Can't create object %+v", err) fs.Errorf(object, "Can't create object %+v", err)
} }
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking") tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
if oldOnly && last != remote { if oldOnly && last != remote {
// Check current version of the file // Check current version of the file
if deleteHidden && object.Action == "hide" { if object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID) fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
if !operations.SkipDestructive(ctx, object.Name, "remove hide marker") {
toBeDeleted <- object toBeDeleted <- object
} } else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
} else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local()) fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
if !operations.SkipDestructive(ctx, object.Name, "remove pending upload") {
toBeDeleted <- object toBeDeleted <- object
}
} else { } else {
fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp))) fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
} }
} else { } else {
fs.Debugf(remote, "Deleting (id %q)", object.ID) fs.Debugf(remote, "Deleting (id %q)", object.ID)
if !operations.SkipDestructive(ctx, object.Name, "delete") {
toBeDeleted <- object toBeDeleted <- object
} }
}
last = remote last = remote
tr.Done(ctx, nil) tr.Done(ctx, nil)
} }
@@ -1347,17 +1267,12 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
// Purge deletes all the files and directories including the old versions. // Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context, dir string) error { func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purge(ctx, dir, false, false, false, defaultMaxAge) return f.purge(ctx, dir, false)
} }
// CleanUp deletes all hidden files and pending multipart uploads older than 24 hours. // CleanUp deletes all the hidden files.
func (f *Fs) CleanUp(ctx context.Context) error { func (f *Fs) CleanUp(ctx context.Context) error {
return f.purge(ctx, "", true, true, true, defaultMaxAge) return f.purge(ctx, "", true)
}
// cleanUp deletes all hidden files and/or pending multipart uploads older than the specified age.
func (f *Fs) cleanUp(ctx context.Context, deleteHidden bool, deleteUnfinished bool, maxAge time.Duration) (err error) {
return f.purge(ctx, "", true, deleteHidden, deleteUnfinished, maxAge)
} }
// copy does a server-side copy from dstObj <- srcObj // copy does a server-side copy from dstObj <- srcObj
@@ -1365,7 +1280,7 @@ func (f *Fs) cleanUp(ctx context.Context, deleteHidden bool, deleteUnfinished bo
// If newInfo is nil then the metadata will be copied otherwise it // If newInfo is nil then the metadata will be copied otherwise it
// will be replaced with newInfo // will be replaced with newInfo
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *api.File) (err error) { func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *api.File) (err error) {
if srcObj.size > int64(f.opt.CopyCutoff) { if srcObj.size >= int64(f.opt.CopyCutoff) {
if newInfo == nil { if newInfo == nil {
newInfo, err = srcObj.getMetaData(ctx) newInfo, err = srcObj.getMetaData(ctx)
if err != nil { if err != nil {
@@ -1376,11 +1291,7 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
if err != nil { if err != nil {
return err return err
} }
err = up.Copy(ctx) return up.Upload(ctx)
if err != nil {
return err
}
return dstObj.decodeMetaDataFileInfo(up.info)
} }
dstBucket, dstPath := dstObj.split() dstBucket, dstPath := dstObj.split()
@@ -1509,7 +1420,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
if err != nil { if err != nil {
return "", err return "", err
} }
absPath := "/" + urlEncode(bucketPath) absPath := "/" + bucketPath
link = RootURL + "/file/" + urlEncode(bucket) + absPath link = RootURL + "/file/" + urlEncode(bucket) + absPath
bucketType, err := f.getbucketType(ctx, bucket) bucketType, err := f.getbucketType(ctx, bucket)
if err != nil { if err != nil {
@@ -1569,7 +1480,7 @@ func (o *Object) Size() int64 {
// //
// Make sure it is lower case. // Make sure it is lower case.
// //
// Remove unverified prefix - see https://www.backblaze.com/docs/cloud-storage-upload-files-with-the-native-api // Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (e.g. Cyberduck) use this // Some tools (e.g. Cyberduck) use this
func cleanSHA1(sha1 string) string { func cleanSHA1(sha1 string) string {
const unverified = "unverified:" const unverified = "unverified:"
@@ -1596,11 +1507,7 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
o.size = Size o.size = Size
// Use the UploadTimestamp if can't get file info // Use the UploadTimestamp if can't get file info
o.modTime = time.Time(UploadTimestamp) o.modTime = time.Time(UploadTimestamp)
err = o.parseTimeString(Info[timeKey]) return o.parseTimeString(Info[timeKey])
if err != nil {
return err
}
return nil
} }
// decodeMetaData sets the metadata in the object from an api.File // decodeMetaData sets the metadata in the object from an api.File
@@ -1673,21 +1580,6 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
return o.getMetaDataListing(ctx) return o.getMetaDataListing(ctx)
} }
} }
// If using versionAt we need to list the find the correct version.
if o.fs.opt.VersionAt.IsSet() {
info, err := o.getMetaDataListing(ctx)
if err != nil {
return nil, err
}
if info.Action == "hide" {
// Rerturn object not found error if the current version is deleted.
return nil, fs.ErrorObjectNotFound
}
return info, nil
}
_, info, err = o.getOrHead(ctx, "HEAD", nil) _, info, err = o.getOrHead(ctx, "HEAD", nil)
return info, err return info, err
} }
@@ -1717,16 +1609,6 @@ func timeString(modTime time.Time) string {
return strconv.FormatInt(modTime.UnixNano()/1e6, 10) return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
} }
// parseTimeStringHelper converts a decimal string number of milliseconds
// elapsed since January 1, 1970 UTC into a time.Time
func parseTimeStringHelper(timeString string) (time.Time, error) {
unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
if err != nil {
return time.Time{}, err
}
return time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC(), nil
}
// parseTimeString converts a decimal string number of milliseconds // parseTimeString converts a decimal string number of milliseconds
// elapsed since January 1, 1970 UTC into a time.Time and stores it in // elapsed since January 1, 1970 UTC into a time.Time and stores it in
// the modTime variable. // the modTime variable.
@@ -1734,12 +1616,12 @@ func (o *Object) parseTimeString(timeString string) (err error) {
if timeString == "" { if timeString == "" {
return nil return nil
} }
modTime, err := parseTimeStringHelper(timeString) unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
if err != nil { if err != nil {
fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err) fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
return nil return nil
} }
o.modTime = modTime o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
return nil return nil
} }
@@ -1816,14 +1698,14 @@ func (file *openFile) Close() (err error) {
// Check to see we read the correct number of bytes // Check to see we read the correct number of bytes
if file.o.Size() != file.bytes { if file.o.Size() != file.bytes {
return fmt.Errorf("corrupted on transfer: lengths differ want %d vs got %d", file.o.Size(), file.bytes) return fmt.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
} }
// Check the SHA1 // Check the SHA1
receivedSHA1 := file.o.sha1 receivedSHA1 := file.o.sha1
calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil)) calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 { if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
return fmt.Errorf("corrupted on transfer: SHA1 hashes differ want %q vs got %q", receivedSHA1, calculatedSHA1) return fmt.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
} }
return nil return nil
@@ -1893,19 +1775,13 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
ContentType: resp.Header.Get("Content-Type"), ContentType: resp.Header.Get("Content-Type"),
Info: Info, Info: Info,
} }
// When reading files from B2 via cloudflare using // When reading files from B2 via cloudflare using
// --b2-download-url cloudflare strips the Content-Length // --b2-download-url cloudflare strips the Content-Length
// headers (presumably so it can inject stuff) so use the old // headers (presumably so it can inject stuff) so use the old
// length read from the listing. // length read from the listing.
// Additionally, the official examples return S3 headers
// instead of native, i.e. no file ID, use ones from listing.
if info.Size < 0 { if info.Size < 0 {
info.Size = o.size info.Size = o.size
} }
if info.ID == "" {
info.ID = o.id
}
return resp, info, nil return resp, info, nil
} }
@@ -1955,7 +1831,7 @@ func init() {
// urlEncode encodes in with % encoding // urlEncode encodes in with % encoding
func urlEncode(in string) string { func urlEncode(in string) string {
var out bytes.Buffer var out bytes.Buffer
for i := range len(in) { for i := 0; i < len(in); i++ {
c := in[i] c := in[i]
if noNeedToEncode[c] { if noNeedToEncode[c] {
_ = out.WriteByte(c) _ = out.WriteByte(c)
@@ -1983,11 +1859,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil { if err != nil {
return err return err
} }
if size < 0 { if size == -1 {
// Check if the file is large enough for a chunked upload (needs to be at least two chunks) // Check if the file is large enough for a chunked upload (needs to be at least two chunks)
rw := o.fs.getRW(false) buf := o.fs.getBuf(false)
n, err := io.CopyN(rw, in, int64(o.fs.opt.ChunkSize)) n, err := io.ReadFull(in, buf)
if err == nil { if err == nil {
bufReader := bufio.NewReader(in) bufReader := bufio.NewReader(in)
in = bufReader in = bufReader
@@ -1996,42 +1872,31 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err == nil { if err == nil {
fs.Debugf(o, "File is big enough for chunked streaming") fs.Debugf(o, "File is big enough for chunked streaming")
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil, options...) up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
if err != nil { if err != nil {
o.fs.putRW(rw) o.fs.putBuf(buf, false)
return err return err
} }
// NB Stream returns the buffer and token // NB Stream returns the buffer and token
err = up.Stream(ctx, rw) return up.Stream(ctx, buf)
if err != nil { } else if err == io.EOF || err == io.ErrUnexpectedEOF {
return err
}
return o.decodeMetaDataFileInfo(up.info)
} else if err == io.EOF {
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n) fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
defer o.fs.putRW(rw) defer o.fs.putBuf(buf, false)
size = n size = int64(n)
in = rw in = bytes.NewReader(buf[:n])
} else { } else {
o.fs.putRW(rw) o.fs.putBuf(buf, false)
return err return err
} }
} else if size > int64(o.fs.opt.UploadCutoff) { } else if size > int64(o.fs.opt.UploadCutoff) {
chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{ up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
Open: o.fs,
OpenOptions: options,
})
if err != nil { if err != nil {
return err return err
} }
up := chunkWriter.(*largeUpload) return up.Upload(ctx)
return o.decodeMetaDataFileInfo(up.info)
} }
modTime, err := o.getModTime(ctx, src, options) modTime := src.ModTime(ctx)
if err != nil {
return err
}
calculatedSha1, _ := src.Hash(ctx, hash.SHA1) calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
if calculatedSha1 == "" { if calculatedSha1 == "" {
@@ -2136,71 +2001,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.decodeMetaDataFileInfo(&response) return o.decodeMetaDataFileInfo(&response)
} }
// Get modTime from the source; if --metadata is set, fetch the src metadata and get it from there.
// When metadata support is added to b2, this method will need a more generic name
func (o *Object) getModTime(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (time.Time, error) {
modTime := src.ModTime(ctx)
// Fetch metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
if err != nil {
return time.Time{}, fmt.Errorf("failed to read metadata from source object: %w", err)
}
// merge metadata into request and user metadata
for k, v := range meta {
k = strings.ToLower(k)
// For now, the only metadata we're concerned with is "mtime"
switch k {
case "mtime":
// mtime in meta overrides source ModTime
metaModTime, err := time.Parse(time.RFC3339Nano, v)
if err != nil {
fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
} else {
modTime = metaModTime
}
default:
// Do nothing for now
}
}
return modTime, nil
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
// FIXME what if file is smaller than 1 chunk?
if f.opt.Versions {
return info, nil, errNotWithVersions
}
if f.opt.VersionAt.IsSet() {
return info, nil, errNotWithVersionAt
}
//size := src.Size()
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
}
bucket, _ := o.split()
err = f.makeBucket(ctx, bucket)
if err != nil {
return info, nil, err
}
info = fs.ChunkWriterInfo{
ChunkSize: int64(f.opt.ChunkSize),
Concurrency: o.fs.opt.UploadConcurrency,
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
return info, up, err
}
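A hedged sketch of how a caller can drive the interface above, using only the shapes visible in this diff (zero-based chunk numbers for WriteChunk, Close to commit, Abort to discard); the helper name and the sequential loop are illustrative, not rclone's actual driver:

import (
	"bytes"
	"context"
	"io"

	"github.com/rclone/rclone/fs"
)

// uploadChunked streams in to remote one chunk at a time.
func uploadChunked(ctx context.Context, ocw fs.OpenChunkWriter, remote string, src fs.ObjectInfo, in io.Reader) error {
	info, cw, err := ocw.OpenChunkWriter(ctx, remote, src)
	if err != nil {
		return err
	}
	buf := make([]byte, info.ChunkSize)
	for chunkNumber := 0; ; chunkNumber++ {
		n, rerr := io.ReadFull(in, buf)
		if n > 0 {
			if _, werr := cw.WriteChunk(ctx, chunkNumber, bytes.NewReader(buf[:n])); werr != nil {
				_ = cw.Abort(ctx) // discard the unfinished large file
				return werr
			}
		}
		if rerr == io.EOF || rerr == io.ErrUnexpectedEOF {
			return cw.Close(ctx) // last chunk sent - commit the upload
		}
		if rerr != nil {
			_ = cw.Abort(ctx)
			return rerr
		}
	}
}

In rclone proper, multipart.UploadMultipart (seen in Update above) drives this interface and additionally uploads parts concurrently up to info.Concurrency.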
// Remove an object // Remove an object
func (o *Object) Remove(ctx context.Context) error { func (o *Object) Remove(ctx context.Context) error {
bucket, bucketPath := o.split() bucket, bucketPath := o.split()
@@ -2226,200 +2026,6 @@ func (o *Object) ID() string {
return o.id return o.id
} }
var lifecycleHelp = fs.CommandHelp{
Name: "lifecycle",
Short: "Read or set the lifecycle for a bucket",
Long: `This command can be used to read or set the lifecycle for a bucket.
Usage Examples:
To show the current lifecycle rules:
rclone backend lifecycle b2:bucket
This will dump something like this showing the lifecycle rules.
[
{
"daysFromHidingToDeleting": 1,
"daysFromUploadingToHiding": null,
"daysFromStartingToCancelingUnfinishedLargeFiles": null,
"fileNamePrefix": ""
}
]
If there are no lifecycle rules (the default) then it will just return [].
To reset the current lifecycle rules:
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
This will run and then print the new lifecycle rules as above.
Rclone only lets you set lifecycles for the whole bucket with the
fileNamePrefix = "".
You can't disable versioning with B2. The best you can do is to set
the daysFromHidingToDeleting to 1 day. You can also enable hard_delete
in the config, which means deletions won't create versions, but
overwrites will still cause versions to be made.
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
`,
Opts: map[string]string{
"daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
"daysFromStartingToCancelingUnfinishedLargeFiles": "Cancels any unfinished large file versions after this many days",
},
}
func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
var newRule api.LifecycleRule
if daysStr := opt["daysFromHidingToDeleting"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
if err != nil {
return nil, fmt.Errorf("bad daysFromHidingToDeleting: %w", err)
}
newRule.DaysFromHidingToDeleting = &days
}
if daysStr := opt["daysFromUploadingToHiding"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
if err != nil {
return nil, fmt.Errorf("bad daysFromUploadingToHiding: %w", err)
}
newRule.DaysFromUploadingToHiding = &days
}
if daysStr := opt["daysFromStartingToCancelingUnfinishedLargeFiles"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
if err != nil {
return nil, fmt.Errorf("bad daysFromStartingToCancelingUnfinishedLargeFiles: %w", err)
}
newRule.DaysFromStartingToCancelingUnfinishedLargeFiles = &days
}
bucketName, _ := f.split("")
if bucketName == "" {
return nil, errors.New("bucket required")
}
skip := operations.SkipDestructive(ctx, name, "update lifecycle rules")
var bucket *api.Bucket
if !skip && (newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil) {
bucketID, err := f.getBucketID(ctx, bucketName)
if err != nil {
return nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_update_bucket",
}
var request = api.UpdateBucketRequest{
ID: bucketID,
AccountID: f.info.AccountID,
LifecycleRules: []api.LifecycleRule{newRule},
}
var response api.Bucket
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
bucket = &response
} else {
err = f.listBucketsToFn(ctx, bucketName, func(b *api.Bucket) error {
bucket = b
return nil
})
if err != nil {
return nil, err
}
}
if bucket == nil {
return nil, fs.ErrorDirNotFound
}
return bucket.LifecycleRules, nil
}
var cleanupHelp = fs.CommandHelp{
Name: "cleanup",
Short: "Remove unfinished large file uploads.",
Long: `This command removes unfinished large file uploads of age greater than
max-age, which defaults to 24 hours.
Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
rclone backend cleanup b2:bucket/path/to/object
rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
`,
Opts: map[string]string{
"max-age": "Max age of upload to delete",
},
}
func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
maxAge := defaultMaxAge
if opt["max-age"] != "" {
maxAge, err = fs.ParseDuration(opt["max-age"])
if err != nil {
return nil, fmt.Errorf("bad max-age: %w", err)
}
}
return nil, f.cleanUp(ctx, false, true, maxAge)
}
var cleanupHiddenHelp = fs.CommandHelp{
Name: "cleanup-hidden",
Short: "Remove old versions of files.",
Long: `This command removes any old hidden versions of files.
Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
rclone backend cleanup-hidden b2:bucket/path/to/dir
`,
}
func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
return nil, f.cleanUp(ctx, true, false, 0)
}
var commandHelp = []fs.CommandHelp{
lifecycleHelp,
cleanupHelp,
cleanupHiddenHelp,
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
switch name {
case "lifecycle":
return f.lifecycleCommand(ctx, name, arg, opt)
case "cleanup":
return f.cleanupCommand(ctx, name, arg, opt)
case "cleanup-hidden":
return f.cleanupHiddenCommand(ctx, name, arg, opt)
default:
return nil, fs.ErrorCommandNotFound
}
}
// Check the interfaces are satisfied // Check the interfaces are satisfied
var ( var (
_ fs.Fs = &Fs{} _ fs.Fs = &Fs{}
@@ -2429,8 +2035,6 @@ var (
_ fs.CleanUpper = &Fs{} _ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{} _ fs.ListRer = &Fs{}
_ fs.PublicLinker = &Fs{} _ fs.PublicLinker = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Commander = &Fs{}
_ fs.Object = &Object{} _ fs.Object = &Object{}
_ fs.MimeTyper = &Object{} _ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{} _ fs.IDer = &Object{}
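The var block above is the usual Go compile-time assertion idiom: assigning a typed nil pointer to the blank identifier makes the build break as soon as a type stops satisfying an interface, at zero runtime cost. A generic illustration:

type Sized interface{ Size() int64 }

type file struct{ n int64 }

func (f *file) Size() int64 { return f.n }

// Compilation fails if *file ever loses Size().
var _ Sized = (*file)(nil)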
@@ -1,31 +1,14 @@
package b2 package b2
import ( import (
"context"
"crypto/sha1"
"fmt"
"path"
"sort"
"strings"
"testing" "testing"
"time" "time"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/version"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
// Test b2 string encoding // Test b2 string encoding
// https://www.backblaze.com/docs/cloud-storage-native-api-string-encoding // https://www.backblaze.com/b2/docs/string_encoding.html
var encodeTest = []struct { var encodeTest = []struct {
fullyEncoded string fullyEncoded string
@@ -185,435 +168,3 @@ func TestParseTimeString(t *testing.T) {
} }
} }
// Return a map of the headers in the options with keys stripped of the "x-bz-info-" prefix
func OpenOptionToMetaData(options []fs.OpenOption) map[string]string {
var headers = make(map[string]string)
for _, option := range options {
k, v := option.Header()
k = strings.ToLower(k)
if strings.HasPrefix(k, headerPrefix) {
headers[k[len(headerPrefix):]] = v
}
}
return headers
}
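For example, with the options constructed later in this test, the helper above strips the x-bz-info- prefix and lowercases the keys (result shown as a comment):

options := []fs.OpenOption{
	&fs.HTTPOption{Key: "X-Bz-Info-a", Value: "1"},
	&fs.HTTPOption{Key: "X-Bz-Info-b", Value: "2"},
}
// OpenOptionToMetaData(options) == map[string]string{"a": "1", "b": "2"}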
func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string, chunkSize string) {
what := fmt.Sprintf("Size%s/UploadCutoff%s/ChunkSize%s", size, uploadCutoff, chunkSize)
t.Run(what, func(t *testing.T) {
ctx := context.Background()
ss := fs.SizeSuffix(0)
err := ss.Set(size)
require.NoError(t, err)
original := random.String(int(ss))
contents := fstest.Gz(t, original)
mimeType := "text/html"
if chunkSize != "" {
ss := fs.SizeSuffix(0)
err := ss.Set(chunkSize)
require.NoError(t, err)
_, err = f.SetUploadChunkSize(ss)
require.NoError(t, err)
}
if uploadCutoff != "" {
ss := fs.SizeSuffix(0)
err := ss.Set(uploadCutoff)
require.NoError(t, err)
_, err = f.SetUploadCutoff(ss)
require.NoError(t, err)
}
item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
btime := time.Now()
metadata := fs.Metadata{
// Just mtime for now - limit to milliseconds since x-bz-info-src_last_modified_millis can't support any finer precision
"mtime": "2009-05-06T04:05:06.499Z",
}
// Need to specify HTTP options with the header prefix since they are passed as-is
options := []fs.OpenOption{
&fs.HTTPOption{Key: "X-Bz-Info-a", Value: "1"},
&fs.HTTPOption{Key: "X-Bz-Info-b", Value: "2"},
}
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, mimeType, metadata, options...)
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()
o := obj.(*Object)
gotMetadata, err := o.getMetaData(ctx)
require.NoError(t, err)
// X-Bz-Info-a & X-Bz-Info-b
optMetadata := OpenOptionToMetaData(options)
for k, v := range optMetadata {
got := gotMetadata.Info[k]
assert.Equal(t, v, got, k)
}
assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
// Modification time from the x-bz-info-src_last_modified_millis header
var mtime api.Timestamp
err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
if err != nil {
fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
}
assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")
// Upload time
gotBtime := time.Time(gotMetadata.UploadTimestamp)
dt := gotBtime.Sub(btime)
assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))
t.Run("GzipEncoding", func(t *testing.T) {
// Test that the gzipped file we uploaded can be
// downloaded
checkDownload := func(wantContents string, wantSize int64, wantHash string) {
gotContents := fstests.ReadObject(ctx, t, o, -1)
assert.Equal(t, wantContents, gotContents)
assert.Equal(t, wantSize, o.Size())
gotHash, err := o.Hash(ctx, hash.SHA1)
require.NoError(t, err)
assert.Equal(t, wantHash, gotHash)
}
t.Run("NoDecompress", func(t *testing.T) {
checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
})
})
})
}
func (f *Fs) InternalTestMetadata(t *testing.T) {
// 1 kB regular file
f.internalTestMetadata(t, "1kiB", "", "")
// 10 MiB large file
f.internalTestMetadata(t, "10MiB", "6MiB", "6MiB")
}
func sha1Sum(t *testing.T, s string) string {
hash := sha1.Sum([]byte(s))
return fmt.Sprintf("%x", hash)
}
// This is adapted from the s3 equivalent.
func (f *Fs) InternalTestVersions(t *testing.T) {
ctx := context.Background()
// Small pause to make the LastModified different since AWS
// only seems to track them to 1 second granularity
time.Sleep(2 * time.Second)
// Create an object
const dirName = "versions"
const fileName = dirName + "/" + "test-versions.txt"
contents := random.String(100)
item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()
objMetadata, err := obj.(*Object).getMetaData(ctx)
require.NoError(t, err)
// Small pause
time.Sleep(2 * time.Second)
// Remove it
assert.NoError(t, obj.Remove(ctx))
// Small pause to make the LastModified different since AWS only seems to track them to 1 second granularity
time.Sleep(2 * time.Second)
// And create it with different size and contents
newContents := random.String(101)
newItem := fstest.NewItem(fileName, newContents, fstest.Time("2002-05-06T04:05:06.499999999Z"))
newObj := fstests.PutTestContents(ctx, t, f, &newItem, newContents, true)
newObjMetadata, err := newObj.(*Object).getMetaData(ctx)
require.NoError(t, err)
t.Run("Versions", func(t *testing.T) {
// Set --b2-versions for this test
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Read the contents
entries, err := f.List(ctx, dirName)
require.NoError(t, err)
tests := 0
var fileNameVersion string
for _, entry := range entries {
t.Log(entry)
remote := entry.Remote()
if remote == fileName {
t.Run("ReadCurrent", func(t *testing.T) {
assert.Equal(t, newContents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
})
tests++
} else if versionTime, p := version.Remove(remote); !versionTime.IsZero() && p == fileName {
t.Run("ReadVersion", func(t *testing.T) {
assert.Equal(t, contents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
})
assert.WithinDuration(t, time.Time(objMetadata.UploadTimestamp), versionTime, time.Second, "object time must be within 1 second of version time")
fileNameVersion = remote
tests++
}
}
assert.Equal(t, 2, tests, "object missing from listing")
// Check we can read the object with a version suffix
t.Run("NewObject", func(t *testing.T) {
o, err := f.NewObject(ctx, fileNameVersion)
require.NoError(t, err)
require.NotNil(t, o)
assert.Equal(t, int64(100), o.Size(), o.Remote())
})
// Check we can make a NewFs from that object with a version suffix
t.Run("NewFs", func(t *testing.T) {
newPath := bucket.Join(fs.ConfigStringFull(f), fileNameVersion)
// Make sure --b2-versions is set in the config of the new remote
fs.Debugf(nil, "oldPath = %q", newPath)
lastColon := strings.LastIndex(newPath, ":")
require.True(t, lastColon >= 0)
newPath = newPath[:lastColon] + ",versions" + newPath[lastColon:]
fs.Debugf(nil, "newPath = %q", newPath)
fNew, err := cache.Get(ctx, newPath)
// This should return pointing to a file
require.Equal(t, fs.ErrorIsFile, err)
require.NotNil(t, fNew)
// With the directory above
assert.Equal(t, dirName, path.Base(fs.ConfigStringFull(fNew)))
})
})
t.Run("VersionAt", func(t *testing.T) {
// We set --b2-version-at for this test so make sure we reset it at the end
defer func() {
f.opt.VersionAt = fs.Time{}
}()
var (
firstObjectTime = time.Time(objMetadata.UploadTimestamp)
secondObjectTime = time.Time(newObjMetadata.UploadTimestamp)
)
for _, test := range []struct {
what string
at time.Time
want []fstest.Item
wantErr error
wantSize int64
}{
{
what: "Before",
at: firstObjectTime.Add(-time.Second),
want: fstests.InternalTestFiles,
wantErr: fs.ErrorObjectNotFound,
},
{
what: "AfterOne",
at: firstObjectTime.Add(time.Second),
want: append([]fstest.Item{item}, fstests.InternalTestFiles...),
wantSize: 100,
},
{
what: "AfterDelete",
at: secondObjectTime.Add(-time.Second),
want: fstests.InternalTestFiles,
wantErr: fs.ErrorObjectNotFound,
},
{
what: "AfterTwo",
at: secondObjectTime.Add(time.Second),
want: append([]fstest.Item{newItem}, fstests.InternalTestFiles...),
wantSize: 101,
},
} {
t.Run(test.what, func(t *testing.T) {
f.opt.VersionAt = fs.Time(test.at)
t.Run("List", func(t *testing.T) {
fstest.CheckListing(t, f, test.want)
})
t.Run("NewObject", func(t *testing.T) {
gotObj, gotErr := f.NewObject(ctx, fileName)
assert.Equal(t, test.wantErr, gotErr)
if gotErr == nil {
assert.Equal(t, test.wantSize, gotObj.Size())
}
})
})
}
})
t.Run("Cleanup", func(t *testing.T) {
t.Run("DryRun", func(t *testing.T) {
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Listing should be unchanged after dry run
before := listAllFiles(ctx, t, f, dirName)
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
require.NoError(t, f.cleanUp(ctx, true, false, 0))
after := listAllFiles(ctx, t, f, dirName)
assert.Equal(t, before, after)
})
t.Run("RealThing", func(t *testing.T) {
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Listing should reflect current state after cleanup
require.NoError(t, f.cleanUp(ctx, true, false, 0))
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
fstest.CheckListing(t, f, items)
})
})
// Purge gets tested later
}
func (f *Fs) InternalTestCleanupUnfinished(t *testing.T) {
ctx := context.Background()
// CleanupUnfinished tests cleaning up unfinished large file uploads
t.Run("CleanupUnfinished", func(t *testing.T) {
dirName := "unfinished"
fileCount := 5
expectedFiles := []string{}
for i := 1; i < fileCount; i++ {
fileName := fmt.Sprintf("%s/unfinished-%d", dirName, i)
expectedFiles = append(expectedFiles, fileName)
obj := &Object{
fs: f,
remote: fileName,
}
objInfo := object.NewStaticObjectInfo(fileName, fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
_, err := f.newLargeUpload(ctx, obj, nil, objInfo, f.opt.ChunkSize, false, nil)
require.NoError(t, err)
}
checkListing(ctx, t, f, dirName, expectedFiles)
t.Run("DryRun", func(t *testing.T) {
// Listing should not change after dry run
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
require.NoError(t, f.cleanUp(ctx, false, true, 0))
checkListing(ctx, t, f, dirName, expectedFiles)
})
t.Run("RealThing", func(t *testing.T) {
// Listing should be empty after real cleanup
require.NoError(t, f.cleanUp(ctx, false, true, 0))
checkListing(ctx, t, f, dirName, []string{})
})
})
}
func listAllFiles(ctx context.Context, t *testing.T, f *Fs, dirName string) []string {
bucket, directory := f.split(dirName)
foundFiles := []string{}
require.NoError(t, f.list(ctx, bucket, directory, "", false, true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
if !isDirectory {
foundFiles = append(foundFiles, object.Name)
}
return nil
}))
sort.Strings(foundFiles)
return foundFiles
}
func checkListing(ctx context.Context, t *testing.T, f *Fs, dirName string, expectedFiles []string) {
foundFiles := listAllFiles(ctx, t, f, dirName)
sort.Strings(expectedFiles)
assert.Equal(t, expectedFiles, foundFiles)
}
func (f *Fs) InternalTestLifecycleRules(t *testing.T) {
ctx := context.Background()
opt := map[string]string{}
t.Run("InitState", func(t *testing.T) {
// There should be no lifecycle rules at the outset
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
})
t.Run("DryRun", func(t *testing.T) {
// There should still be no lifecycle rules after each dry run operation
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
delete(opt, "daysFromHidingToDeleting")
opt["daysFromUploadingToHiding"] = "40"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
})
t.Run("RealThing", func(t *testing.T) {
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
delete(opt, "daysFromHidingToDeleting")
opt["daysFromUploadingToHiding"] = "40"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
})
}
// -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Metadata", f.InternalTestMetadata)
t.Run("Versions", f.InternalTestVersions)
t.Run("CleanupUnfinished", f.InternalTestCleanupUnfinished)
t.Run("LifecycleRules", f.InternalTestLifecycleRules)
}
var _ fstests.InternalTester = (*Fs)(nil)
@@ -28,12 +28,7 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs) return f.setUploadCutoff(cs)
} }
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setCopyCutoff(cs)
}
var ( var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil) _ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil) _ fstests.SetUploadCutoffer = (*Fs)(nil)
_ fstests.SetCopyCutoffer = (*Fs)(nil)
) )
@@ -1,10 +1,11 @@
// Upload large files for b2 // Upload large files for b2
// //
// Docs - https://www.backblaze.com/docs/cloud-storage-large-files // Docs - https://www.backblaze.com/b2/docs/large_files.html
package b2 package b2
import ( import (
"bytes"
"context" "context"
"crypto/sha1" "crypto/sha1"
"encoding/hex" "encoding/hex"
@@ -20,7 +21,6 @@ import (
"github.com/rclone/rclone/fs/chunksize" "github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest" "github.com/rclone/rclone/lib/rest"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
@@ -78,31 +78,36 @@ type largeUpload struct {
wrap accounting.WrapFn // account parts being transferred wrap accounting.WrapFn // account parts being transferred
id string // ID of the file being uploaded id string // ID of the file being uploaded
size int64 // total size size int64 // total size
parts int // calculated number of parts, if known parts int64 // calculated number of parts, if known
sha1smu sync.Mutex // mutex to protect sha1s
sha1s []string // slice of SHA1s for each part sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
chunkSize int64 // chunk size to use chunkSize int64 // chunk size to use
src *Object // if copying, object we are reading from src *Object // if copying, object we are reading from
info *api.FileInfo // final response with info about the object
} }
// newLargeUpload starts an upload of object o from in with metadata in src // newLargeUpload starts an upload of object o from in with metadata in src
// //
// If newInfo is set then metadata from that will be used instead of reading it from src // If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File, options ...fs.OpenOption) (up *largeUpload, err error) { func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
size := src.Size() size := src.Size()
parts := 0 parts := int64(0)
sha1SliceSize := int64(maxParts)
chunkSize := defaultChunkSize chunkSize := defaultChunkSize
if size == -1 { if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize) fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else { } else {
chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize) chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
parts = int(size / int64(chunkSize)) parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 { if size%int64(chunkSize) != 0 {
parts++ parts++
} }
sha1SliceSize = parts
}
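// Worked example of the part arithmetic above (numbers illustrative):
// size = 1000 MiB with chunkSize = 96 MiB gives size/chunkSize = 10 and a
// non-zero remainder, so parts becomes 11 - i.e. parts = ceil(size/chunkSize).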
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
} }
bucket, bucketPath := o.split() bucket, bucketPath := o.split()
bucketID, err := f.getBucketID(ctx, bucket) bucketID, err := f.getBucketID(ctx, bucket)
@@ -113,27 +118,12 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
BucketID: bucketID, BucketID: bucketID,
Name: f.opt.Enc.FromStandardPath(bucketPath), Name: f.opt.Enc.FromStandardPath(bucketPath),
} }
optionsToSend := make([]fs.OpenOption, 0, len(options))
if newInfo == nil { if newInfo == nil {
modTime, err := o.getModTime(ctx, src, options) modTime := src.ModTime(ctx)
if err != nil {
return nil, err
}
request.ContentType = fs.MimeType(ctx, src) request.ContentType = fs.MimeType(ctx, src)
request.Info = map[string]string{ request.Info = map[string]string{
timeKey: timeString(modTime), timeKey: timeString(modTime),
} }
// Custom upload headers - remove header prefix since they are sent in the body
for _, option := range options {
k, v := option.Header()
k = strings.ToLower(k)
if strings.HasPrefix(k, headerPrefix) {
request.Info[k[len(headerPrefix):]] = v
} else {
optionsToSend = append(optionsToSend, option)
}
}
// Set the SHA1 if known // Set the SHA1 if known
if !o.fs.opt.DisableCheckSum || doCopy { if !o.fs.opt.DisableCheckSum || doCopy {
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" { if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
@@ -144,11 +134,6 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
request.ContentType = newInfo.ContentType request.ContentType = newInfo.ContentType
request.Info = newInfo.Info request.Info = newInfo.Info
} }
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
Options: optionsToSend,
}
var response api.StartLargeFileResponse var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response) resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
@@ -165,7 +150,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
id: response.ID, id: response.ID,
size: size, size: size,
parts: parts, parts: parts,
sha1s: make([]string, 0, 16), sha1s: make([]string, sha1SliceSize),
chunkSize: int64(chunkSize), chunkSize: int64(chunkSize),
} }
// unwrap the accounting from the input, we use wrap to put it // unwrap the accounting from the input, we use wrap to put it
@@ -184,13 +169,8 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
// This should be returned with returnUploadURL when finished // This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) { func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
up.uploadMu.Lock() up.uploadMu.Lock()
if len(up.uploads) > 0 { defer up.uploadMu.Unlock()
upload, up.uploads = up.uploads[0], up.uploads[1:] if len(up.uploads) == 0 {
up.uploadMu.Unlock()
return upload, nil
}
up.uploadMu.Unlock()
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
Path: "/b2_get_upload_part_url", Path: "/b2_get_upload_part_url",
@@ -198,13 +178,16 @@ func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadP
var request = api.GetUploadPartURLRequest{ var request = api.GetUploadPartURLRequest{
ID: up.id, ID: up.id,
} }
err = up.f.pacer.Call(func() (bool, error) { err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload) resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err) return up.f.shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get upload URL: %w", err) return nil, fmt.Errorf("failed to get upload URL: %w", err)
} }
} else {
upload, up.uploads = up.uploads[0], up.uploads[1:]
}
return upload, nil return upload, nil
} }
@@ -218,39 +201,10 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
up.uploadMu.Unlock() up.uploadMu.Unlock()
} }
// Add an sha1 to the slice of sha1s being built up // Transfer a chunk
func (up *largeUpload) addSha1(chunkNumber int, sha1 string) { func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
up.sha1smu.Lock() err := up.f.pacer.Call(func() (bool, error) {
defer up.sha1smu.Unlock() fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
if len(up.sha1s) < chunkNumber+1 {
up.sha1s = append(up.sha1s, make([]string, chunkNumber+1-len(up.sha1s))...)
}
up.sha1s[chunkNumber] = sha1
}
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) {
// Only account after the checksum reads have been done
if do, ok := reader.(pool.DelayAccountinger); ok {
// To figure out this number, do a transfer and if the accounted size is 0 or a
// multiple of what it should be, increase or decrease this number.
do.DelayAccounting(1)
}
err = up.f.pacer.Call(func() (bool, error) {
// Discover the size by seeking to the end
size, err = reader.Seek(0, io.SeekEnd)
if err != nil {
return false, err
}
// rewind the reader on retry and after reading size
_, err = reader.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
fs.Debugf(up.o, "Sending chunk %d length %d", chunkNumber, size)
// Get upload URL // Get upload URL
upload, err := up.getUploadURL(ctx) upload, err := up.getUploadURL(ctx)
@@ -258,8 +212,8 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
return false, err return false, err
} }
in := newHashAppendingReader(reader, sha1.New()) in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
sizeWithHash := size + int64(in.AdditionalLength()) size := int64(len(body)) + int64(in.AdditionalLength())
// Authorization // Authorization
// //
@@ -289,10 +243,10 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
Body: up.wrap(in), Body: up.wrap(in),
ExtraHeaders: map[string]string{ ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken, "Authorization": upload.AuthorizationToken,
"X-Bz-Part-Number": fmt.Sprintf("%d", chunkNumber+1), "X-Bz-Part-Number": fmt.Sprintf("%d", part),
sha1Header: "hex_digits_at_end", sha1Header: "hex_digits_at_end",
}, },
ContentLength: &sizeWithHash, ContentLength: &size,
} }
var response api.UploadPartResponse var response api.UploadPartResponse
@@ -300,7 +254,7 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response) resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
retry, err := up.f.shouldRetry(ctx, resp, err) retry, err := up.f.shouldRetry(ctx, resp, err)
if err != nil { if err != nil {
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err) fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
} }
// On retryable error clear PartUploadURL // On retryable error clear PartUploadURL
if retry { if retry {
@@ -308,30 +262,30 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
upload = nil upload = nil
} }
up.returnUploadURL(upload) up.returnUploadURL(upload)
up.addSha1(chunkNumber, in.HexSum()) up.sha1s[part-1] = in.HexSum()
return retry, err return retry, err
}) })
if err != nil { if err != nil {
fs.Debugf(up.o, "Error sending chunk %d: %v", chunkNumber, err) fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
} else { } else {
fs.Debugf(up.o, "Done sending chunk %d", chunkNumber) fs.Debugf(up.o, "Done sending chunk %d", part)
} }
return size, err return err
} }
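The seek dance inside WriteChunk above is a general io.ReadSeeker pattern: measure the chunk by seeking to the end, then rewind so each attempt (including pacer retries) resends the whole chunk. As a standalone sketch:

import "io"

// sizeAndRewind returns r's total size and leaves it positioned at the start.
func sizeAndRewind(r io.ReadSeeker) (int64, error) {
	size, err := r.Seek(0, io.SeekEnd) // offset from the start == size
	if err != nil {
		return 0, err
	}
	if _, err := r.Seek(0, io.SeekStart); err != nil { // rewind for the next read or retry
		return 0, err
	}
	return size, nil
}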
// Copy a chunk // Copy a chunk
func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64) error { func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
err := up.f.pacer.Call(func() (bool, error) { err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize) fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
Path: "/b2_copy_part", Path: "/b2_copy_part",
} }
offset := int64(part) * up.chunkSize // where we are in the source file offset := (part - 1) * up.chunkSize // where we are in the source file
var request = api.CopyPartRequest{ var request = api.CopyPartRequest{
SourceID: up.src.id, SourceID: up.src.id,
LargeFileID: up.id, LargeFileID: up.id,
PartNumber: int64(part + 1), PartNumber: part,
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1), Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
} }
var response api.UploadPartResponse var response api.UploadPartResponse
@@ -340,7 +294,7 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
if err != nil { if err != nil {
fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err) fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
} }
up.addSha1(part, response.SHA1) up.sha1s[part-1] = response.SHA1
return retry, err return retry, err
}) })
if err != nil { if err != nil {
@@ -351,8 +305,8 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
return err return err
} }
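To make the zero-based bookkeeping above concrete, a small illustrative fragment (p, chunkSize and partSize are assumed values, not names from this file):

offset := int64(p) * chunkSize                               // p is zero-based
rng := fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1) // inclusive byte range
partNumber := p + 1                                          // B2 part numbers start at 1

So with a 100 MiB chunk size, part 0 copies bytes=0-104857599 as B2 part number 1, and part 1 starts at byte 104857600.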
// Close closes off the large upload // finish closes off the large upload
func (up *largeUpload) Close(ctx context.Context) error { func (up *largeUpload) finish(ctx context.Context) error {
fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts) fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
@@ -370,12 +324,11 @@ func (up *largeUpload) Close(ctx context.Context) error {
if err != nil { if err != nil {
return err return err
} }
up.info = &response return up.o.decodeMetaDataFileInfo(&response)
return nil
} }
// Abort aborts the large upload // cancel aborts the large upload
func (up *largeUpload) Abort(ctx context.Context) error { func (up *largeUpload) cancel(ctx context.Context) error {
fs.Debugf(up.o, "Cancelling large file %s", up.what) fs.Debugf(up.o, "Cancelling large file %s", up.what)
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
@@ -400,102 +353,128 @@ func (up *largeUpload) Abort(ctx context.Context) error {
// reaches EOF. // reaches EOF.
// //
// Note that initialUploadBlock must be returned to f.putBuf() // Note that initialUploadBlock must be returned to f.putBuf()
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW) (err error) { func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })() defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id) fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
var ( var (
g, gCtx = errgroup.WithContext(ctx) g, gCtx = errgroup.WithContext(ctx)
hasMoreParts = true hasMoreParts = true
) )
up.size = initialUploadBlock.Size() up.size = int64(len(initialUploadBlock))
up.parts = 0 g.Go(func() error {
for part := 0; hasMoreParts; part++ { for part := int64(1); hasMoreParts; part++ {
// Get a block of memory from the pool and token which limits concurrency. // Get a block of memory from the pool and token which limits concurrency.
var rw *pool.RW var buf []byte
if part == 0 { if part == 1 {
rw = initialUploadBlock buf = initialUploadBlock
} else { } else {
rw = up.f.getRW(false) buf = up.f.getBuf(false)
} }
// Fail fast, in case an errgroup managed function returns an error // Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts. // gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil { if gCtx.Err() != nil {
up.f.putRW(rw) up.f.putBuf(buf, false)
break return nil
} }
// Read the chunk // Read the chunk
var n int64 var n int
if part == 0 { if part == 1 {
n = rw.Size() n = len(buf)
} else { } else {
n, err = io.CopyN(rw, up.in, up.chunkSize) n, err = io.ReadFull(up.in, buf)
if err == io.EOF { if err == io.ErrUnexpectedEOF {
if n == 0 { fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
fs.Debugf(up.o, "Not sending empty chunk after EOF - ending.") buf = buf[:n]
up.f.putRW(rw)
break
} else {
fs.Debugf(up.o, "Read less than a full chunk %d, making this the last one.", n)
}
hasMoreParts = false hasMoreParts = false
} else if err == io.EOF {
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
up.f.putBuf(buf, false)
return nil
} else if err != nil { } else if err != nil {
// other kinds of errors indicate failure // other kinds of errors indicate failure
up.f.putRW(rw) up.f.putBuf(buf, false)
return err return err
} }
} }
// Keep stats up to date // Keep stats up to date
up.parts += 1 up.parts = part
up.size += n up.size += int64(n)
if part > maxParts { if part > maxParts {
up.f.putRW(rw) up.f.putBuf(buf, false)
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts) return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
} }
part := part // for the closure part := part // for the closure
g.Go(func() (err error) { g.Go(func() (err error) {
defer up.f.putRW(rw) defer up.f.putBuf(buf, false)
_, err = up.WriteChunk(gCtx, part, rw) return up.transferChunk(gCtx, part, buf)
return err
}) })
} }
return nil
})
err = g.Wait() err = g.Wait()
if err != nil { if err != nil {
return err return err
} }
return up.Close(ctx) up.sha1s = up.sha1s[:up.parts]
return up.finish(ctx)
} }
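Both Stream above and Upload below share the same errgroup skeleton: derive a group plus context, bound concurrency, check the derived context before dispatching more work so one failed part stops the rest, and Wait at the end. A self-contained sketch of just that skeleton (the part count and worker body are placeholders):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, gCtx := errgroup.WithContext(context.Background())
	g.SetLimit(4) // cap in-flight parts, cf. --b2-upload-concurrency
	for part := 0; part < 10; part++ {
		if gCtx.Err() != nil {
			break // a worker already failed - stop dispatching
		}
		part := part // capture for the closure
		g.Go(func() error {
			fmt.Println("uploading part", part)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("upload failed:", err)
	}
}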
// Copy the chunks from the source to the destination // Upload uploads the chunks from the input
func (up *largeUpload) Copy(ctx context.Context) (err error) { func (up *largeUpload) Upload(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })() defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id) fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
var ( var (
g, gCtx = errgroup.WithContext(ctx) g, gCtx = errgroup.WithContext(ctx)
remaining = up.size remaining = up.size
) )
g.SetLimit(up.f.opt.UploadConcurrency) g.Go(func() error {
for part := range up.parts { for part := int64(1); part <= up.parts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
buf := up.f.getBuf(up.doCopy)
// Fail fast, in case an errgroup managed function returns an error // Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in copying all the other parts. // gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil { if gCtx.Err() != nil {
break up.f.putBuf(buf, up.doCopy)
return nil
} }
reqSize := min(remaining, up.chunkSize) reqSize := remaining
if reqSize >= up.chunkSize {
reqSize = up.chunkSize
}
if !up.doCopy {
// Read the chunk
buf = buf[:reqSize]
_, err = io.ReadFull(up.in, buf)
if err != nil {
up.f.putBuf(buf, up.doCopy)
return err
}
}
part := part // for the closure part := part // for the closure
g.Go(func() (err error) { g.Go(func() (err error) {
return up.copyChunk(gCtx, part, reqSize) defer up.f.putBuf(buf, up.doCopy)
if !up.doCopy {
err = up.transferChunk(gCtx, part, buf)
} else {
err = up.copyChunk(gCtx, part, reqSize)
}
return err
}) })
remaining -= reqSize remaining -= reqSize
} }
return nil
})
err = g.Wait() err = g.Wait()
if err != nil { if err != nil {
return err return err
} }
return up.Close(ctx) return up.finish(ctx)
} }
@@ -52,7 +52,7 @@ func (e *Error) Error() string {
out += ": " + e.Message out += ": " + e.Message
} }
if e.ContextInfo != nil { if e.ContextInfo != nil {
out += fmt.Sprintf(" (%s)", string(e.ContextInfo)) out += fmt.Sprintf(" (%+v)", e.ContextInfo)
} }
return out return out
} }
@@ -63,7 +63,7 @@ var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo // ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by" var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"
// Types of things in Item/ItemMini // Types of things in Item
const ( const (
ItemTypeFolder = "folder" ItemTypeFolder = "folder"
ItemTypeFile = "file" ItemTypeFile = "file"
@@ -72,21 +72,11 @@ const (
ItemStatusDeleted = "deleted" ItemStatusDeleted = "deleted"
) )
// ItemMini is a subset of the elements in a full Item returned by some API calls
type ItemMini struct {
Type string `json:"type"`
ID string `json:"id"`
SequenceID int64 `json:"sequence_id,string"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
}
// Item describes a folder or a file as returned by Get Folder Items and others // Item describes a folder or a file as returned by Get Folder Items and others
type Item struct { type Item struct {
Type string `json:"type"` Type string `json:"type"`
ID string `json:"id"` ID string `json:"id"`
SequenceID int64 `json:"sequence_id,string"` SequenceID string `json:"sequence_id"`
Etag string `json:"etag"` Etag string `json:"etag"`
SHA1 string `json:"sha1"` SHA1 string `json:"sha1"`
Name string `json:"name"` Name string `json:"name"`
@@ -96,7 +86,6 @@ type Item struct {
ContentCreatedAt Time `json:"content_created_at"` ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"` ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
Parent ItemMini `json:"parent"`
SharedLink struct { SharedLink struct {
URL string `json:"url,omitempty"` URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"` Access string `json:"access,omitempty"`
@@ -167,7 +156,19 @@ type PreUploadCheckResponse struct {
// PreUploadCheckConflict is returned in the ContextInfo error field // PreUploadCheckConflict is returned in the ContextInfo error field
// from PreUploadCheck when the error code is "item_name_in_use" // from PreUploadCheck when the error code is "item_name_in_use"
type PreUploadCheckConflict struct { type PreUploadCheckConflict struct {
Conflicts ItemMini `json:"conflicts"` Conflicts struct {
Type string `json:"type"`
ID string `json:"id"`
FileVersion struct {
Type string `json:"type"`
ID string `json:"id"`
Sha1 string `json:"sha1"`
} `json:"file_version"`
SequenceID string `json:"sequence_id"`
Etag string `json:"etag"`
Sha1 string `json:"sha1"`
Name string `json:"name"`
} `json:"conflicts"`
} }
// UpdateFileModTime is used in Update File Info // UpdateFileModTime is used in Update File Info
@@ -280,30 +281,3 @@ type User struct {
Address string `json:"address"` Address string `json:"address"`
AvatarURL string `json:"avatar_url"` AvatarURL string `json:"avatar_url"`
} }
// FileTreeChangeEventTypes are the events that can require cache invalidation
var FileTreeChangeEventTypes = map[string]struct{}{
"ITEM_COPY": {},
"ITEM_CREATE": {},
"ITEM_MAKE_CURRENT_VERSION": {},
"ITEM_MODIFY": {},
"ITEM_MOVE": {},
"ITEM_RENAME": {},
"ITEM_TRASH": {},
"ITEM_UNDELETE_VIA_TRASH": {},
"ITEM_UPLOAD": {},
}
// Event is an array element in the response returned from /events
type Event struct {
EventType string `json:"event_type"`
EventID string `json:"event_id"`
Source Item `json:"source"`
}
// Events is returned from /events
type Events struct {
ChunkSize int64 `json:"chunk_size"`
Entries []Event `json:"entries"`
NextStreamPosition int64 `json:"next_stream_position"`
}
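Given the types added above, cache invalidation reduces to a set-membership test on each entry; a minimal sketch, assuming an api.Events value decoded from a /events response:

// filterFileTreeEvents keeps only entries that can change the file tree.
func filterFileTreeEvents(events api.Events) (changed []api.Event) {
	for _, e := range events.Entries {
		if _, ok := api.FileTreeChangeEventTypes[e.EventType]; ok {
			changed = append(changed, e)
		}
	}
	return changed
}

events.NextStreamPosition would then be passed as the stream position of the next poll.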
@@ -17,9 +17,9 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"os"
"path" "path"
"strconv" "strconv"
"strings" "strings"
@@ -27,7 +27,6 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/golang-jwt/jwt/v4"
"github.com/rclone/rclone/backend/box/api" "github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config"
@@ -43,9 +42,10 @@ import (
"github.com/rclone/rclone/lib/jwtutil" "github.com/rclone/rclone/lib/jwtutil"
"github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/rest" "github.com/rclone/rclone/lib/rest"
"github.com/youmark/pkcs8" "github.com/youmark/pkcs8"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jws"
) )
const ( const (
@@ -64,21 +64,18 @@ const (
// Globals // Globals
var ( var (
// Description of how to auth for this app // Description of how to auth for this app
oauthConfig = &oauthutil.Config{ oauthConfig = &oauth2.Config{
Scopes: nil, Scopes: nil,
Endpoint: oauth2.Endpoint{
AuthURL: "https://app.box.com/api/oauth2/authorize", AuthURL: "https://app.box.com/api/oauth2/authorize",
TokenURL: "https://app.box.com/api/oauth2/token", TokenURL: "https://app.box.com/api/oauth2/token",
},
ClientID: rcloneClientID, ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL, RedirectURL: oauthutil.RedirectURL,
} }
) )
type boxCustomClaims struct {
jwt.StandardClaims
BoxSubType string `json:"box_sub_type,omitempty"`
}
// Register with Fs // Register with Fs
func init() { func init() {
fs.Register(&fs.RegInfo{ fs.Register(&fs.RegInfo{
@@ -109,14 +106,12 @@ func init() {
Help: "Fill in for rclone to use a non root folder as its starting point.", Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "0", Default: "0",
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "box_config_file", Name: "box_config_file",
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp, Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
}, { }, {
Name: "access_token", Name: "access_token",
Help: "Box App Primary Access Token\n\nLeave blank normally.", Help: "Box App Primary Access Token\n\nLeave blank normally.",
Sensitive: true,
}, { }, {
Name: "box_sub_type", Name: "box_sub_type",
Default: "user", Default: "user",
@@ -147,23 +142,6 @@ func init() {
Default: "", Default: "",
Help: "Only show items owned by the login (email address) passed in.", Help: "Only show items owned by the login (email address) passed in.",
Advanced: true, Advanced: true,
}, {
Name: "impersonate",
Default: "",
Help: `Impersonate this user ID when using a service account.
Setting this flag allows rclone, when using a JWT service account, to
act on behalf of another user by setting the as-user header.
The user ID is the Box identifier for a user. User IDs can be found for
any user via the GET /users endpoint, which is only available to
admins, or by calling the GET /users/me endpoint with an authenticated
user session.
See: https://developer.box.com/guides/authentication/jwt/as-user/
`,
Advanced: true,
Sensitive: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -200,12 +178,12 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
signingHeaders := getSigningHeaders(boxConfig) signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig) queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(ctx) client := fshttp.NewClient(ctx)
err = jwtutil.Config("box", name, tokenURL, *claims, signingHeaders, queryParams, privateKey, m, client) err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
return err return err
} }
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) { func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := os.ReadFile(configFile) file, err := ioutil.ReadFile(configFile)
if err != nil { if err != nil {
return nil, fmt.Errorf("box: failed to read Box config: %w", err) return nil, fmt.Errorf("box: failed to read Box config: %w", err)
} }
@@ -216,31 +194,34 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
return boxConfig, nil return boxConfig, nil
} }
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomClaims, err error) { func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
val, err := jwtutil.RandomHex(20) val, err := jwtutil.RandomHex(20)
if err != nil { if err != nil {
return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err) return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
} }
claims = &boxCustomClaims{ claims = &jws.ClaimSet{
//lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely Iss: boxConfig.BoxAppSettings.ClientID,
//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019 Sub: boxConfig.EnterpriseID,
StandardClaims: jwt.StandardClaims{ Aud: tokenURL,
Id: val, Exp: time.Now().Add(time.Second * 45).Unix(),
Issuer: boxConfig.BoxAppSettings.ClientID, PrivateClaims: map[string]interface{}{
Subject: boxConfig.EnterpriseID, "box_sub_type": boxSubType,
Audience: tokenURL, "aud": tokenURL,
ExpiresAt: time.Now().Add(time.Second * 45).Unix(), "jti": val,
}, },
BoxSubType: boxSubType,
} }
return claims, nil return claims, nil
} }
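For orientation, a hedged sketch of what ultimately happens to these claims, assuming the golang-jwt/jwt/v4 package imported above (rclone's jwtutil.Config does the real work; this is illustrative only):

token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims) // claims from getClaims
token.Header["kid"] = boxConfig.BoxAppSettings.AppAuth.PublicKeyID
signed, err := token.SignedString(privateKey) // *rsa.PrivateKey from getDecryptedPrivateKey
if err != nil {
	return err
}
// "signed" is then exchanged at tokenURL for an OAuth access token.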
func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]any { func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
signingHeaders := map[string]any{ signingHeaders := &jws.Header{
"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID, Algorithm: "RS256",
Typ: "JWT",
KeyID: boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
} }
return signingHeaders return signingHeaders
} }
@@ -254,10 +235,8 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
} }
func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) { func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey)) block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
if block == nil {
return nil, errors.New("box: failed to PEM decode private key")
}
if len(rest) > 0 { if len(rest) > 0 {
return nil, fmt.Errorf("box: extra data included in private key: %w", err) return nil, fmt.Errorf("box: extra data included in private key: %w", err)
} }
@@ -279,14 +258,6 @@ type Options struct {
AccessToken string `config:"access_token"` AccessToken string `config:"access_token"`
ListChunk int `config:"list_chunk"` ListChunk int `config:"list_chunk"`
OwnedBy string `config:"owned_by"` OwnedBy string `config:"owned_by"`
Impersonate string `config:"impersonate"`
}
// ItemMeta defines metadata we cache for each Item ID
type ItemMeta struct {
SequenceID int64 // the most recent event processed for this item
ParentID string // ID of the parent directory of this item
Name string // leaf name of this item
} }
// Fs represents a remote box // Fs represents a remote box
@@ -300,8 +271,6 @@ type Fs struct {
pacer *fs.Pacer // pacer for API calls pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry tokenRenewer *oauthutil.Renew // renew the token on expiry
uploadToken *pacer.TokenDispenser // control concurrency uploadToken *pacer.TokenDispenser // control concurrency
itemMetaCacheMu *sync.Mutex // protects itemMetaCache
itemMetaCache map[string]ItemMeta // map of Item ID to selected metadata
} }
// Object describes a box object // Object describes a box object
@@ -380,7 +349,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
// readMetaDataForPath reads the metadata from the path // readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) { func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
// defer log.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err) // defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false) leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
if err != nil { if err != nil {
if err == fs.ErrorDirNotFound { if err == fs.ErrorDirNotFound {
@@ -389,30 +358,20 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
return nil, err return nil, err
} }
// Use preupload to find the ID found, err := f.listAll(ctx, directoryID, false, true, true, func(item *api.Item) bool {
itemMini, err := f.preUploadCheck(ctx, leaf, directoryID, -1) if strings.EqualFold(item.Name, leaf) {
if err != nil { info = item
return nil, err return true
} }
if itemMini == nil { return false
return nil, fs.ErrorObjectNotFound
}
// Now we have the ID we can look up the object proper
opts := rest.Opts{
Method: "GET",
Path: "/files/" + itemMini.ID,
Parameters: fieldsValue(),
}
var item api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &item)
return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &item, nil if !found {
return nil, fs.ErrorObjectNotFound
}
return info, nil
} }
// errorHandler parses a non 2xx error response into an error // errorHandler parses a non 2xx error response into an error
@@ -465,8 +424,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		srv:         rest.NewClient(client).SetRoot(rootURL),
 		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
-		itemMetaCacheMu: new(sync.Mutex),
-		itemMetaCache:   make(map[string]ItemMeta),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive: true,
@@ -479,11 +436,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
 	}

-	// If using impersonate set an as-user header
-	if f.opt.Impersonate != "" {
-		f.srv.SetHeader("as-user", f.opt.Impersonate)
-	}
-
 	jsonFile, ok := m.Get("box_config_file")
 	boxSubType, boxSubTypeOk := m.Get("box_sub_type")
@@ -619,7 +571,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		// fmt.Printf("...Error %v\n", err)
+		//fmt.Printf("...Error %v\n", err)
 		return "", err
 	}
 	// fmt.Printf("...Id %q\n", *info.Id)
@@ -726,17 +678,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		}
 		entries = append(entries, o)
 	}

-	// Cache some metadata for this Item to help us process events later
-	// on. In particular, the box event API does not provide the old path
-	// of the Item when it is renamed/deleted/moved/etc.
-	f.itemMetaCacheMu.Lock()
-	cachedItemMeta, found := f.itemMetaCache[info.ID]
-	if !found || cachedItemMeta.SequenceID < info.SequenceID {
-		f.itemMetaCache[info.ID] = ItemMeta{SequenceID: info.SequenceID, ParentID: directoryID, Name: info.Name}
-	}
-	f.itemMetaCacheMu.Unlock()
-
 	return false
 })
 if err != nil {
@@ -772,7 +713,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
 //
 // It returns "", nil if the file is good to go
 // It returns "ID", nil if the file must be updated
-func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (item *api.ItemMini, err error) {
+func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (ID string, err error) {
 	check := api.PreUploadCheck{
 		Name: f.opt.Enc.FromStandardName(leaf),
 		Parent: api.Parent{
@@ -797,16 +738,16 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size
 		var conflict api.PreUploadCheckConflict
 		err = json.Unmarshal(apiErr.ContextInfo, &conflict)
 		if err != nil {
-			return nil, fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
+			return "", fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
 		}
 		if conflict.Conflicts.Type != api.ItemTypeFile {
-			return nil, fs.ErrorIsDir
+			return "", fmt.Errorf("pre-upload check: can't overwrite non file with file: %w", err)
 		}
-		return &conflict.Conflicts, nil
+		return conflict.Conflicts.ID, nil
 	}
-	return nil, fmt.Errorf("pre-upload check: %w", err)
+	return "", fmt.Errorf("pre-upload check: %w", err)
 }
-	return nil, nil
+	return "", nil
 }

 // Put the object
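In both variants the conflict details arrive as raw JSON inside the API error (ContextInfo) and are decoded in a second pass once the error code is known. A self-contained sketch of that two-stage decode, with hypothetical field shapes:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // apiError mirrors the common pattern of an error envelope that
    // carries endpoint-specific detail as raw, undecoded JSON.
    type apiError struct {
        Code        string          `json:"code"`
        ContextInfo json.RawMessage `json:"context_info"`
    }

    // conflictInfo is a hypothetical shape for the nested detail.
    type conflictInfo struct {
        Conflicts struct {
            ID   string `json:"id"`
            Type string `json:"type"`
        } `json:"conflicts"`
    }

    func main() {
        body := []byte(`{"code":"item_name_in_use","context_info":{"conflicts":{"id":"12345","type":"file"}}}`)
        var apiErr apiError
        if err := json.Unmarshal(body, &apiErr); err != nil {
            panic(err)
        }
        // Only decode the detail once we know which error we have.
        if apiErr.Code == "item_name_in_use" {
            var c conflictInfo
            if err := json.Unmarshal(apiErr.ContextInfo, &c); err != nil {
                panic(err)
            }
            fmt.Println("existing file ID:", c.Conflicts.ID)
        }
    }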
@@ -827,11 +768,11 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 	// Preflight check the upload, which returns the ID if the
 	// object already exists
-	item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
+	ID, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
 	if err != nil {
 		return nil, err
 	}
-	if item == nil {
+	if ID == "" {
 		return f.PutUnchecked(ctx, in, src, options...)
 	}
@@ -839,7 +780,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 	o := &Object{
 		fs:     f,
 		remote: remote,
-		id:     item.ID,
+		id:     ID,
 	}
 	return o, o.Update(ctx, in, src, options...)
 }
@@ -966,26 +907,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		return nil, err
 	}

-	// check if dest already exists
-	item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
-	if err != nil {
-		return nil, err
-	}
-	if item != nil { // dest already exists, need to copy to temp name and then move
-		tempSuffix := "-rclone-copy-" + random.String(8)
-		fs.Debugf(remote, "dst already exists, copying to temp name %v", remote+tempSuffix)
-		tempObj, err := f.Copy(ctx, src, remote+tempSuffix)
-		if err != nil {
-			return nil, err
-		}
-		fs.Debugf(remote+tempSuffix, "moving to real name %v", remote)
-		err = f.deleteObject(ctx, item.ID)
-		if err != nil {
-			return nil, err
-		}
-		return f.Move(ctx, tempObj, remote)
-	}
-
 	// Copy the object
 	opts := rest.Opts{
 		Method: "POST",
@@ -1196,7 +1117,7 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
 // CleanUp empties the trash
 func (f *Fs) CleanUp(ctx context.Context) (err error) {
 	var (
-		deleteErrors       atomic.Uint64
+		deleteErrors       = int64(0)
 		concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
 		wg                 sync.WaitGroup
 	)
@@ -1212,7 +1133,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
 			err := f.deletePermanently(ctx, item.Type, item.ID)
 			if err != nil {
 				fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
-				deleteErrors.Add(1)
+				atomic.AddInt64(&deleteErrors, 1)
 			}
 		}()
 	} else {
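Both versions of CleanUp count failed deletions from many goroutines without a lock; the newer side uses the typed sync/atomic.Uint64 (Go 1.19+) where the older one drives atomic.AddInt64 through a pointer. A minimal sketch of the two equivalent styles:

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    func main() {
        var typed atomic.Uint64 // Go 1.19+ typed counter
        var plain int64         // classic pointer-based counter

        var wg sync.WaitGroup
        for i := 0; i < 100; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                typed.Add(1)               // method call, cannot be used non-atomically by accident
                atomic.AddInt64(&plain, 1) // older style, same memory semantics
            }()
        }
        wg.Wait()
        fmt.Println(typed.Load(), atomic.LoadInt64(&plain)) // 100 100
    }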
@@ -1221,279 +1142,12 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
 		return false
 	})
 	wg.Wait()
-	if deleteErrors.Load() != 0 {
-		return fmt.Errorf("failed to delete %d trash items", deleteErrors.Load())
+	if deleteErrors != 0 {
+		return fmt.Errorf("failed to delete %d trash items", deleteErrors)
 	}
 	return err
 }

-// Shutdown shutdown the fs
-func (f *Fs) Shutdown(ctx context.Context) error {
-	f.tokenRenewer.Shutdown()
-	return nil
-}
-
-// ChangeNotify calls the passed function with a path that has had changes.
-// If the implementation uses polling, it should adhere to the given interval.
-//
-// Automatically restarts itself in case of unexpected behavior of the remote.
-//
-// Close the returned channel to stop being notified.
-func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
-	go func() {
-		// get the `stream_position` early so all changes from now on get processed
-		streamPosition, err := f.changeNotifyStreamPosition(ctx)
-		if err != nil {
-			fs.Infof(f, "Failed to get StreamPosition: %s", err)
-		}
-
-		// box can send duplicate Event IDs. Use this map to track and filter
-		// the ones we've already processed.
-		processedEventIDs := make(map[string]time.Time)
-
-		var ticker *time.Ticker
-		var tickerC <-chan time.Time
-		for {
-			select {
-			case pollInterval, ok := <-pollIntervalChan:
-				if !ok {
-					if ticker != nil {
-						ticker.Stop()
-					}
-					return
-				}
-				if ticker != nil {
-					ticker.Stop()
-					ticker, tickerC = nil, nil
-				}
-				if pollInterval != 0 {
-					ticker = time.NewTicker(pollInterval)
-					tickerC = ticker.C
-				}
-			case <-tickerC:
-				if streamPosition == "" {
-					streamPosition, err = f.changeNotifyStreamPosition(ctx)
-					if err != nil {
-						fs.Infof(f, "Failed to get StreamPosition: %s", err)
-						continue
-					}
-				}
-
-				// Garbage collect EventIDs older than 1 minute
-				for eventID, timestamp := range processedEventIDs {
-					if time.Since(timestamp) > time.Minute {
-						delete(processedEventIDs, eventID)
-					}
-				}
-
-				streamPosition, err = f.changeNotifyRunner(ctx, notifyFunc, streamPosition, processedEventIDs)
-				if err != nil {
-					fs.Infof(f, "Change notify listener failure: %s", err)
-				}
-			}
-		}
-	}()
-}
-
-func (f *Fs) changeNotifyStreamPosition(ctx context.Context) (streamPosition string, err error) {
-	opts := rest.Opts{
-		Method:     "GET",
-		Path:       "/events",
-		Parameters: fieldsValue(),
-	}
-	opts.Parameters.Set("stream_position", "now")
-	opts.Parameters.Set("stream_type", "changes")
-
-	var result api.Events
-	var resp *http.Response
-	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-		return shouldRetry(ctx, resp, err)
-	})
-	if err != nil {
-		return "", err
-	}
-	return strconv.FormatInt(result.NextStreamPosition, 10), nil
-}
-
-// Attempts to construct the full path for an object, given the ID of its
-// parent directory and the name of the object.
-//
-// Can return "" if the parentID is not currently in the directory cache.
-func (f *Fs) getFullPath(parentID string, childName string) (fullPath string) {
-	fullPath = ""
-	name := f.opt.Enc.ToStandardName(childName)
-	if parentID != "" {
-		if parentDir, ok := f.dirCache.GetInv(parentID); ok {
-			if len(parentDir) > 0 {
-				fullPath = parentDir + "/" + name
-			} else {
-				fullPath = name
-			}
-		}
-	} else {
-		// No parent, this object is at the root
-		fullPath = name
-	}
-	return fullPath
-}
-
-func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), streamPosition string, processedEventIDs map[string]time.Time) (nextStreamPosition string, err error) {
-	nextStreamPosition = streamPosition
-
-	for {
-		// box only allows a max of 500 events
-		limit := min(f.opt.ListChunk, 500)
-
-		opts := rest.Opts{
-			Method:     "GET",
-			Path:       "/events",
-			Parameters: fieldsValue(),
-		}
-		opts.Parameters.Set("stream_position", nextStreamPosition)
-		opts.Parameters.Set("stream_type", "changes")
-		opts.Parameters.Set("limit", strconv.Itoa(limit))
-
-		var result api.Events
-		var resp *http.Response
-		fs.Debugf(f, "Checking for changes on remote (next_stream_position: %q)", nextStreamPosition)
-		err = f.pacer.Call(func() (bool, error) {
-			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-			return shouldRetry(ctx, resp, err)
-		})
-		if err != nil {
-			return "", err
-		}
-
-		if result.ChunkSize != int64(len(result.Entries)) {
-			return "", fmt.Errorf("invalid response to event request, chunk_size (%v) not equal to number of entries (%v)", result.ChunkSize, len(result.Entries))
-		}
-
-		nextStreamPosition = strconv.FormatInt(result.NextStreamPosition, 10)
-		if result.ChunkSize == 0 {
-			return nextStreamPosition, nil
-		}
-
-		type pathToClear struct {
-			path      string
-			entryType fs.EntryType
-		}
-		var pathsToClear []pathToClear
-		newEventIDs := 0
-		for _, entry := range result.Entries {
-			eventDetails := fmt.Sprintf("[%q(%d)|%s|%s|%s|%s]", entry.Source.Name, entry.Source.SequenceID,
-				entry.Source.Type, entry.EventType, entry.Source.ID, entry.EventID)
-
-			if entry.EventID == "" {
-				fs.Debugf(f, "%s ignored due to missing EventID", eventDetails)
-				continue
-			}
-			if _, ok := processedEventIDs[entry.EventID]; ok {
-				fs.Debugf(f, "%s ignored due to duplicate EventID", eventDetails)
-				continue
-			}
-			processedEventIDs[entry.EventID] = time.Now()
-			newEventIDs++
-
-			if entry.Source.ID == "" { // missing File or Folder ID
-				fs.Debugf(f, "%s ignored due to missing SourceID", eventDetails)
-				continue
-			}
-			if entry.Source.Type != api.ItemTypeFile && entry.Source.Type != api.ItemTypeFolder { // event is not for a file or folder
-				fs.Debugf(f, "%s ignored due to unsupported SourceType", eventDetails)
-				continue
-			}
-
-			// Only interested in event types that result in a file tree change
-			if _, found := api.FileTreeChangeEventTypes[entry.EventType]; !found {
-				fs.Debugf(f, "%s ignored due to unsupported EventType", eventDetails)
-				continue
-			}
-
-			f.itemMetaCacheMu.Lock()
-			itemMeta, cachedItemMetaFound := f.itemMetaCache[entry.Source.ID]
-			if cachedItemMetaFound {
-				if itemMeta.SequenceID >= entry.Source.SequenceID {
-					// Item in the cache has the same or newer SequenceID than
-					// this event. Ignore this event, it must be old.
-					f.itemMetaCacheMu.Unlock()
-					fs.Debugf(f, "%s ignored due to old SequenceID (%q)", eventDetails, itemMeta.SequenceID)
-					continue
-				}
-
-				// This event is newer. Delete its entry from the cache,
-				// we'll notify about its change below, then it's up to a
-				// future list operation to repopulate the cache.
-				delete(f.itemMetaCache, entry.Source.ID)
-			}
-			f.itemMetaCacheMu.Unlock()
-
-			entryType := fs.EntryDirectory
-			if entry.Source.Type == api.ItemTypeFile {
-				entryType = fs.EntryObject
-			}
-
-			// The box event only includes the new path for the object (e.g.
-			// the path after the object was moved). If there was an old path
-			// saved in our cache, it must be cleared.
-			if cachedItemMetaFound {
-				path := f.getFullPath(itemMeta.ParentID, itemMeta.Name)
-				if path != "" {
-					fs.Debugf(f, "%s added old path (%q) for notify", eventDetails, path)
-					pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
-				} else {
-					fs.Debugf(f, "%s old parent not cached", eventDetails)
-				}
-
-				// If this is a directory, also delete it from the dir cache.
-				// This will effectively invalidate the item metadata cache
-				// entries for all descendents of this directory, since we
-				// will no longer be able to construct a full path for them.
-				// This is exactly what we want, since we don't want to notify
-				// on the paths of these descendents if one of their ancestors
-				// has been renamed/deleted.
-				if entry.Source.Type == api.ItemTypeFolder {
-					f.dirCache.FlushDir(path)
-				}
-			}
-
-			// If the item is "active", then it is not trashed or deleted, so
-			// it potentially has a valid parent.
-			//
-			// Construct the new path of the object, based on the Parent ID
-			// and its name. If we get an empty result, it means we don't
-			// currently know about this object so notification is unnecessary.
-			if entry.Source.ItemStatus == api.ItemStatusActive {
-				path := f.getFullPath(entry.Source.Parent.ID, entry.Source.Name)
-				if path != "" {
-					fs.Debugf(f, "%s added new path (%q) for notify", eventDetails, path)
-					pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
-				} else {
-					fs.Debugf(f, "%s new parent not found", eventDetails)
-				}
-			}
-		}
-
-		// box can sometimes repeatedly return the same Event IDs within a
-		// short period of time. If it stops giving us new ones, treat it
-		// the same as if it returned us none at all.
-		if newEventIDs == 0 {
-			return nextStreamPosition, nil
-		}
-
-		notifiedPaths := make(map[string]bool)
-		for _, p := range pathsToClear {
-			if _, ok := notifiedPaths[p.path]; ok {
-				continue
-			}
-			notifiedPaths[p.path] = true
-			notifyFunc(p.path, p.entryType)
-		}
-		fs.Debugf(f, "Received %v events, resulting in %v paths and %v notifications", len(result.Entries), len(pathsToClear), len(notifiedPaths))
-	}
-}
-
 // DirCacheFlush resets the directory cache - used in testing as an
 // optional interface
 func (f *Fs) DirCacheFlush() {
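The listener removed above multiplexes a resettable ticker with the poll-interval channel, so callers can retune or stop polling at runtime; while polling is off the ticker channel is nil and simply blocks. A stripped-down sketch of that select pattern, independent of the Box API:

    package main

    import (
        "fmt"
        "time"
    )

    // pollLoop runs work() on a ticker whose interval can be changed (or
    // paused with a zero interval) via intervalCh; closing intervalCh
    // stops the loop entirely.
    func pollLoop(intervalCh <-chan time.Duration, work func()) {
        var ticker *time.Ticker
        var tickerC <-chan time.Time
        for {
            select {
            case interval, ok := <-intervalCh:
                if !ok {
                    if ticker != nil {
                        ticker.Stop()
                    }
                    return
                }
                if ticker != nil {
                    ticker.Stop()
                    ticker, tickerC = nil, nil
                }
                if interval != 0 {
                    ticker = time.NewTicker(interval)
                    tickerC = ticker.C
                }
            case <-tickerC: // a nil channel blocks forever while polling is off
                work()
            }
        }
    }

    func main() {
        ch := make(chan time.Duration)
        go pollLoop(ch, func() { fmt.Println("poll") })
        ch <- 10 * time.Millisecond
        time.Sleep(35 * time.Millisecond)
        close(ch)
    }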
@@ -1741,7 +1395,6 @@ var (
 	_ fs.DirCacheFlusher = (*Fs)(nil)
 	_ fs.PublicLinker    = (*Fs)(nil)
 	_ fs.CleanUpper      = (*Fs)(nil)
-	_ fs.Shutdowner      = (*Fs)(nil)
 	_ fs.Object          = (*Object)(nil)
 	_ fs.IDer            = (*Object)(nil)
 )


@@ -105,7 +105,7 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
 	const defaultDelay = 10
 	var tries int
 outer:
-	for tries = range maxTries {
+	for tries = 0; tries < maxTries; tries++ {
 		err = o.fs.pacer.Call(func() (bool, error) {
 			resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
 			if err != nil {
@@ -203,7 +203,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
 	errs := make(chan error, 1)
 	var wg sync.WaitGroup
 outer:
-	for part := range session.TotalParts {
+	for part := 0; part < session.TotalParts; part++ {
 		// Check any errors
 		select {
 		case err = <-errs:
@@ -211,7 +211,10 @@ outer:
 		default:
 		}

-		reqSize := min(remaining, chunkSize)
+		reqSize := remaining
+		if reqSize >= chunkSize {
+			reqSize = chunkSize
+		}

 		// Make a block of memory
 		buf := make([]byte, reqSize)
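The left side of these hunks relies on two newer language features the branch predates: the built-in min (Go 1.21) and ranging over an integer (Go 1.22). A compact sketch of both, outside any rclone context:

    package main

    import "fmt"

    func main() {
        const chunkSize = 8
        remaining := int64(21)

        // Go 1.22: "for i := range n" counts 0..n-1, replacing the C-style loop.
        for part := range 3 {
            // Go 1.21: the built-in min replaces the hand-rolled if clamp.
            reqSize := min(remaining, chunkSize)
            fmt.Printf("part %d reads %d bytes\n", part, reqSize)
            remaining -= reqSize
        }
    }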


@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 // Package cache implements a virtual provider to cache existing remotes.
 package cache
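The // +build line is the pre-Go-1.17 spelling of a build constraint; gofmt keeps it paired with the //go:build form so older toolchains still honour it. In the old syntax commas mean AND and spaces mean OR, as in this hypothetical file header:

    //go:build (linux && amd64) || (darwin && amd64)
    // +build linux,amd64 darwin,amd64

    // Package demo builds only on 64-bit Linux or 64-bit Darwin:
    // both constraint lines above express the same condition.
    package demo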
@@ -29,7 +30,6 @@ import (
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/fspath"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/list"
 	"github.com/rclone/rclone/fs/rc"
 	"github.com/rclone/rclone/fs/walk"
 	"github.com/rclone/rclone/lib/atexit"
@@ -78,7 +78,6 @@ func init() {
 		}, {
 			Name: "plex_username",
 			Help: "The username of the Plex user.",
-			Sensitive: true,
 		}, {
 			Name: "plex_password",
 			Help: "The password of the Plex user.",
@@ -88,7 +87,6 @@ func init() {
 			Help:     "The plex token for authentication - auto set normally.",
 			Hide:     fs.OptionHideBoth,
 			Advanced: true,
-			Sensitive: true,
 		}, {
 			Name: "plex_insecure",
 			Help: "Skip all certificate verification when connecting to the Plex server.",
@@ -410,7 +408,8 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 		if err != nil {
 			return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
 		}
-	} else if opt.PlexPassword != "" && opt.PlexUsername != "" {
+	} else {
+		if opt.PlexPassword != "" && opt.PlexUsername != "" {
 			decPass, err := obscure.Reveal(opt.PlexPassword)
 			if err != nil {
 				decPass = opt.PlexPassword
 			}
@@ -423,6 +422,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 			}
 		}
 	}
+	}

 	dbPath := f.opt.DbPath
 	chunkPath := f.opt.ChunkPath
@@ -1038,7 +1038,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		}
 		fs.Debugf(dir, "list: remove entry: %v", entryRemote)
 	}
-	entries = nil //nolint:ineffassign
+	entries = nil

 	// and then iterate over the ones from source (temp Objects will override source ones)
 	var batchDirectories []*Directory
@@ -1087,13 +1087,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	return cachedEntries, nil
 }

-func (f *Fs) recurse(ctx context.Context, dir string, list *list.Helper) error {
+func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
 	entries, err := f.List(ctx, dir)
 	if err != nil {
 		return err
 	}

-	for i := range entries {
+	for i := 0; i < len(entries); i++ {
 		innerDir, ok := entries[i].(fs.Directory)
 		if ok {
 			err := f.recurse(ctx, innerDir.Remote(), list)
@@ -1139,7 +1139,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 	}

 	// if we're here, we're gonna do a standard recursive traversal and cache everything
-	list := list.NewHelper(callback)
+	list := walk.NewListRHelper(callback)
 	err = f.recurse(ctx, dir, list)
 	if err != nil {
 		return err
@@ -1429,7 +1429,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
 	}()

 	// wait until both are done
-	for range 2 {
+	for c := 0; c < 2; c++ {
 		<-done
 	}
 }
@@ -1754,7 +1754,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 }

 // Stats returns stats about the cache storage
-func (f *Fs) Stats() (map[string]map[string]any, error) {
+func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
 	return f.cache.Stats()
 }
@@ -1787,7 +1787,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
 	}
 }

-// StopBackgroundRunners will signal all the runners to stop their work
+// StopBackgroundRunners will signall all the runners to stop their work
 // can be triggered from a terminate signal or from testing between runs
 func (f *Fs) StopBackgroundRunners() {
 	f.cleanupChan <- false
@@ -1934,7 +1934,7 @@ var commandHelp = []fs.CommandHelp{
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
 	switch name {
 	case "stats":
 		return f.Stats()
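Go 1.18 made any a predeclared alias for interface{}, so the two signatures in these hunks are type-identical and the change is purely cosmetic. A sketch:

    package main

    import "fmt"

    // any is an alias (type any = interface{}), so these two
    // functions have exactly the same type.
    func oldStyle(v interface{}) string { return fmt.Sprint(v) }
    func newStyle(v any) string         { return fmt.Sprint(v) }

    func main() {
        fmt.Println(oldStyle(42) == newStyle(42)) // true
        // Assignability works both ways because the types are identical.
        var f func(interface{}) string = newStyle
        fmt.Println(f("hello"))
    }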


@@ -1,4 +1,5 @@
 //go:build !plan9 && !js && !race
+// +build !plan9,!js,!race

 package cache_test

@@ -10,6 +11,8 @@ import (
 	goflag "flag"
 	"fmt"
 	"io"
+	"io/ioutil"
+	"log"
 	"math/rand"
 	"os"
 	"path"
@@ -28,11 +31,10 @@ import (
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/object"
-	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/fstest/testy"
 	"github.com/rclone/rclone/lib/random"
-	"github.com/rclone/rclone/vfs/vfscommon"
+	"github.com/rclone/rclone/vfs/vfsflags"
 	"github.com/stretchr/testify/require"
 )
@@ -92,7 +94,7 @@ func TestMain(m *testing.M) {
 	goflag.Parse()
 	var rc int

-	fs.Logf(nil, "Running with the following params: \n remote: %v", remoteName)
+	log.Printf("Running with the following params: \n remote: %v", remoteName)
 	runInstance = newRun()
 	rc = m.Run()
 	os.Exit(rc)
@@ -100,12 +102,14 @@ func TestMain(m *testing.M) {
 func TestInternalListRootAndInnerRemotes(t *testing.T) {
 	id := fmt.Sprintf("tilrair%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	// Instantiate inner fs
 	innerFolder := "inner"
 	runInstance.mkdir(t, rootFs, innerFolder)
-	rootFs2, _ := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil)
+	rootFs2, boltDb2 := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs2, boltDb2)

 	runInstance.writeObjectString(t, rootFs2, "one", "content")
 	listRoot, err := runInstance.list(t, rootFs, "")
@@ -122,10 +126,10 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {
 /* TODO: is this testing something?
 func TestInternalVfsCache(t *testing.T) {
-	vfscommon.Opt.DirCacheTime = time.Second * 30
+	vfsflags.Opt.DirCacheTime = time.Second * 30
 	testSize := int64(524288000)

-	vfscommon.Opt.CacheMode = vfs.CacheModeWrites
+	vfsflags.Opt.CacheMode = vfs.CacheModeWrites
 	id := "tiuufo"
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -163,7 +167,7 @@ func TestInternalVfsCache(t *testing.T) {
 			li2 := [2]string{path.Join("test", "one"), path.Join("test", "second")}
 			for _, r := range li2 {
 				var err error
-				ci, err := os.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
+				ci, err := ioutil.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
 				if err != nil || len(ci) == 0 {
 					log.Printf("========== '%v' not in cache", r)
 				} else {
@@ -222,7 +226,8 @@ func TestInternalVfsCache(t *testing.T) {
 func TestInternalObjWrapFsFound(t *testing.T) {
 	id := fmt.Sprintf("tiowff%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
@@ -254,7 +259,8 @@ func TestInternalObjWrapFsFound(t *testing.T) {
 func TestInternalObjNotFound(t *testing.T) {
 	id := fmt.Sprintf("tionf%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	obj, err := rootFs.NewObject(context.Background(), "404")
 	require.Error(t, err)
@@ -264,7 +270,8 @@ func TestInternalObjNotFound(t *testing.T) {
 func TestInternalCachedWrittenContentMatches(t *testing.T) {
 	testy.SkipUnreliable(t)
 	id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
@@ -291,7 +298,8 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
 		t.Skip("Skip test on windows/386")
 	}
 	id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	// write the object
 	runInstance.writeRemoteString(t, rootFs, "one", "one content")
@@ -309,7 +317,8 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
 func TestInternalCachedUpdatedContentMatches(t *testing.T) {
 	testy.SkipUnreliable(t)
 	id := fmt.Sprintf("ticucm%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
 	var err error

 	// create some rand test data
@@ -337,8 +346,9 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
 func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 	id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
-	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
+	vfsflags.Opt.DirCacheTime = time.Second
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
 	if runInstance.rootIsCrypt {
 		t.Skip("test skipped with crypt remote")
 	}
@@ -360,15 +370,16 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, int64(len(checkSample)), o.Size())

-	for i := range checkSample {
+	for i := 0; i < len(checkSample); i++ {
 		require.Equal(t, testData[i], checkSample[i])
 	}
 }

 func TestInternalLargeWrittenContentMatches(t *testing.T) {
 	id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
-	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
+	vfsflags.Opt.DirCacheTime = time.Second
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
 	if runInstance.rootIsCrypt {
 		t.Skip("test skipped with crypt remote")
 	}
@@ -387,14 +398,15 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
 	readData, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
 	require.NoError(t, err)
-	for i := range readData {
+	for i := 0; i < len(readData); i++ {
 		require.Equalf(t, testData[i], readData[i], "at byte %v", i)
 	}
 }

 func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 	id := fmt.Sprintf("tiwfcns%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
@@ -407,7 +419,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 	// update in the wrapped fs
 	originalSize, err := runInstance.size(t, rootFs, "data.bin")
 	require.NoError(t, err)
-	fs.Logf(nil, "original size: %v", originalSize)
+	log.Printf("original size: %v", originalSize)

 	o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
 	require.NoError(t, err)
@@ -416,7 +428,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 	if runInstance.rootIsCrypt {
 		data2, err = base64.StdEncoding.DecodeString(cryptedText3Base64)
 		require.NoError(t, err)
-		expectedSize++ // FIXME newline gets in, likely test data issue
+		expectedSize = expectedSize + 1 // FIXME newline gets in, likely test data issue
 	} else {
 		data2 = []byte("test content")
 	}
@@ -424,7 +436,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 	err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
 	require.NoError(t, err)
 	require.Equal(t, int64(len(data2)), o.Size())
-	fs.Logf(nil, "updated size: %v", len(data2))
+	log.Printf("updated size: %v", len(data2))

 	// get a new instance from the cache
 	if runInstance.wrappedIsExternal {
@@ -448,7 +460,8 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 func TestInternalMoveWithNotify(t *testing.T) {
 	id := fmt.Sprintf("timwn%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
 	if !runInstance.wrappedIsExternal {
 		t.Skipf("Not external")
 	}
@@ -484,49 +497,49 @@ func TestInternalMoveWithNotify(t *testing.T) {
 	err = runInstance.retryBlock(func() error {
 		li, err := runInstance.list(t, rootFs, "test")
 		if err != nil {
-			fs.Logf(nil, "err: %v", err)
+			log.Printf("err: %v", err)
 			return err
 		}
 		if len(li) != 2 {
-			fs.Logf(nil, "not expected listing /test: %v", li)
+			log.Printf("not expected listing /test: %v", li)
 			return fmt.Errorf("not expected listing /test: %v", li)
 		}

 		li, err = runInstance.list(t, rootFs, "test/one")
 		if err != nil {
-			fs.Logf(nil, "err: %v", err)
+			log.Printf("err: %v", err)
 			return err
 		}
 		if len(li) != 0 {
-			fs.Logf(nil, "not expected listing /test/one: %v", li)
+			log.Printf("not expected listing /test/one: %v", li)
 			return fmt.Errorf("not expected listing /test/one: %v", li)
 		}

 		li, err = runInstance.list(t, rootFs, "test/second")
 		if err != nil {
-			fs.Logf(nil, "err: %v", err)
+			log.Printf("err: %v", err)
 			return err
 		}
 		if len(li) != 1 {
-			fs.Logf(nil, "not expected listing /test/second: %v", li)
+			log.Printf("not expected listing /test/second: %v", li)
 			return fmt.Errorf("not expected listing /test/second: %v", li)
 		}
 		if fi, ok := li[0].(os.FileInfo); ok {
 			if fi.Name() != "data.bin" {
-				fs.Logf(nil, "not expected name: %v", fi.Name())
+				log.Printf("not expected name: %v", fi.Name())
 				return fmt.Errorf("not expected name: %v", fi.Name())
 			}
 		} else if di, ok := li[0].(fs.DirEntry); ok {
 			if di.Remote() != "test/second/data.bin" {
-				fs.Logf(nil, "not expected remote: %v", di.Remote())
+				log.Printf("not expected remote: %v", di.Remote())
 				return fmt.Errorf("not expected remote: %v", di.Remote())
 			}
 		} else {
-			fs.Logf(nil, "unexpected listing: %v", li)
+			log.Printf("unexpected listing: %v", li)
 			return fmt.Errorf("unexpected listing: %v", li)
 		}

-		fs.Logf(nil, "complete listing: %v", li)
+		log.Printf("complete listing: %v", li)
 		return nil
 	}, 12, time.Second*10)
 	require.NoError(t, err)
@@ -534,7 +547,8 @@ func TestInternalMoveWithNotify(t *testing.T) {
 func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 	id := fmt.Sprintf("tincep%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
 	if !runInstance.wrappedIsExternal {
 		t.Skipf("Not external")
 	}
@@ -576,43 +590,43 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 	err = runInstance.retryBlock(func() error {
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
 		if !found {
-			fs.Logf(nil, "not found /test")
+			log.Printf("not found /test")
 			return fmt.Errorf("not found /test")
 		}
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
 		if !found {
-			fs.Logf(nil, "not found /test/one")
+			log.Printf("not found /test/one")
 			return fmt.Errorf("not found /test/one")
 		}
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
 		if !found {
-			fs.Logf(nil, "not found /test/one/test2")
+			log.Printf("not found /test/one/test2")
 			return fmt.Errorf("not found /test/one/test2")
 		}
 		li, err := runInstance.list(t, rootFs, "test/one")
 		if err != nil {
-			fs.Logf(nil, "err: %v", err)
+			log.Printf("err: %v", err)
 			return err
 		}
 		if len(li) != 1 {
-			fs.Logf(nil, "not expected listing /test/one: %v", li)
+			log.Printf("not expected listing /test/one: %v", li)
 			return fmt.Errorf("not expected listing /test/one: %v", li)
 		}
 		if fi, ok := li[0].(os.FileInfo); ok {
 			if fi.Name() != "test2" {
-				fs.Logf(nil, "not expected name: %v", fi.Name())
+				log.Printf("not expected name: %v", fi.Name())
 				return fmt.Errorf("not expected name: %v", fi.Name())
 			}
 		} else if di, ok := li[0].(fs.DirEntry); ok {
 			if di.Remote() != "test/one/test2" {
-				fs.Logf(nil, "not expected remote: %v", di.Remote())
+				log.Printf("not expected remote: %v", di.Remote())
 				return fmt.Errorf("not expected remote: %v", di.Remote())
 			}
 		} else {
-			fs.Logf(nil, "unexpected listing: %v", li)
+			log.Printf("unexpected listing: %v", li)
 			return fmt.Errorf("unexpected listing: %v", li)
 		}
-		fs.Logf(nil, "complete listing /test/one/test2")
+		log.Printf("complete listing /test/one/test2")
 		return nil
 	}, 12, time.Second*10)
 	require.NoError(t, err)
@@ -620,7 +634,8 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
 	id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
@@ -652,7 +667,8 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
 func TestInternalCacheWrites(t *testing.T) {
 	id := "ticw"
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"writes": "true"})
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
@@ -673,7 +689,8 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
 		t.Skip("Skip test on windows/386")
 	}
 	id := fmt.Sprintf("timcsr%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"workers": "1"})
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
@@ -688,7 +705,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
 	co, ok := o.(*cache.Object)
 	require.True(t, ok)

-	for i := range 4 { // read first 4
+	for i := 0; i < 4; i++ { // read first 4
 		_ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
 	}
 	cfs.CleanUpCache(true)
@@ -707,8 +724,9 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
 func TestInternalExpiredEntriesRemoved(t *testing.T) {
 	id := fmt.Sprintf("tieer%v", time.Now().Unix())
-	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 4) // needs to be lower than the defined
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
+	vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"info_age": "5s"}, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
@@ -742,10 +760,12 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
 }

 func TestInternalBug2117(t *testing.T) {
-	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 10)
+	vfsflags.Opt.DirCacheTime = time.Second * 10

 	id := fmt.Sprintf("tib2117%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
+		map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	if runInstance.rootIsCrypt {
 		t.Skipf("skipping crypt")
@@ -770,24 +790,24 @@ func TestInternalBug2117(t *testing.T) {
 	di, err := runInstance.list(t, rootFs, "test/dir1/dir2")
 	require.NoError(t, err)
-	fs.Logf(nil, "len: %v", len(di))
+	log.Printf("len: %v", len(di))
 	require.Len(t, di, 1)

 	time.Sleep(time.Second * 30)

 	di, err = runInstance.list(t, rootFs, "test/dir1/dir2")
 	require.NoError(t, err)
-	fs.Logf(nil, "len: %v", len(di))
+	log.Printf("len: %v", len(di))
 	require.Len(t, di, 1)

 	di, err = runInstance.list(t, rootFs, "test/dir1")
 	require.NoError(t, err)
-	fs.Logf(nil, "len: %v", len(di))
+	log.Printf("len: %v", len(di))
 	require.Len(t, di, 4)

 	di, err = runInstance.list(t, rootFs, "test")
 	require.NoError(t, err)
-	fs.Logf(nil, "len: %v", len(di))
+	log.Printf("len: %v", len(di))
 	require.Len(t, di, 4)
 }
@@ -821,14 +841,14 @@ func newRun() *run {
 	}

 	if uploadDir == "" {
-		r.tmpUploadDir, err = os.MkdirTemp("", "rclonecache-tmp")
+		r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
 		if err != nil {
 			panic(fmt.Sprintf("Failed to create temp dir: %v", err))
 		}
 	} else {
 		r.tmpUploadDir = uploadDir
 	}
-	fs.Logf(nil, "Temp Upload Dir: %v", r.tmpUploadDir)
+	log.Printf("Temp Upload Dir: %v", r.tmpUploadDir)

 	return r
 }
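Go 1.16 moved io/ioutil's helpers into os and io and deprecated the originals, so the renames in this file are drop-in. A sketch of the correspondences used here:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        // os.MkdirTemp replaces ioutil.TempDir (same signature and behavior).
        dir, err := os.MkdirTemp("", "rclonecache-tmp")
        if err != nil {
            panic(err)
        }
        defer os.RemoveAll(dir)

        // os.CreateTemp replaces ioutil.TempFile.
        f, err := os.CreateTemp(dir, "rclonecache-tempfile")
        if err != nil {
            panic(err)
        }
        defer f.Close()

        // os.ReadDir replaces ioutil.ReadDir, returning lighter os.DirEntry values.
        entries, err := os.ReadDir(dir)
        if err != nil {
            panic(err)
        }
        for _, e := range entries {
            fmt.Println(e.Name())
        }
    }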
@@ -846,11 +866,11 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
 	return enc
 }

-func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
+func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
 	fstest.Initialise()
 	remoteExists := false
-	for _, s := range config.GetRemotes() {
-		if s.Name == remote {
+	for _, s := range config.FileSections() {
+		if s == remote {
 			remoteExists = true
 		}
 	}
@@ -874,12 +894,12 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 	cacheRemote := remote
 	if !remoteExists {
 		localRemote := remote + "-local"
-		config.FileSetValue(localRemote, "type", "local")
-		config.FileSetValue(localRemote, "nounc", "true")
+		config.FileSet(localRemote, "type", "local")
+		config.FileSet(localRemote, "nounc", "true")
 		m.Set("type", "cache")
 		m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
 	} else {
-		remoteType := config.GetValue(remote, "type")
+		remoteType := config.FileGet(remote, "type")
 		if remoteType == "" {
 			t.Skipf("skipped due to invalid remote type for %v", remote)
 			return nil, nil
@@ -890,14 +910,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 			m.Set("password", cryptPassword1)
 			m.Set("password2", cryptPassword2)
 		}
-		remoteRemote := config.GetValue(remote, "remote")
+		remoteRemote := config.FileGet(remote, "remote")
 		if remoteRemote == "" {
 			t.Skipf("skipped due to invalid remote wrapper for %v", remote)
 			return nil, nil
 		}
 		remoteRemoteParts := strings.Split(remoteRemote, ":")
 		remoteWrapping := remoteRemoteParts[0]
-		remoteType := config.GetValue(remoteWrapping, "type")
+		remoteType := config.FileGet(remoteWrapping, "type")
 		if remoteType != "cache" {
 			t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
 			return nil, nil
@@ -934,20 +954,16 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 	}

 	if purge {
-		_ = operations.Purge(context.Background(), f, "")
+		_ = f.Features().Purge(context.Background(), "")
+		require.NoError(t, err)
 	}
 	err = f.Mkdir(context.Background(), "")
 	require.NoError(t, err)
-	t.Cleanup(func() {
-		runInstance.cleanupFs(t, f)
-	})
 	return f, boltDb
 }

-func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
-	err := operations.Purge(context.Background(), f, "")
+func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
+	err := f.Features().Purge(context.Background(), "")
 	require.NoError(t, err)
 	cfs, err := r.getCacheFs(f)
 	require.NoError(t, err)
@@ -968,10 +984,10 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
 	chunk := int64(1024)
 	cnt := size / chunk
 	left := size % chunk
-	f, err := os.CreateTemp("", "rclonecache-tempfile")
+	f, err := ioutil.TempFile("", "rclonecache-tempfile")
 	require.NoError(t, err)

-	for range int(cnt) {
+	for i := 0; i < int(cnt); i++ {
 		data := randStringBytes(int(chunk))
 		_, _ = f.Write(data)
 	}
@@ -1085,9 +1101,9 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
 	return err
 }

-func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]any, error) {
+func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
 	var err error
-	var l []any
+	var l []interface{}
 	var list fs.DirEntries
 	list, err = f.List(context.Background(), remote)
 	for _, ll := range list {
@@ -1096,6 +1112,27 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]any, error) {
 	return l, err
 }

+func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
+	in, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		_ = in.Close()
+	}()
+
+	out, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		_ = out.Close()
+	}()
+
+	_, err = io.Copy(out, in)
+	return err
+}
+
 func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
 	var err error
@@ -1191,7 +1228,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
 func (r *run) cleanSize(t *testing.T, size int64) int64 {
 	if r.rootIsCrypt {
 		denominator := int64(65536 + 16)
-		size -= 32
+		size = size - 32
 		quotient := size / denominator
 		remainder := size % denominator
 		return (quotient*65536 + remainder - 16)
@@ -1215,7 +1252,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
 		var err error
 		var state cache.BackgroundUploadState

-		for range 2 {
+		for i := 0; i < 2; i++ {
 			select {
 			case state = <-buCh:
 				// continue
@@ -1293,7 +1330,7 @@ func (r *run) completeAllBackgroundUploads(t *testing.T, f fs.Fs, lastRemote str
 func (r *run) retryBlock(block func() error, maxRetries int, rate time.Duration) error {
 	var err error
-	for range maxRetries {
+	for i := 0; i < maxRetries; i++ {
 		err = block()
 		if err == nil {
 			return nil


@@ -1,6 +1,7 @@
 // Test Cache filesystem interface

 //go:build !plan9 && !js && !race
+// +build !plan9,!js,!race

 package cache_test

@@ -17,9 +18,8 @@ func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestCache:",
 		NilObject:  (*cache.Object)(nil),
-		UnimplementableFsMethods:        []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata", "ListP"},
-		UnimplementableObjectMethods:    []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
-		UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
+		UnimplementableFsMethods:     []string{"PublicLink", "OpenWriterAt"},
+		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
 		SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
 	})
 }


@@ -2,6 +2,6 @@
 // about "no buildable Go source files "

 //go:build plan9 || js
+// +build plan9 js

-// Package cache implements a virtual provider to cache existing remotes.
 package cache


@@ -1,4 +1,5 @@
 //go:build !plan9 && !js && !race
+// +build !plan9,!js,!race

 package cache_test

@@ -20,8 +21,10 @@ import (
 func TestInternalUploadTempDirCreated(t *testing.T) {
 	id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
-	runInstance.newCacheFs(t, remoteName, id, false, true,
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
+		nil,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
 	require.NoError(t, err)
@@ -60,7 +63,9 @@ func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltD
 func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
 	id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
 }
@@ -68,15 +73,19 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
 func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
 	id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
 }

 func TestInternalUploadMoveExistingFile(t *testing.T) {
 	id := fmt.Sprintf("tiumef%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	err := rootFs.Mkdir(context.Background(), "one")
 	require.NoError(t, err)
@@ -110,8 +119,10 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
 func TestInternalUploadTempPathCleaned(t *testing.T) {
 	id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
 		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	err := rootFs.Mkdir(context.Background(), "one")
 	require.NoError(t, err)
@@ -151,19 +162,21 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
 func TestInternalUploadQueueMoreFiles(t *testing.T) {
 	id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	err := rootFs.Mkdir(context.Background(), "test")
 	require.NoError(t, err)
 	minSize := 5242880
 	maxSize := 10485760
 	totalFiles := 10
-	randInstance := rand.New(rand.NewSource(time.Now().Unix()))
+	rand.Seed(time.Now().Unix())

 	lastFile := ""
-	for i := range totalFiles {
-		size := int64(randInstance.Intn(maxSize-minSize) + minSize)
+	for i := 0; i < totalFiles; i++ {
+		size := int64(rand.Intn(maxSize-minSize) + minSize)
 		testReader := runInstance.randomReader(t, size)
 		remote := "test/" + strconv.Itoa(i) + ".bin"
 		runInstance.writeRemoteReader(t, rootFs, remote, testReader)
@@ -200,7 +213,9 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
func TestInternalUploadTempFileOperations(t *testing.T) { func TestInternalUploadTempFileOperations(t *testing.T) {
id := "tiutfo" id := "tiutfo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads() boltDb.PurgeTempUploads()
@@ -328,7 +343,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
func TestInternalUploadUploadingFileOperations(t *testing.T) { func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo" id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads() boltDb.PurgeTempUploads()
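A recurring change in this test file: one side seeds the global math/rand source while the other builds a private generator, because rand.Seed was deprecated in Go 1.20 in favour of per-use generators. A runnable sketch of both styles:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// Pre Go 1.20 style: seed the shared package-global source.
	// rand.Seed(42) // deprecated since Go 1.20

	// Current style: an independent generator with its own source.
	r := rand.New(rand.NewSource(42))
	fmt.Println(r.Intn(100)) // deterministic for a fixed seed
}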


@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js
 
 package cache


@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js
 
 package cache
@@ -118,7 +119,7 @@ func (r *Handle) startReadWorkers() {
 	r.scaleWorkers(totalWorkers)
 }
 
-// scaleWorkers will increase the worker pool count by the provided amount
+// scaleOutWorkers will increase the worker pool count by the provided amount
 func (r *Handle) scaleWorkers(desired int) {
 	current := r.workers
 	if current == desired {
@@ -182,7 +183,7 @@ func (r *Handle) queueOffset(offset int64) {
 		}
 	}
 
-	for i := range r.workers {
+	for i := 0; i < r.workers; i++ {
 		o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
 		if o < 0 || o >= r.cachedObject.Size() {
 			continue
@@ -208,7 +209,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
 	offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)
 
 	// we align the start offset of the first chunk to a likely chunk in the storage
-	chunkStart -= offset
+	chunkStart = chunkStart - offset
 
 	r.queueOffset(chunkStart)
 	found := false
@@ -222,7 +223,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
 	if !found {
 		// we're gonna give the workers a chance to pickup the chunk
 		// and retry a couple of times
-		for i := range r.cacheFs().opt.ReadRetries * 8 {
+		for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
 			data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
 			if err == nil {
 				found = true
@@ -327,7 +328,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
 	chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
 	if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
-		chunkStart -= int64(r.cacheFs().opt.ChunkSize)
+		chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
 	}
 	r.queueOffset(chunkStart)
@@ -415,9 +416,11 @@ func (w *worker) run() {
 				continue
 			}
 		}
-		} else if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
+		} else {
+			if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
 				continue
 			}
+		}
 
 		chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
 		// TODO: Remove this comment if it proves to be reliable for #1896
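The loop rewrites in this file (for i := range r.workers versus the classic three-clause form) lean on Go 1.22, which allows ranging over an integer. The two loops below are equivalent:

package main

import "fmt"

func main() {
	workers := 4
	// Go 1.22+: range over an int yields 0 through workers-1.
	for i := range workers {
		fmt.Println("worker", i)
	}
	// Pre-1.22 spelling, as on the other side of the hunk.
	for i := 0; i < workers; i++ {
		fmt.Println("worker", i)
	}
}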


@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js
 
 package cache

backend/cache/plex.go

@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js
 
 package cache
@@ -7,7 +8,7 @@ import (
 	"crypto/tls"
 	"encoding/json"
 	"fmt"
-	"io"
+	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strings"
@@ -166,7 +167,7 @@ func (p *plexConnector) listenWebsocket() {
 			continue
 		}
 		var data []byte
-		data, err = io.ReadAll(resp.Body)
+		data, err = ioutil.ReadAll(resp.Body)
 		if err != nil {
 			continue
 		}
@@ -209,7 +210,7 @@ func (p *plexConnector) authenticate() error {
 	if err != nil {
 		return err
 	}
-	var data map[string]any
+	var data map[string]interface{}
 	err = json.NewDecoder(resp.Body).Decode(&data)
 	if err != nil {
 		return fmt.Errorf("failed to obtain token: %w", err)
@@ -273,11 +274,11 @@ func (p *plexConnector) isPlaying(co *Object) bool {
 }
 
 // adapted from: https://stackoverflow.com/a/28878037 (credit)
-func get(m any, path ...any) (any, bool) {
+func get(m interface{}, path ...interface{}) (interface{}, bool) {
 	for _, p := range path {
 		switch idx := p.(type) {
 		case string:
-			if mm, ok := m.(map[string]any); ok {
+			if mm, ok := m.(map[string]interface{}); ok {
 				if val, found := mm[idx]; found {
 					m = val
 					continue
@@ -285,7 +286,7 @@ func get(m any, path ...any) (any, bool) {
 			}
 			return nil, false
 		case int:
-			if mm, ok := m.([]any); ok {
+			if mm, ok := m.([]interface{}); ok {
 				if len(mm) > idx {
 					m = mm[idx]
 					continue
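The any versus interface{} churn in this file is cosmetic: any has been a predeclared alias for interface{} since Go 1.18, so the two spellings compile to exactly the same type. Sketch:

package main

import "fmt"

// Identical signatures: any is an alias for interface{} (Go 1.18+).
func typeOfAny(v any) string           { return fmt.Sprintf("%T", v) }
func typeOfIface(v interface{}) string { return fmt.Sprintf("%T", v) }

func main() {
	fmt.Println(typeOfAny(42), typeOfIface("hi")) // int string
}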


@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js
 
 package cache


@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js
 
 package cache
@@ -8,6 +9,7 @@ import (
 	"encoding/binary"
 	"encoding/json"
 	"fmt"
+	"io/ioutil"
 	"os"
 	"path"
 	"strconv"
@@ -18,7 +20,6 @@ import (
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/walk"
 	bolt "go.etcd.io/bbolt"
-	"go.etcd.io/bbolt/errors"
 )
 
 // Constants
@@ -472,7 +473,7 @@ func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error
 	var data []byte
 
 	fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
-	data, err := os.ReadFile(fp)
+	data, err := ioutil.ReadFile(fp)
 	if err != nil {
 		return nil, err
 	}
@@ -485,7 +486,7 @@ func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
 	_ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)
 
 	filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
-	err := os.WriteFile(filePath, data, os.ModePerm)
+	err := ioutil.WriteFile(filePath, data, os.ModePerm)
 	if err != nil {
 		return err
 	}
@@ -598,7 +599,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
 	})
 	if err != nil {
-		if err == errors.ErrDatabaseNotOpen {
+		if err == bolt.ErrDatabaseNotOpen {
 			// we're likely a late janitor and we need to end quietly as there's no guarantee of what exists anymore
 			return
 		}
@@ -607,16 +608,16 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
 }
 
 // Stats returns a go map with the stats key values
-func (b *Persistent) Stats() (map[string]map[string]any, error) {
-	r := make(map[string]map[string]any)
-	r["data"] = make(map[string]any)
+func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
+	r := make(map[string]map[string]interface{})
+	r["data"] = make(map[string]interface{})
 	r["data"]["oldest-ts"] = time.Now()
 	r["data"]["oldest-file"] = ""
 	r["data"]["newest-ts"] = time.Now()
 	r["data"]["newest-file"] = ""
 	r["data"]["total-chunks"] = 0
 	r["data"]["total-size"] = int64(0)
-	r["files"] = make(map[string]any)
+	r["files"] = make(map[string]interface{})
 	r["files"]["oldest-ts"] = time.Now()
 	r["files"]["oldest-name"] = ""
 	r["files"]["newest-ts"] = time.Now()


@@ -1,6 +1,3 @@
-//go:build !plan9 && !js
-// +build !plan9,!js
-
 package cache
 
 import bolt "go.etcd.io/bbolt"


@@ -12,6 +12,7 @@ import (
 	"fmt"
 	gohash "hash"
 	"io"
+	"io/ioutil"
 	"math/rand"
 	"path"
 	"regexp"
@@ -29,7 +30,6 @@ import (
 	"github.com/rclone/rclone/fs/fspath"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/operations"
-	"github.com/rclone/rclone/lib/encoder"
 )
 
 // Chunker's composite files have one or more chunks
@@ -102,10 +102,8 @@ var (
 //
 // And still chunker's primary function is to chunk large files
 // rather than serve as a generic metadata container.
-const (
-	maxMetadataSize        = 1023
-	maxMetadataSizeWritten = 255
-)
+const maxMetadataSize = 1023
+const maxMetadataSizeWritten = 255
 
 // Current/highest supported metadata format.
 const metadataVersion = 2
@@ -308,6 +306,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 		root: rpath,
 		opt:  *opt,
 	}
+	cache.PinUntilFinalized(f.base, f)
 	f.dirSort = true // processEntries requires that meta Objects prerun data chunks atm.
 
 	if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType, opt.Transactions); err != nil {
@@ -319,23 +318,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 	// i.e. `rpath` does not exist in the wrapped remote, but chunker
 	// detects a composite file because it finds the first chunk!
 	// (yet can't satisfy fstest.CheckListing, will ignore)
-	if err == nil && !f.useMeta {
+	if err == nil && !f.useMeta && strings.Contains(rpath, "/") {
 		firstChunkPath := f.makeChunkName(remotePath, 0, "", "")
-		newBase, testErr := cache.Get(ctx, baseName+firstChunkPath)
+		_, testErr := cache.Get(ctx, baseName+firstChunkPath)
 		if testErr == fs.ErrorIsFile {
-			f.base = newBase
 			err = testErr
 		}
 	}
-	cache.PinUntilFinalized(f.base, f)
-
-	// Correct root if definitely pointing to a file
-	if err == fs.ErrorIsFile {
-		f.root = path.Dir(f.root)
-		if f.root == "." || f.root == "/" {
-			f.root = ""
-		}
-	}
 
 	// Note 1: the features here are ones we could support, and they are
 	// ANDed with the ones from wrappedFs.
@@ -349,15 +338,9 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 		BucketBased:              true,
 		CanHaveEmptyDirectories:  true,
 		ServerSideAcrossConfigs:  true,
-		ReadDirMetadata:          true,
-		WriteDirMetadata:         true,
-		WriteDirSetModTime:       true,
-		UserDirMetadata:          true,
-		DirModTimeUpdatesOnWrite: true,
 	}).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)
 
-	f.features.ListR = nil // Recursive listing may cause chunker skip files
-	f.features.ListP = nil // ListP not supported yet
+	f.features.Disable("ListR") // Recursive listing may cause chunker skip files
 
 	return f, err
@@ -633,7 +616,7 @@ func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ct
 
 // forbidChunk prints error message or raises error if file is chunk.
 // First argument sets log prefix, use `false` to suppress message.
-func (f *Fs) forbidChunk(o any, filePath string) error {
+func (f *Fs) forbidChunk(o interface{}, filePath string) error {
 	if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" {
 		if f.opt.FailHard {
 			return fmt.Errorf("chunk overlap with %q", parentPath)
@@ -681,7 +664,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
 	circleSec := unixSec % closestPrimeZzzzSeconds
 	first4chars := strconv.FormatInt(circleSec, 36)
 
-	for range maxTransactionProbes {
+	for tries := 0; tries < maxTransactionProbes; tries++ {
 		f.xactIDMutex.Lock()
 		randomness := f.xactIDRand.Int63n(maxTwoBase36Digits + 1)
 		f.xactIDMutex.Unlock()
@@ -831,7 +814,8 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
 			}
 		case fs.Directory:
 			isSubdir[entry.Remote()] = true
-			wrapDir := fs.NewDirWrapper(entry.Remote(), entry)
+			wrapDir := fs.NewDirCopy(ctx, entry)
+			wrapDir.SetRemote(entry.Remote())
 			tempEntries = append(tempEntries, wrapDir)
 		default:
 			if f.opt.FailHard {
@@ -964,11 +948,6 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
 		}
 		if caseInsensitive {
 			sameMain = strings.EqualFold(mainRemote, remote)
-			if sameMain && f.base.Features().IsLocal {
-				// on local, make sure the EqualFold still holds true when accounting for encoding.
-				// sometimes paths with special characters will only normalize the same way in Standard Encoding.
-				sameMain = strings.EqualFold(encoder.OS.FromStandardPath(mainRemote), encoder.OS.FromStandardPath(remote))
-			}
 		} else {
 			sameMain = mainRemote == remote
 		}
@@ -982,13 +961,13 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
 			}
 			continue
 		}
-		// fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
+		//fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
 		if err := o.addChunk(entry, chunkNo); err != nil {
 			return nil, err
 		}
 	}
 
-	if o.main == nil && len(o.chunks) == 0 {
+	if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
 		// Scanning hasn't found data chunks with conforming names.
 		if f.useMeta || quickScan {
 			// Metadata is required but absent and there are no chunks.
@@ -1059,7 +1038,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
 	if err != nil {
 		return err
 	}
-	metadata, err := io.ReadAll(reader)
+	metadata, err := ioutil.ReadAll(reader)
 	_ = reader.Close() // ensure file handle is freed on windows
 	if err != nil {
 		return err
@@ -1118,7 +1097,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
 	if err != nil {
 		return "", err
 	}
-	data, err := io.ReadAll(reader)
+	data, err := ioutil.ReadAll(reader)
 	_ = reader.Close() // ensure file handle is freed on windows
 	if err != nil {
 		return "", err
@@ -1144,8 +1123,8 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
 // put implements Put, PutStream, PutUnchecked, Update
 func (f *Fs) put(
 	ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
-	basePut putFn, action string, target fs.Object,
-) (obj fs.Object, err error) {
+	basePut putFn, action string, target fs.Object) (obj fs.Object, err error) {
+
 	// Perform consistency checks
 	if err := f.forbidChunk(src, remote); err != nil {
 		return nil, fmt.Errorf("%s refused: %w", action, err)
@@ -1190,7 +1169,10 @@ func (f *Fs) put(
 		}
 
 		tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactID)
-		size := min(c.sizeLeft, c.chunkSize)
+		size := c.sizeLeft
+		if size > c.chunkSize {
+			size = c.chunkSize
+		}
 		savedReadCount := c.readCount
 
 		// If a single chunk is expected, avoid the extra rename operation
@@ -1475,7 +1457,10 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
 	const bufLen = 1048576 // 1 MiB
 	buf := make([]byte, bufLen)
 	for size > 0 {
-		n := min(size, bufLen)
+		n := size
+		if n > bufLen {
+			n = bufLen
+		}
 		if _, err := io.ReadFull(in, buf[0:n]); err != nil {
 			return err
 		}
@@ -1579,14 +1564,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	return f.base.Mkdir(ctx, dir)
 }
 
-// MkdirMetadata makes the root directory of the Fs object
-func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
-	if do := f.base.Features().MkdirMetadata; do != nil {
-		return do(ctx, dir, metadata)
-	}
-	return nil, fs.ErrorNotImplemented
-}
-
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
@@ -1861,8 +1838,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 
 // baseMove chains to the wrapped Move or simulates it by Copy+Delete
 func (f *Fs) baseMove(ctx context.Context, src fs.Object, remote string, delMode int) (fs.Object, error) {
-	ctx, ci := fs.AddConfig(ctx)
-	ci.NameTransform = nil // ensure operations.Move does not double-transform here
 	var (
 		dest fs.Object
 		err  error
@@ -1906,14 +1881,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	return do(ctx, srcFs.base, srcRemote, dstRemote)
 }
 
-// DirSetModTime sets the directory modtime for dir
-func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
-	if do := f.base.Features().DirSetModTime; do != nil {
-		return do(ctx, dir, modTime)
-	}
-	return fs.ErrorNotImplemented
-}
-
 // CleanUp the trash in the Fs
 //
 // Implement this if you have a way of emptying the trash or
@@ -1962,7 +1929,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
 		return
 	}
 	wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
-		// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
+		//fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
 		if entryType == fs.EntryObject {
 			mainPath, _, _, xactID := f.parseChunkName(path)
 			metaXactID := ""
@@ -2477,7 +2444,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
 	if len(data) > maxMetadataSizeWritten {
 		return nil, false, ErrMetaTooBig
 	}
-	if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
+	if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
 		return nil, false, errors.New("invalid json")
 	}
 	var metadata metaSimpleJSON
@@ -2574,8 +2541,6 @@ var (
 	_ fs.Copier          = (*Fs)(nil)
 	_ fs.Mover           = (*Fs)(nil)
 	_ fs.DirMover        = (*Fs)(nil)
-	_ fs.DirSetModTimer  = (*Fs)(nil)
-	_ fs.MkdirMetadataer = (*Fs)(nil)
 	_ fs.PutUncheckeder  = (*Fs)(nil)
 	_ fs.PutStreamer     = (*Fs)(nil)
 	_ fs.CleanUpper      = (*Fs)(nil)
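The size := min(...) and n := min(...) forms that one side of this diff expands into explicit comparisons use the generic min builtin from Go 1.21; the three-line if is the portable spelling for older toolchains. Sketch:

package main

import "fmt"

func main() {
	var sizeLeft, chunkSize int64 = 1500, 1024

	// Go 1.21+: generic builtin.
	size := min(sizeLeft, chunkSize)

	// Pre-1.21 equivalent.
	size2 := sizeLeft
	if size2 > chunkSize {
		size2 = chunkSize
	}
	fmt.Println(size, size2) // 1024 1024
}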


@@ -5,7 +5,7 @@ import (
 	"context"
 	"flag"
 	"fmt"
-	"io"
+	"io/ioutil"
 	"path"
 	"regexp"
 	"strings"
@@ -40,7 +40,7 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
 	})
 }
 
-type settings map[string]any
+type settings map[string]interface{}
 
 func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
 	fsName := strings.Split(f.Name(), "{")[0] // strip off hash
@@ -413,7 +413,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
 		if r == nil {
 			return
 		}
-		data, err := io.ReadAll(r)
+		data, err := ioutil.ReadAll(r)
 		assert.NoError(t, err)
 		assert.Equal(t, contents, string(data))
 		_ = r.Close()
@@ -538,7 +538,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
 	assert.NoError(t, err)
 	var chunkContents []byte
 	assert.NotPanics(t, func() {
-		chunkContents, err = io.ReadAll(r)
+		chunkContents, err = ioutil.ReadAll(r)
 		_ = r.Close()
 	})
 	assert.NoError(t, err)
@@ -573,7 +573,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
 	r, err = willyChunk.Open(ctx)
 	assert.NoError(t, err)
 	assert.NotPanics(t, func() {
-		_, err = io.ReadAll(r)
+		_, err = ioutil.ReadAll(r)
 		_ = r.Close()
 	})
 	assert.NoError(t, err)
@@ -672,7 +672,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
 		assert.NoError(t, err, "open "+description)
 		assert.NotNil(t, r, "open stream of "+description)
 		if err == nil && r != nil {
-			data, err := io.ReadAll(r)
+			data, err := ioutil.ReadAll(r)
 			assert.NoError(t, err, "read all of "+description)
 			assert.Equal(t, contents, string(data), description+" contents is ok")
 			_ = r.Close()
@@ -758,8 +758,8 @@ func testFutureProof(t *testing.T, f *Fs) {
 	assert.Error(t, err)
 
 	// Rcat must fail
-	in := io.NopCloser(bytes.NewBufferString("abc"))
-	robj, err := operations.Rcat(ctx, f, file, in, modTime, nil)
+	in := ioutil.NopCloser(bytes.NewBufferString("abc"))
+	robj, err := operations.Rcat(ctx, f, file, in, modTime)
 	assert.Nil(t, robj)
 	assert.NotNil(t, err)
 	if err != nil {
@@ -854,7 +854,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
 	r, err := dstFile.Open(ctx)
 	assert.NoError(t, err)
 	assert.NotNil(t, r)
-	data, err := io.ReadAll(r)
+	data, err := ioutil.ReadAll(r)
 	assert.NoError(t, err)
 	assert.Equal(t, contents, string(data))
 	_ = r.Close()


@@ -36,17 +36,14 @@ func TestIntegration(t *testing.T) {
 			"GetTier",
 			"SetTier",
 			"Metadata",
-			"SetMetadata",
 		},
 		UnimplementableFsMethods: []string{
 			"PublicLink",
 			"OpenWriterAt",
-			"OpenChunkWriter",
 			"MergeDirs",
 			"DirCacheFlush",
 			"UserInfo",
 			"Disconnect",
-			"ListP",
 		},
 	}
 	if *fstest.RemoteName == "" {


@@ -1,48 +0,0 @@
// Package api has type definitions for cloudinary
package api
import (
"fmt"
)
// CloudinaryEncoder extends the built-in encoder
type CloudinaryEncoder interface {
// FromStandardPath takes a / separated path in Standard encoding
// and converts it to a / separated path in this encoding.
FromStandardPath(string) string
// FromStandardName takes name in Standard encoding and converts
// it in this encoding.
FromStandardName(string) string
// ToStandardPath takes a / separated path in this encoding
// and converts it to a / separated path in Standard encoding.
ToStandardPath(string) string
// ToStandardName takes name in this encoding and converts
// it in Standard encoding.
ToStandardName(string, string) string
// Encoded root of the remote (as passed into NewFs)
FromStandardFullPath(string) string
}
// UpdateOptions was created to pass options from Update to Put
type UpdateOptions struct {
PublicID string
ResourceType string
DeliveryType string
AssetFolder string
DisplayName string
}
// Header formats the option as a string
func (o *UpdateOptions) Header() (string, string) {
return "UpdateOption", fmt.Sprintf("%s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}
// Mandatory returns whether the option must be parsed or can be ignored
func (o *UpdateOptions) Mandatory() bool {
return false
}
// String formats the option into human-readable form
func (o *UpdateOptions) String() string {
return fmt.Sprintf("Fully qualified Public ID: %s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}
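A note on the deleted type above: UpdateOptions satisfies rclone's fs.OpenOption interface (Header, Mandatory, String), which is how Update hands per-object state to Put through the ordinary options slice; Put then picks it back out with a type assertion, as the backend code below shows. A self-contained sketch of the pattern with simplified stand-in types:

package main

import "fmt"

// Stand-ins for fs.OpenOption and the custom option (simplified).
type OpenOption interface {
	Header() (string, string)
	Mandatory() bool
	String() string
}

type UpdateOptions struct{ PublicID string }

func (o *UpdateOptions) Header() (string, string) { return "UpdateOption", o.PublicID }
func (o *UpdateOptions) Mandatory() bool          { return false }
func (o *UpdateOptions) String() string           { return "PublicID " + o.PublicID }

// put scans the generic option slice for the custom option.
func put(options ...OpenOption) {
	for _, option := range options {
		if uo, ok := option.(*UpdateOptions); ok {
			fmt.Println("overwriting asset", uo.PublicID) // update instead of create
		}
	}
}

func main() {
	put(&UpdateOptions{PublicID: "abc123"})
}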


@@ -1,754 +0,0 @@
// Package cloudinary provides an interface to the Cloudinary DAM
package cloudinary
import (
"context"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"path"
"slices"
"strconv"
"strings"
"time"
"github.com/cloudinary/cloudinary-go/v2"
SDKApi "github.com/cloudinary/cloudinary-go/v2/api"
"github.com/cloudinary/cloudinary-go/v2/api/admin"
"github.com/cloudinary/cloudinary-go/v2/api/admin/search"
"github.com/cloudinary/cloudinary-go/v2/api/uploader"
"github.com/rclone/rclone/backend/cloudinary/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"github.com/zeebo/blake3"
)
// Cloudinary shouldn't have a trailing dot if there is no path
func cldPathDir(somePath string) string {
if somePath == "" || somePath == "." {
return somePath
}
dir := path.Dir(somePath)
if dir == "." {
return ""
}
return dir
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "cloudinary",
Description: "Cloudinary",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "cloud_name",
Help: "Cloudinary Environment Name",
Required: true,
Sensitive: true,
},
{
Name: "api_key",
Help: "Cloudinary API Key",
Required: true,
Sensitive: true,
},
{
Name: "api_secret",
Help: "Cloudinary API Secret",
Required: true,
Sensitive: true,
},
{
Name: "upload_prefix",
Help: "Specify the API endpoint for environments out of the US",
},
{
Name: "upload_preset",
Help: "Upload Preset to select asset manipulation on upload",
},
{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
encoder.EncodeSlash |
encoder.EncodeLtGt |
encoder.EncodeDoubleQuote |
encoder.EncodeQuestion |
encoder.EncodeAsterisk |
encoder.EncodePipe |
encoder.EncodeHash |
encoder.EncodePercent |
encoder.EncodeBackSlash |
encoder.EncodeDel |
encoder.EncodeCtl |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8 |
encoder.EncodeDot),
},
{
Name: "eventually_consistent_delay",
Default: fs.Duration(0),
Advanced: true,
Help: "Wait N seconds for eventual consistency of the databases that support the backend operation",
},
{
Name: "adjust_media_files_extensions",
Default: true,
Advanced: true,
Help: "Cloudinary handles media formats as a file attribute and strips it from the name, which is unlike most other file systems",
},
{
Name: "media_extensions",
Default: []string{
"3ds", "3g2", "3gp", "ai", "arw", "avi", "avif", "bmp", "bw",
"cr2", "cr3", "djvu", "dng", "eps3", "fbx", "flif", "flv", "gif",
"glb", "gltf", "hdp", "heic", "heif", "ico", "indd", "jp2", "jpe",
"jpeg", "jpg", "jxl", "jxr", "m2ts", "mov", "mp4", "mpeg", "mts",
"mxf", "obj", "ogv", "pdf", "ply", "png", "psd", "svg", "tga",
"tif", "tiff", "ts", "u3ma", "usdz", "wdp", "webm", "webp", "wmv"},
Advanced: true,
Help: "Cloudinary supported media extensions",
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
CloudName string `config:"cloud_name"`
APIKey string `config:"api_key"`
APISecret string `config:"api_secret"`
UploadPrefix string `config:"upload_prefix"`
UploadPreset string `config:"upload_preset"`
Enc encoder.MultiEncoder `config:"encoding"`
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
MediaExtensions []string `config:"media_extensions"`
AdjustMediaFilesExtensions bool `config:"adjust_media_files_extensions"`
}
// Fs represents a remote cloudinary server
type Fs struct {
name string
root string
opt Options
features *fs.Features
pacer *fs.Pacer
srv *rest.Client // For downloading assets via the Cloudinary CDN
cld *cloudinary.Cloudinary // API calls are going through the Cloudinary SDK
lastCRUD time.Time
}
// Object describes a cloudinary object
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
url string
md5sum string
publicID string
resourceType string
deliveryType string
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Initialize the Cloudinary client
cld, err := cloudinary.NewFromParams(opt.CloudName, opt.APIKey, opt.APISecret)
if err != nil {
return nil, fmt.Errorf("failed to create Cloudinary client: %w", err)
}
cld.Admin.Client = *fshttp.NewClient(ctx)
cld.Upload.Client = *fshttp.NewClient(ctx)
if opt.UploadPrefix != "" {
cld.Config.API.UploadPrefix = opt.UploadPrefix
}
client := fshttp.NewClient(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
cld: cld,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1000), pacer.MaxSleep(10000), pacer.DecayConstant(2))),
srv: rest.NewClient(client),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
if root != "" {
// Check to see if the root is actually an existing file
remote := path.Base(root)
f.root = cldPathDir(root)
_, err := f.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
// File doesn't exist so return the previous root
f.root = root
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// ------------------------------------------------------------
// FromStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardPath(s string) string {
return strings.ReplaceAll(f.opt.Enc.FromStandardPath(s), "&", "\uFF06")
}
// FromStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardName(s string) string {
if f.opt.AdjustMediaFilesExtensions {
parsedURL, err := url.Parse(s)
ext := ""
if err != nil {
fs.Logf(nil, "Error parsing URL: %v", err)
} else {
ext = path.Ext(parsedURL.Path)
if slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
s = strings.TrimSuffix(parsedURL.Path, ext)
}
}
}
return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
}
// ToStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardPath(s string) string {
return strings.ReplaceAll(f.opt.Enc.ToStandardPath(s), "\uFF06", "&")
}
// ToStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardName(s string, assetURL string) string {
ext := ""
if f.opt.AdjustMediaFilesExtensions {
parsedURL, err := url.Parse(assetURL)
if err != nil {
fs.Logf(nil, "Error parsing URL: %v", err)
} else {
ext = path.Ext(parsedURL.Path)
if !slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
ext = ""
}
}
}
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&") + ext
}
// FromStandardFullPath encodes a full path to Cloudinary standard
func (f *Fs) FromStandardFullPath(dir string) string {
return path.Join(api.CloudinaryEncoder.FromStandardPath(f, f.root), api.CloudinaryEncoder.FromStandardPath(f, dir))
}
// ToAssetFolderAPI encodes folders as expected by the Cloudinary SDK
func (f *Fs) ToAssetFolderAPI(dir string) string {
return strings.ReplaceAll(dir, "%", "%25")
}
// ToDisplayNameElastic encodes a special case of elasticsearch
func (f *Fs) ToDisplayNameElastic(dir string) string {
return strings.ReplaceAll(dir, "!", "\\!")
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// WaitEventuallyConsistent waits till the FS is eventually consistent
func (f *Fs) WaitEventuallyConsistent() {
if f.opt.EventuallyConsistentDelay == fs.Duration(0) {
return
}
delay := time.Duration(f.opt.EventuallyConsistentDelay)
timeSinceLastCRUD := time.Since(f.lastCRUD)
if timeSinceLastCRUD < delay {
time.Sleep(delay - timeSinceLastCRUD)
}
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Cloudinary root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// List the objects and directories in dir into entries
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
remotePrefix := f.FromStandardFullPath(dir)
if remotePrefix != "" && !strings.HasSuffix(remotePrefix, "/") {
remotePrefix += "/"
}
var entries fs.DirEntries
dirs := make(map[string]struct{})
nextCursor := ""
f.WaitEventuallyConsistent()
for {
// Use the folders API to list folders.
folderParams := admin.SubFoldersParams{
Folder: f.ToAssetFolderAPI(remotePrefix),
MaxResults: 500,
}
if nextCursor != "" {
folderParams.NextCursor = nextCursor
}
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
if err != nil {
return nil, fmt.Errorf("failed to list sub-folders: %w", err)
}
if results.Error.Message != "" {
if strings.HasPrefix(results.Error.Message, "Can't find folder with path") {
return nil, fs.ErrorDirNotFound
}
return nil, fmt.Errorf("failed to list sub-folders: %s", results.Error.Message)
}
for _, folder := range results.Folders {
relativePath := api.CloudinaryEncoder.ToStandardPath(f, strings.TrimPrefix(folder.Path, remotePrefix))
parts := strings.Split(relativePath, "/")
// It's a directory
dirName := parts[len(parts)-1]
if _, found := dirs[dirName]; !found {
d := fs.NewDir(path.Join(dir, dirName), time.Time{})
entries = append(entries, d)
dirs[dirName] = struct{}{}
}
}
// Break if there are no more results
if results.NextCursor == "" {
break
}
nextCursor = results.NextCursor
}
for {
// Use the assets.AssetsByAssetFolder API to list assets
assetsParams := admin.AssetsByAssetFolderParams{
AssetFolder: remotePrefix,
MaxResults: 500,
}
if nextCursor != "" {
assetsParams.NextCursor = nextCursor
}
results, err := f.cld.Admin.AssetsByAssetFolder(ctx, assetsParams)
if err != nil {
return nil, fmt.Errorf("failed to list assets: %w", err)
}
for _, asset := range results.Assets {
remote := path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName, asset.SecureURL))
o := &Object{
fs: f,
remote: remote,
size: int64(asset.Bytes),
modTime: asset.CreatedAt,
url: asset.SecureURL,
publicID: asset.PublicID,
resourceType: asset.AssetType,
deliveryType: asset.Type,
}
entries = append(entries, o)
}
// Break if there are no more results
if results.NextCursor == "" {
break
}
nextCursor = results.NextCursor
}
return entries, nil
}
// NewObject finds the Object at remote. If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
searchParams := search.Query{
Expression: fmt.Sprintf("asset_folder:\"%s\" AND display_name:\"%s\"",
f.FromStandardFullPath(cldPathDir(remote)),
f.ToDisplayNameElastic(api.CloudinaryEncoder.FromStandardName(f, path.Base(remote)))),
SortBy: []search.SortByField{{"uploaded_at": "desc"}},
MaxResults: 2,
}
var results *admin.SearchResult
f.WaitEventuallyConsistent()
err := f.pacer.Call(func() (bool, error) {
var err1 error
results, err1 = f.cld.Admin.Search(ctx, searchParams)
if err1 == nil && results.TotalCount != len(results.Assets) {
err1 = errors.New("partial response so waiting for eventual consistency")
}
return shouldRetry(ctx, nil, err1)
})
if err != nil {
return nil, fs.ErrorObjectNotFound
}
if results.TotalCount == 0 || len(results.Assets) == 0 {
return nil, fs.ErrorObjectNotFound
}
asset := results.Assets[0]
o := &Object{
fs: f,
remote: remote,
size: int64(asset.Bytes),
modTime: asset.UploadedAt,
url: asset.SecureURL,
md5sum: asset.Etag,
publicID: asset.PublicID,
resourceType: asset.ResourceType,
deliveryType: asset.Type,
}
return o, nil
}
func (f *Fs) getSuggestedPublicID(assetFolder string, displayName string, modTime time.Time) string {
payload := []byte(path.Join(assetFolder, displayName))
hash := blake3.Sum256(payload)
return hex.EncodeToString(hash[:])
}
// Put uploads content to Cloudinary
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if src.Size() == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
params := uploader.UploadParams{
UploadPreset: f.opt.UploadPreset,
}
updateObject := false
var modTime time.Time
for _, option := range options {
if updateOptions, ok := option.(*api.UpdateOptions); ok {
if updateOptions.PublicID != "" {
updateObject = true
params.Overwrite = SDKApi.Bool(true)
params.Invalidate = SDKApi.Bool(true)
params.PublicID = updateOptions.PublicID
params.ResourceType = updateOptions.ResourceType
params.Type = SDKApi.DeliveryType(updateOptions.DeliveryType)
params.AssetFolder = updateOptions.AssetFolder
params.DisplayName = updateOptions.DisplayName
modTime = src.ModTime(ctx)
}
}
}
if !updateObject {
params.AssetFolder = f.FromStandardFullPath(cldPathDir(src.Remote()))
params.DisplayName = api.CloudinaryEncoder.FromStandardName(f, path.Base(src.Remote()))
// We want to conform to the unique asset ID of rclone, which is (asset_folder,display_name,last_modified).
// We also want to enable customers to choose their own public_id, in case duplicate names are not a crucial use case.
// Upload_presets that apply randomness to the public ID would not work well with rclone duplicate assets support.
params.FilenameOverride = f.getSuggestedPublicID(params.AssetFolder, params.DisplayName, src.ModTime(ctx))
}
uploadResult, err := f.cld.Upload.Upload(ctx, in, params)
f.lastCRUD = time.Now()
if err != nil {
return nil, fmt.Errorf("failed to upload to Cloudinary: %w", err)
}
if !updateObject {
modTime = uploadResult.CreatedAt
}
if uploadResult.Error.Message != "" {
return nil, errors.New(uploadResult.Error.Message)
}
o := &Object{
fs: f,
remote: src.Remote(),
size: int64(uploadResult.Bytes),
modTime: modTime,
url: uploadResult.SecureURL,
md5sum: uploadResult.Etag,
publicID: uploadResult.PublicID,
resourceType: uploadResult.ResourceType,
deliveryType: uploadResult.Type,
}
return o, nil
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns the supported hash sets
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// Mkdir creates empty folders
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
params := admin.CreateFolderParams{Folder: f.ToAssetFolderAPI(f.FromStandardFullPath(dir))}
res, err := f.cld.Admin.CreateFolder(ctx, params)
f.lastCRUD = time.Now()
if err != nil {
return err
}
if res.Error.Message != "" {
return errors.New(res.Error.Message)
}
return nil
}
// Rmdir deletes empty folders
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// Additional test because Cloudinary will delete folders without
// assets, regardless of empty sub-folders
folder := f.ToAssetFolderAPI(f.FromStandardFullPath(dir))
folderParams := admin.SubFoldersParams{
Folder: folder,
MaxResults: 1,
}
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
if err != nil {
return err
}
if results.TotalCount > 0 {
return fs.ErrorDirectoryNotEmpty
}
params := admin.DeleteFolderParams{Folder: folder}
res, err := f.cld.Admin.DeleteFolder(ctx, params)
f.lastCRUD = time.Now()
if err != nil {
return err
}
if res.Error.Message != "" {
if strings.HasPrefix(res.Error.Message, "Can't find folder with path") {
return fs.ErrorDirNotFound
}
return errors.New(res.Error.Message)
}
return nil
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
420, // Too Many Requests (legacy)
429, // Too Many Requests
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if err != nil {
tryAgain := "Try again on "
if idx := strings.Index(err.Error(), tryAgain); idx != -1 {
layout := "2006-01-02 15:04:05 UTC"
dateStr := err.Error()[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
timestamp, err2 := time.Parse(layout, dateStr)
if err2 == nil {
return true, fserrors.NewErrorRetryAfter(time.Until(timestamp))
}
}
fs.Debugf(nil, "Retrying API error %v", err)
return true, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// ------------------------------------------------------------
// Hash returns the MD5 of an object
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
if ty != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5sum, nil
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// Size of object in bytes
func (o *Object) Size() int64 {
return o.size
}
// Storable returns if this object is storable
func (o *Object) Storable() bool {
return true
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return fs.ErrorCantSetModTime
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var resp *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: o.url,
Options: options,
}
var offset int64
var count int64
var key string
var value string
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, count = x.Decode(o.size)
if count < 0 {
count = o.size - offset
}
key, value = option.Header()
case *fs.SeekOption:
offset = x.Offset
count = o.size - offset
key, value = option.Header()
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if key != "" && value != "" {
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders[key] = value
}
// Make sure that the asset is fully available
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
if err == nil {
cl, clErr := strconv.Atoi(resp.Header.Get("content-length"))
if clErr == nil && count == int64(cl) {
return false, nil
}
}
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed download of \"%s\": %w", o.url, err)
}
return resp.Body, err
}
// Update the object with the contents of the io.Reader
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
options = append(options, &api.UpdateOptions{
PublicID: o.publicID,
ResourceType: o.resourceType,
DeliveryType: o.deliveryType,
DisplayName: api.CloudinaryEncoder.FromStandardName(o.fs, path.Base(o.Remote())),
AssetFolder: o.fs.FromStandardFullPath(cldPathDir(o.Remote())),
})
updatedObj, err := o.fs.Put(ctx, in, src, options...)
if err != nil {
return err
}
if uo, ok := updatedObj.(*Object); ok {
o.size = uo.size
o.modTime = time.Now() // Skipping uo.modTime because the API returns the create time
o.url = uo.url
o.md5sum = uo.md5sum
o.publicID = uo.publicID
o.resourceType = uo.resourceType
o.deliveryType = uo.deliveryType
}
return nil
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
params := uploader.DestroyParams{
PublicID: o.publicID,
ResourceType: o.resourceType,
Type: o.deliveryType,
}
res, dErr := o.fs.cld.Upload.Destroy(ctx, params)
o.fs.lastCRUD = time.Now()
if dErr != nil {
return dErr
}
if res.Error.Message != "" {
return errors.New(res.Error.Message)
}
if res.Result != "ok" {
return errors.New(res.Result)
}
return nil
}
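The shouldRetry helper in the file above extracts Cloudinary's "Try again on ..." timestamp from the error text and turns it into a retry delay via fserrors.NewErrorRetryAfter. A standalone sketch of just the parse (the error message is invented for illustration):

package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05 UTC"
	msg := "Rate limit exceeded. Try again on 2030-01-02 15:04:05 UTC."
	if idx := strings.Index(msg, "Try again on "); idx != -1 {
		start := idx + len("Try again on ")
		if ts, err := time.Parse(layout, msg[start:start+len(layout)]); err == nil {
			fmt.Println("retry after:", time.Until(ts).Round(time.Hour))
		}
	}
}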


@@ -1,23 +0,0 @@
// Test Cloudinary filesystem interface
package cloudinary_test
import (
"testing"
"github.com/rclone/rclone/backend/cloudinary"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
name := "TestCloudinary"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*cloudinary.Object)(nil),
SkipInvalidUTF8: true,
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "eventually_consistent_delay", Value: "7"},
},
})
}


@@ -1,4 +1,4 @@
// Package combine implements a backend to combine multiple remotes in a directory tree // Package combine implents a backend to combine multiple remotes in a directory tree
package combine package combine
/* /*
@@ -20,7 +20,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/fs/walk"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
@@ -234,12 +233,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
ReadMetadata: true, ReadMetadata: true,
WriteMetadata: true, WriteMetadata: true,
UserMetadata: true, UserMetadata: true,
ReadDirMetadata: true,
WriteDirMetadata: true,
WriteDirSetModTime: true,
UserDirMetadata: true,
DirModTimeUpdatesOnWrite: true,
PartialUploads: true,
}).Fill(ctx, f) }).Fill(ctx, f)
canMove := true canMove := true
for _, u := range f.upstreams { for _, u := range f.upstreams {
@@ -266,9 +259,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
} }
} }
// Enable ListP always
features.ListP = f.ListP
// Enable Purge when any upstreams support it // Enable Purge when any upstreams support it
if features.Purge == nil { if features.Purge == nil {
for _, u := range f.upstreams { for _, u := range f.upstreams {
@@ -299,16 +289,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
} }
} }
// Enable CleanUp when any upstreams support it
if features.CleanUp == nil {
for _, u := range f.upstreams {
if u.f.Features().CleanUp != nil {
features.CleanUp = f.CleanUp
break
}
}
}
// Enable ChangeNotify when any upstreams support it // Enable ChangeNotify when any upstreams support it
if features.ChangeNotify == nil { if features.ChangeNotify == nil {
for _, u := range f.upstreams { for _, u := range f.upstreams {
@@ -319,9 +299,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
} }
} }
// show that we wrap other backends
features.Overlay = true
f.features = features f.features = features
// Get common intersection of hashes // Get common intersection of hashes
@@ -374,7 +351,7 @@ func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream
return g.Wait() return g.Wait()
} }
// join the elements together but unlike path.Join return empty string // join the elements together but unline path.Join return empty string
func join(elem ...string) string { func join(elem ...string) string {
result := path.Join(elem...) result := path.Join(elem...)
if result == "." { if result == "." {
@@ -449,32 +426,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return u.f.Mkdir(ctx, uRemote) return u.f.Mkdir(ctx, uRemote)
} }
// MkdirMetadata makes the root directory of the Fs object
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return nil, err
}
do := u.f.Features().MkdirMetadata
if do == nil {
return nil, fs.ErrorNotImplemented
}
newDir, err := do(ctx, uRemote, metadata)
if err != nil {
return nil, err
}
entries := fs.DirEntries{newDir}
entries, err = u.wrapEntries(ctx, entries)
if err != nil {
return nil, err
}
newDir, ok := entries[0].(fs.Directory)
if !ok {
return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
}
return newDir, nil
}
// purge the upstream or fallback to a slow way // purge the upstream or fallback to a slow way
func (u *upstream) purge(ctx context.Context, dir string) (err error) { func (u *upstream) purge(ctx context.Context, dir string) (err error) {
if do := u.f.Features().Purge; do != nil { if do := u.f.Features().Purge; do != nil {
@@ -680,7 +631,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
if err != nil { if err != nil {
return nil, err return nil, err
} }
uSrc := fs.NewOverrideRemote(src, uRemote) uSrc := operations.NewOverrideRemote(src, uRemote)
var o fs.Object var o fs.Object
if stream { if stream {
o, err = u.f.Features().PutStream(ctx, in, uSrc, options...) o, err = u.f.Features().PutStream(ctx, in, uSrc, options...)
@@ -790,11 +741,12 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
case fs.Object: case fs.Object:
entries[i] = u.newObject(x) entries[i] = u.newObject(x)
case fs.Directory: case fs.Directory:
newPath, err := u.pathAdjustment.do(x.Remote()) newDir := fs.NewDirCopy(ctx, x)
newPath, err := u.pathAdjustment.do(newDir.Remote())
if err != nil { if err != nil {
return nil, err return nil, err
} }
newDir := fs.NewDirWrapper(newPath, x) newDir.SetRemote(newPath)
entries[i] = newDir entries[i] = newDir
default: default:
return nil, fmt.Errorf("unknown entry type %T", entry) return nil, fmt.Errorf("unknown entry type %T", entry)
@@ -813,52 +765,24 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err) // defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
if f.root == "" && dir == "" { if f.root == "" && dir == "" {
entries := make(fs.DirEntries, 0, len(f.upstreams)) entries = make(fs.DirEntries, 0, len(f.upstreams))
for combineDir := range f.upstreams { for combineDir := range f.upstreams {
d := fs.NewLimitedDirWrapper(combineDir, fs.NewDir(combineDir, f.when)) d := fs.NewDir(combineDir, f.when)
entries = append(entries, d) entries = append(entries, d)
} }
return callback(entries) return entries, nil
} }
u, uRemote, err := f.findUpstream(dir) u, uRemote, err := f.findUpstream(dir)
if err != nil { if err != nil {
return err return nil, err
} }
wrappedCallback := func(entries fs.DirEntries) error { entries, err = u.f.List(ctx, uRemote)
entries, err := u.wrapEntries(ctx, entries)
if err != nil { if err != nil {
return err return nil, err
} }
return callback(entries) return u.wrapEntries(ctx, entries)
}
listP := u.f.Features().ListP
if listP == nil {
entries, err := u.f.List(ctx, uRemote)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, uRemote, wrappedCallback)
} }
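The ListP contract spelled out in the comment (entries delivered in tranches through a callback, listing stopped as soon as the callback errors) makes a one-shot List expressible as a simple accumulator, which is presumably what list.WithListP provides. A minimal sketch under that assumption; the local interface and function name avoid guessing at the exact names in the fs package:

// Sketch: derive a one-shot List from a tranche-based ListP.
type listPer interface {
	ListP(ctx context.Context, dir string, callback fs.ListRCallback) error
}

func listViaListP(ctx context.Context, f listPer, dir string) (fs.DirEntries, error) {
	var all fs.DirEntries
	err := f.ListP(ctx, dir, func(tranche fs.DirEntries) error {
		all = append(all, tranche...) // accumulate each tranche
		return nil                    // a non-nil return would stop the listing early
	})
	return all, err
}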
// ListR lists the objects and directories of the Fs starting // ListR lists the objects and directories of the Fs starting
@@ -963,116 +887,6 @@ func (f *Fs) Shutdown(ctx context.Context) error {
}) })
} }
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return "", err
}
do := u.f.Features().PublicLink
if do == nil {
return "", fs.ErrorNotImplemented
}
return do(ctx, uRemote, expire, unlink)
}
// PutUnchecked in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
srcPath := src.Remote()
u, uRemote, err := f.findUpstream(srcPath)
if err != nil {
return nil, err
}
do := u.f.Features().PutUnchecked
if do == nil {
return nil, fs.ErrorNotImplemented
}
uSrc := fs.NewOverrideRemote(src, uRemote)
return do(ctx, in, uSrc, options...)
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if len(dirs) == 0 {
return nil
}
var (
u *upstream
uDirs []fs.Directory
)
for _, dir := range dirs {
uNew, uDir, err := f.findUpstream(dir.Remote())
if err != nil {
return err
}
if u == nil {
u = uNew
} else if u != uNew {
return fmt.Errorf("can't merge directories from different upstreams")
}
uDirs = append(uDirs, fs.NewOverrideDirectory(dir, uDir))
}
do := u.f.Features().MergeDirs
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx, uDirs)
}
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
u, uDir, err := f.findUpstream(dir)
if err != nil {
return err
}
if uDir == "" {
fs.Debugf(dir, "Can't set modtime on upstream root. skipping.")
return nil
}
if do := u.f.Features().DirSetModTime; do != nil {
return do(ctx, uDir, modTime)
}
return fs.ErrorNotImplemented
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
if do := u.f.Features().CleanUp; do != nil {
return do(ctx)
}
return nil
})
}
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return nil, err
}
do := u.f.Features().OpenWriterAt
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx, uRemote, size)
}
// Object describes a wrapped Object // Object describes a wrapped Object
// //
// This is a wrapped Object which knows its path prefix // This is a wrapped Object which knows its path prefix
@@ -1102,7 +916,7 @@ func (o *Object) String() string {
func (o *Object) Remote() string { func (o *Object) Remote() string {
newPath, err := o.u.pathAdjustment.do(o.Object.String()) newPath, err := o.u.pathAdjustment.do(o.Object.String())
if err != nil { if err != nil {
fs.Errorf(o.Object, "Bad object: %v", err) fs.Errorf(o, "Bad object: %v", err)
return err.Error() return err.Error()
} }
return newPath return newPath
@@ -1151,17 +965,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
return do.Metadata(ctx) return do.Metadata(ctx)
} }
// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
do, ok := o.Object.(fs.SetMetadataer)
if !ok {
return fs.ErrorNotImplemented
}
return do.SetMetadata(ctx, metadata)
}
// SetTier performs changing storage tier of the Object if // SetTier performs changing storage tier of the Object if
// multiple storage classes supported // multiple storage classes supported
func (o *Object) SetTier(tier string) error { func (o *Object) SetTier(tier string) error {
@@ -1185,12 +988,5 @@ var (
_ fs.Abouter = (*Fs)(nil) _ fs.Abouter = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil) _ fs.ListRer = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil) _ fs.Shutdowner = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.OpenWriterAter = (*Fs)(nil)
_ fs.FullObject = (*Object)(nil) _ fs.FullObject = (*Object)(nil)
) )
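The var block above is the standard Go idiom for compile-time interface checks: assigning a typed nil to the blank identifier costs nothing at runtime but breaks the build the moment *Fs or *Object stops satisfying one of the listed interfaces. In miniature, with toy names:

// Toy illustration of the compile-time assertion idiom.
type frobber interface{ Frob() }

type thing struct{}

func (*thing) Frob() {}

// Compiles only while *thing implements frobber; deleting Frob fails the build.
var _ frobber = (*thing)(nil)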


@@ -10,11 +10,6 @@ import (
"github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/fstest/fstests"
) )
var (
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "OpenChunkWriter"}
unimplementableObjectMethods = []string{}
)
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" { if *fstest.RemoteName == "" {
@@ -22,8 +17,8 @@ func TestIntegration(t *testing.T) {
} }
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName, RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: unimplementableFsMethods, UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: unimplementableObjectMethods, UnimplementableObjectMethods: []string{"MimeType"},
}) })
} }
@@ -41,8 +36,6 @@ func TestLocal(t *testing.T) {
{Name: name, Key: "upstreams", Value: upstreams}, {Name: name, Key: "upstreams", Value: upstreams},
}, },
QuickTestOK: true, QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
}) })
} }
@@ -59,8 +52,6 @@ func TestMemory(t *testing.T) {
{Name: name, Key: "upstreams", Value: upstreams}, {Name: name, Key: "upstreams", Value: upstreams},
}, },
QuickTestOK: true, QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
}) })
} }
@@ -77,8 +68,6 @@ func TestMixed(t *testing.T) {
{Name: name, Key: "type", Value: "combine"}, {Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams}, {Name: name, Key: "upstreams", Value: upstreams},
}, },
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
}) })
} }


@@ -13,8 +13,8 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"os" "os"
"path"
"regexp" "regexp"
"strings" "strings"
"time" "time"
@@ -29,7 +29,6 @@ import (
"github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/operations"
@@ -39,7 +38,6 @@ import (
const ( const (
initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently
maxChunkSize = 8388608 // at 256 KiB and 8 MiB. maxChunkSize = 8388608 // at 256 KiB and 8 MiB.
chunkStreams = 0 // Streams to use for reading
bufferSize = 8388608 bufferSize = 8388608
heuristicBytes = 1048576 heuristicBytes = 1048576
@@ -175,13 +173,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
opt: *opt, opt: *opt,
mode: compressionModeFromName(opt.CompressionMode), mode: compressionModeFromName(opt.CompressionMode),
} }
// Correct root if definitely pointing to a file
if err == fs.ErrorIsFile {
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
// the features here are ones we could support, and they are // the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs // ANDed with the ones from wrappedFs
f.features = (&fs.Features{ f.features = (&fs.Features{
@@ -196,12 +187,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
ReadMetadata: true, ReadMetadata: true,
WriteMetadata: true, WriteMetadata: true,
UserMetadata: true, UserMetadata: true,
ReadDirMetadata: true,
WriteDirMetadata: true,
WriteDirSetModTime: true,
UserDirMetadata: true,
DirModTimeUpdatesOnWrite: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs) }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// We support reading MIME types no matter the wrapped fs // We support reading MIME types no matter the wrapped fs
f.features.ReadMimeType = true f.features.ReadMimeType = true
@@ -209,8 +194,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
if !operations.CanServerSideMove(wrappedFs) { if !operations.CanServerSideMove(wrappedFs) {
f.features.Disable("PutStream") f.features.Disable("PutStream")
} }
// Enable ListP always
f.features.ListP = f.ListP
return f, err return f, err
} }
@@ -274,16 +257,6 @@ func isMetadataFile(filename string) bool {
return strings.HasSuffix(filename, metaFileExt) return strings.HasSuffix(filename, metaFileExt)
} }
// Checks whether a file is a metadata file and returns the original
// file name and a flag indicating whether it was a metadata file or
// not.
func unwrapMetadataFile(filename string) (string, bool) {
if !isMetadataFile(filename) {
return "", false
}
return filename[:len(filename)-len(metaFileExt)], true
}
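unwrapMetadataFile is the inverse of the metadata naming scheme (a data file name plus the metaFileExt suffix). A quick hedged illustration; the wrapper function exists only to make the snippet self-contained:

// Sketch: recovering the data file name from a metadata file name.
func exampleUnwrap() {
	if name, ok := unwrapMetadataFile("dir/file.txt" + metaFileExt); ok {
		fmt.Println(name) // prints "dir/file.txt"
	}
}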
// makeDataName generates the file name for a data file with specified compression mode // makeDataName generates the file name for a data file with specified compression mode
func makeDataName(remote string, size int64, mode int) (newRemote string) { func makeDataName(remote string, size int64, mode int) (newRemote string) {
if mode != Uncompressed { if mode != Uncompressed {
@@ -355,39 +328,11 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
// found. // found.
// List entries and process them // List entries and process them
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f) entries, err = f.Fs.List(ctx, dir)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := f.processEntries(entries)
if err != nil { if err != nil {
return err return nil, err
} }
return callback(entries) return f.processEntries(entries)
}
listP := f.Fs.Features().ListP
if listP == nil {
entries, err := f.Fs.List(ctx, dir)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, dir, wrappedCallback)
} }
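This ListP has the same shape as combine's: wrap the caller's callback with an entry transformer, prefer the wrapped backend's ListP, and fall back to a single List call. The wrapping step in isolation, as a hedged generic helper (the name is illustrative):

// Sketch: adapt a list callback so every tranche is transformed first.
func wrapListCallback(cb fs.ListRCallback, transform func(fs.DirEntries) (fs.DirEntries, error)) fs.ListRCallback {
	return func(entries fs.DirEntries) error {
		entries, err := transform(entries) // e.g. processEntries above
		if err != nil {
			return err // propagating the error aborts the listing
		}
		return cb(entries)
	}
}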
// ListR lists the objects and directories of the Fs starting // ListR lists the objects and directories of the Fs starting
@@ -487,7 +432,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
if err != nil { if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err) fs.Errorf(o, "Failed to remove corrupted object: %v", err)
} }
return fmt.Errorf("corrupted on transfer: %v compressed hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash) return fmt.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
} }
return nil return nil
} }
@@ -523,7 +468,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
} }
fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file") fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file")
tempFile, err := os.CreateTemp("", "rclone-press-") tempFile, err := ioutil.TempFile("", "rclone-press-")
defer func() { defer func() {
// these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish // these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish
// to ignore them // to ignore them
@@ -601,8 +546,8 @@ func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, o
} }
// Transfer the data // Transfer the data
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options) o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx), options)
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx)) //o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx))
if err != nil { if err != nil {
if o != nil { if o != nil {
removeErr := o.Remove(ctx) removeErr := o.Remove(ctx)
@@ -821,14 +766,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.Fs.Mkdir(ctx, dir) return f.Fs.Mkdir(ctx, dir)
} }
// MkdirMetadata makes the root directory of the Fs object
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
if do := f.Fs.Features().MkdirMetadata; do != nil {
return do(ctx, dir, metadata)
}
return nil, fs.ErrorNotImplemented
}
// Rmdir removes the directory (container, bucket) if empty // Rmdir removes the directory (container, bucket) if empty
// //
// Return an error if it doesn't exist or isn't empty // Return an error if it doesn't exist or isn't empty
@@ -972,14 +909,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return do(ctx, srcFs.Fs, srcRemote, dstRemote) return do(ctx, srcFs.Fs, srcRemote, dstRemote)
} }
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
if do := f.Fs.Features().DirSetModTime; do != nil {
return do(ctx, dir, modTime)
}
return fs.ErrorNotImplemented
}
// CleanUp the trash in the Fs // CleanUp the trash in the Fs
// //
// Implement this if you have a way of emptying the trash or // Implement this if you have a way of emptying the trash or
@@ -1051,7 +980,6 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
fs.Logf(f, "path %q entryType %d", path, entryType) fs.Logf(f, "path %q entryType %d", path, entryType)
var ( var (
wrappedPath string wrappedPath string
isMetadataFile bool
) )
switch entryType { switch entryType {
case fs.EntryDirectory: case fs.EntryDirectory:
@@ -1059,10 +987,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
case fs.EntryObject: case fs.EntryObject:
// Note: All we really need to do to monitor the object is to check whether the metadata changed, // Note: All we really need to do to monitor the object is to check whether the metadata changed,
// as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same. // as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same.
wrappedPath, isMetadataFile = unwrapMetadataFile(path) wrappedPath = makeMetadataName(path)
if !isMetadataFile {
return
}
default: default:
fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType) fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType)
return return
@@ -1318,17 +1243,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
return do.Metadata(ctx) return do.Metadata(ctx)
} }
// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
do, ok := o.Object.(fs.SetMetadataer)
if !ok {
return fs.ErrorNotImplemented
}
return do.SetMetadata(ctx, metadata)
}
// Hash returns the selected checksum of the file // Hash returns the selected checksum of the file
// If no checksum is available it returns "" // If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) { func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
@@ -1394,7 +1308,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
} }
} }
// Get a chunkedreader for the wrapped object // Get a chunkedreader for the wrapped object
chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams) chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize)
// Get file handle // Get file handle
var file io.Reader var file io.Reader
if offset != 0 { if offset != 0 {
@@ -1561,8 +1475,6 @@ var (
_ fs.Copier = (*Fs)(nil) _ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil) _ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil) _ fs.DirMover = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil) _ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil) _ fs.CleanUpper = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil) _ fs.UnWrapper = (*Fs)(nil)


@@ -14,12 +14,13 @@ import (
"github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/fstest/fstests"
) )
var defaultOpt = fstests.Opt{ // TestIntegration runs integration tests against the remote
RemoteName: "TestCompress:", func TestIntegration(t *testing.T) {
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*Object)(nil), NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{ UnimplementableFsMethods: []string{
"OpenWriterAt", "OpenWriterAt",
"OpenChunkWriter",
"MergeDirs", "MergeDirs",
"DirCacheFlush", "DirCacheFlush",
"PutUnchecked", "PutUnchecked",
@@ -28,12 +29,8 @@ var defaultOpt = fstests.Opt{
"Disconnect", "Disconnect",
}, },
TiersToTest: []string{"STANDARD", "STANDARD_IA"}, TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{}, UnimplementableObjectMethods: []string{}}
} fstests.Run(t, &opt)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &defaultOpt)
} }
// TestRemoteGzip tests GZIP compression // TestRemoteGzip tests GZIP compression
@@ -43,13 +40,27 @@ func TestRemoteGzip(t *testing.T) {
} }
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip") tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
name := "TestCompressGzip" name := "TestCompressGzip"
opt := defaultOpt fstests.Run(t, &fstests.Opt{
opt.RemoteName = name + ":" RemoteName: name + ":",
opt.ExtraConfig = []fstests.ExtraConfigItem{ NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
UnimplementableObjectMethods: []string{
"GetTier",
"SetTier",
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"}, {Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir}, {Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"}, {Name: name, Key: "compression_mode", Value: "gzip"},
} },
opt.QuickTestOK = true QuickTestOK: true,
fstests.Run(t, &opt) })
} }


@@ -21,7 +21,6 @@ import (
"github.com/rclone/rclone/backend/crypt/pkcs7" "github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/version" "github.com/rclone/rclone/lib/version"
"github.com/rfjakob/eme" "github.com/rfjakob/eme"
"golang.org/x/crypto/nacl/secretbox" "golang.org/x/crypto/nacl/secretbox"
@@ -38,6 +37,7 @@ const (
blockHeaderSize = secretbox.Overhead blockHeaderSize = secretbox.Overhead
blockDataSize = 64 * 1024 blockDataSize = 64 * 1024
blockSize = blockHeaderSize + blockDataSize blockSize = blockHeaderSize + blockDataSize
encryptedSuffix = ".bin" // when file name encryption is off we add this suffix to make sure the cloud provider doesn't process the file
) )
// Errors returned by cipher // Errors returned by cipher
@@ -53,9 +53,8 @@ var (
ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?") ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?")
ErrorBadBase32Encoding = errors.New("bad base32 filename encoding") ErrorBadBase32Encoding = errors.New("bad base32 filename encoding")
ErrorFileClosed = errors.New("file already closed") ErrorFileClosed = errors.New("file already closed")
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - does not match suffix") ErrorNotAnEncryptedFile = errors.New("not an encrypted file - no \"" + encryptedSuffix + "\" suffix")
ErrorBadSeek = errors.New("Seek beyond end of file") ErrorBadSeek = errors.New("Seek beyond end of file")
ErrorSuffixMissingDot = errors.New("suffix config setting should include a '.'")
defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1} defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1}
obfuscQuoteRune = '!' obfuscQuoteRune = '!'
) )
@@ -179,8 +178,6 @@ type Cipher struct {
buffers sync.Pool // encrypt/decrypt buffers buffers sync.Pool // encrypt/decrypt buffers
cryptoRand io.Reader // read crypto random numbers from here cryptoRand io.Reader // read crypto random numbers from here
dirNameEncrypt bool dirNameEncrypt bool
passBadBlocks bool // if set passed bad blocks as zeroed blocks
encryptedSuffix string
} }
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val // newCipher initialises the cipher. If salt is "" then it uses a built in salt val
@@ -190,10 +187,9 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
fileNameEnc: enc, fileNameEnc: enc,
cryptoRand: rand.Reader, cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt, dirNameEncrypt: dirNameEncrypt,
encryptedSuffix: ".bin",
} }
c.buffers.New = func() any { c.buffers.New = func() interface{} {
return new([blockSize]byte) return make([]byte, blockSize)
} }
err := c.Key(password, salt) err := c.Key(password, salt)
if err != nil { if err != nil {
@@ -202,29 +198,11 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
return c, nil return c, nil
} }
// setEncryptedSuffix set suffix, or an empty string
func (c *Cipher) setEncryptedSuffix(suffix string) {
if strings.EqualFold(suffix, "none") {
c.encryptedSuffix = ""
return
}
if !strings.HasPrefix(suffix, ".") {
fs.Errorf(nil, "crypt: bad suffix: %v", ErrorSuffixMissingDot)
suffix = "." + suffix
}
c.encryptedSuffix = suffix
}
// Call to set bad block pass through
func (c *Cipher) setPassBadBlocks(passBadBlocks bool) {
c.passBadBlocks = passBadBlocks
}
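Spelled out, setEncryptedSuffix applies three rules: a case-insensitive "none" clears the suffix entirely, a value without a leading dot is logged as a config error and repaired, and anything else is stored verbatim. A hedged sketch with illustrative values:

// Sketch of the normalisation rules implemented above.
func suffixExamples(c *Cipher) {
	c.setEncryptedSuffix(".jpg") // kept as-is: "1/12/123" -> "1/12/123.jpg"
	c.setEncryptedSuffix("jpg")  // missing dot: error logged, stored as ".jpg"
	c.setEncryptedSuffix("None") // any casing of "none" -> empty suffix
}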
// Key creates all the internal keys from the password passed in using // Key creates all the internal keys from the password passed in using
// scrypt. // scrypt.
// //
// If salt is "" we use a fixed salt just to make attackers lives // If salt is "" we use a fixed salt just to make attackers lives
// slightly harder than using no salt. // slightly harder than using no salt.
// //
// Note that empty password makes all 0x00 keys which is used in the // Note that empty password makes all 0x00 keys which is used in the
// tests. // tests.
@@ -252,12 +230,15 @@ func (c *Cipher) Key(password, salt string) (err error) {
} }
// getBlock gets a block from the pool of size blockSize // getBlock gets a block from the pool of size blockSize
func (c *Cipher) getBlock() *[blockSize]byte { func (c *Cipher) getBlock() []byte {
return c.buffers.Get().(*[blockSize]byte) return c.buffers.Get().([]byte)
} }
// putBlock returns a block to the pool of size blockSize // putBlock returns a block to the pool of size blockSize
func (c *Cipher) putBlock(buf *[blockSize]byte) { func (c *Cipher) putBlock(buf []byte) {
if len(buf) != blockSize {
panic("bad blocksize returned to pool")
}
c.buffers.Put(buf) c.buffers.Put(buf)
} }
@@ -329,14 +310,14 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
for _, runeValue := range plaintext { for _, runeValue := range plaintext {
dir += int(runeValue) dir += int(runeValue)
} }
dir %= 256 dir = dir % 256
// We'll use this number to store in the result filename... // We'll use this number to store in the result filename...
var result bytes.Buffer var result bytes.Buffer
_, _ = result.WriteString(strconv.Itoa(dir) + ".") _, _ = result.WriteString(strconv.Itoa(dir) + ".")
// but we'll augment it with the nameKey for real calculation // but we'll augment it with the nameKey for real calculation
for i := range len(c.nameKey) { for i := 0; i < len(c.nameKey); i++ {
dir += int(c.nameKey[i]) dir += int(c.nameKey[i])
} }
@@ -418,7 +399,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
} }
// add the nameKey to get the real rotate distance // add the nameKey to get the real rotate distance
for i := range len(c.nameKey) { for i := 0; i < len(c.nameKey); i++ {
dir += int(c.nameKey[i]) dir += int(c.nameKey[i])
} }
@@ -450,7 +431,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
if pos >= 26 { if pos >= 26 {
pos -= 6 pos -= 6
} }
pos -= thisdir pos = pos - thisdir
if pos < 0 { if pos < 0 {
pos += 52 pos += 52
} }
@@ -527,7 +508,7 @@ func (c *Cipher) encryptFileName(in string) string {
// EncryptFileName encrypts a file path // EncryptFileName encrypts a file path
func (c *Cipher) EncryptFileName(in string) string { func (c *Cipher) EncryptFileName(in string) string {
if c.mode == NameEncryptionOff { if c.mode == NameEncryptionOff {
return in + c.encryptedSuffix return in + encryptedSuffix
} }
return c.encryptFileName(in) return c.encryptFileName(in)
} }
@@ -587,8 +568,8 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
// DecryptFileName decrypts a file path // DecryptFileName decrypts a file path
func (c *Cipher) DecryptFileName(in string) (string, error) { func (c *Cipher) DecryptFileName(in string) (string, error) {
if c.mode == NameEncryptionOff { if c.mode == NameEncryptionOff {
remainingLength := len(in) - len(c.encryptedSuffix) remainingLength := len(in) - len(encryptedSuffix)
if remainingLength == 0 || !strings.HasSuffix(in, c.encryptedSuffix) { if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
return "", ErrorNotAnEncryptedFile return "", ErrorNotAnEncryptedFile
} }
decrypted := in[:remainingLength] decrypted := in[:remainingLength]
@@ -628,7 +609,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
// fromReader fills the nonce from an io.Reader - normally the OSes // fromReader fills the nonce from an io.Reader - normally the OSes
// crypto random number generator // crypto random number generator
func (n *nonce) fromReader(in io.Reader) error { func (n *nonce) fromReader(in io.Reader) error {
read, err := readers.ReadFill(in, (*n)[:]) read, err := io.ReadFull(in, (*n)[:])
if read != fileNonceSize { if read != fileNonceSize {
return fmt.Errorf("short read of nonce: %w", err) return fmt.Errorf("short read of nonce: %w", err)
} }
@@ -664,7 +645,7 @@ func (n *nonce) increment() {
// add a uint64 to the nonce // add a uint64 to the nonce
func (n *nonce) add(x uint64) { func (n *nonce) add(x uint64) {
carry := uint16(0) carry := uint16(0)
for i := range 8 { for i := 0; i < 8; i++ {
digit := (*n)[i] digit := (*n)[i]
xDigit := byte(x) xDigit := byte(x)
x >>= 8 x >>= 8
@@ -683,8 +664,8 @@ type encrypter struct {
in io.Reader in io.Reader
c *Cipher c *Cipher
nonce nonce nonce nonce
buf *[blockSize]byte buf []byte
readBuf *[blockSize]byte readBuf []byte
bufIndex int bufIndex int
bufSize int bufSize int
err error err error
@@ -709,9 +690,9 @@ func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
} }
} }
// Copy magic into buffer // Copy magic into buffer
copy((*fh.buf)[:], fileMagicBytes) copy(fh.buf, fileMagicBytes)
// Copy nonce into buffer // Copy nonce into buffer
copy((*fh.buf)[fileMagicSize:], fh.nonce[:]) copy(fh.buf[fileMagicSize:], fh.nonce[:])
return fh, nil return fh, nil
} }
@@ -726,20 +707,22 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
if fh.bufIndex >= fh.bufSize { if fh.bufIndex >= fh.bufSize {
// Read data // Read data
// FIXME should overlap the reads with a go-routine and 2 buffers? // FIXME should overlap the reads with a go-routine and 2 buffers?
readBuf := (*fh.readBuf)[:blockDataSize] readBuf := fh.readBuf[:blockDataSize]
n, err = readers.ReadFill(fh.in, readBuf) n, err = io.ReadFull(fh.in, readBuf)
if n == 0 { if n == 0 {
// err can't be nil since:
// n == len(buf) if and only if err == nil.
return fh.finish(err) return fh.finish(err)
} }
// possibly err != nil here, but we will process the // possibly err != nil here, but we will process the
// data and the next call to ReadFill will return 0, err // data and the next call to ReadFull will return 0, err
// Encrypt the block using the nonce // Encrypt the block using the nonce
secretbox.Seal((*fh.buf)[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey) secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
fh.bufIndex = 0 fh.bufIndex = 0
fh.bufSize = blockHeaderSize + n fh.bufSize = blockHeaderSize + n
fh.nonce.increment() fh.nonce.increment()
} }
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufSize]) n = copy(p, fh.buf[fh.bufIndex:fh.bufSize])
fh.bufIndex += n fh.bufIndex += n
return n, nil return n, nil
} }
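Given the constants earlier in this file (an 8-byte magic plus a 24-byte nonce for the header, then one secretbox overhead of blockHeaderSize bytes per blockDataSize of plaintext), the ciphertext length is a pure function of the plaintext length. A sketch of that arithmetic, assuming the header is fileMagicSize + fileNonceSize = 32 bytes:

// Sketch: ciphertext size for n plaintext bytes under this framing.
func encryptedSize(n int64) int64 {
	const headerSize = 8 + 24 // fileMagicSize + fileNonceSize
	blocks, residue := n/blockDataSize, n%blockDataSize
	size := int64(headerSize) + blocks*int64(blockHeaderSize+blockDataSize)
	if residue != 0 {
		size += int64(blockHeaderSize) + residue // a short final block still pays full overhead
	}
	return size
}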
@@ -780,8 +763,8 @@ type decrypter struct {
nonce nonce nonce nonce
initialNonce nonce initialNonce nonce
c *Cipher c *Cipher
buf *[blockSize]byte buf []byte
readBuf *[blockSize]byte readBuf []byte
bufIndex int bufIndex int
bufSize int bufSize int
err error err error
@@ -799,12 +782,12 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
limit: -1, limit: -1,
} }
// Read file header (magic + nonce) // Read file header (magic + nonce)
readBuf := (*fh.readBuf)[:fileHeaderSize] readBuf := fh.readBuf[:fileHeaderSize]
n, err := readers.ReadFill(fh.rc, readBuf) _, err := io.ReadFull(fh.rc, readBuf)
if n < fileHeaderSize && err == io.EOF { if err == io.EOF || err == io.ErrUnexpectedEOF {
// This read from 0..fileHeaderSize-1 bytes // This read from 0..fileHeaderSize-1 bytes
return nil, fh.finishAndClose(ErrorEncryptedFileTooShort) return nil, fh.finishAndClose(ErrorEncryptedFileTooShort)
} else if err != io.EOF && err != nil { } else if err != nil {
return nil, fh.finishAndClose(err) return nil, fh.finishAndClose(err)
} }
// check the magic // check the magic
@@ -862,8 +845,10 @@ func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
func (fh *decrypter) fillBuffer() (err error) { func (fh *decrypter) fillBuffer() (err error) {
// FIXME should overlap the reads with a go-routine and 2 buffers? // FIXME should overlap the reads with a go-routine and 2 buffers?
readBuf := fh.readBuf readBuf := fh.readBuf
n, err := readers.ReadFill(fh.rc, (*readBuf)[:]) n, err := io.ReadFull(fh.rc, readBuf)
if n == 0 { if n == 0 {
// err can't be nil since:
// n == len(buf) if and only if err == nil.
return err return err
} }
// possibly err != nil here, but we will process the data and // possibly err != nil here, but we will process the data and
@@ -871,26 +856,19 @@ func (fh *decrypter) fillBuffer() (err error) {
// Check header + 1 byte exists // Check header + 1 byte exists
if n <= blockHeaderSize { if n <= blockHeaderSize {
if err != nil && err != io.EOF { if err != nil {
return err // return pending error as it is likely more accurate return err // return pending error as it is likely more accurate
} }
return ErrorEncryptedFileBadHeader return ErrorEncryptedFileBadHeader
} }
// Decrypt the block using the nonce // Decrypt the block using the nonce
_, ok := secretbox.Open((*fh.buf)[:0], (*readBuf)[:n], fh.nonce.pointer(), &fh.c.dataKey) _, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
if !ok { if !ok {
if err != nil && err != io.EOF { if err != nil {
return err // return pending error as it is likely more accurate return err // return pending error as it is likely more accurate
} }
if !fh.c.passBadBlocks {
return ErrorEncryptedBadBlock return ErrorEncryptedBadBlock
} }
fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
// Zero out the bad block and continue
for i := range (*fh.buf)[:n] {
fh.buf[i] = 0
}
}
fh.bufIndex = 0 fh.bufIndex = 0
fh.bufSize = n - blockHeaderSize fh.bufSize = n - blockHeaderSize
fh.nonce.increment() fh.nonce.increment()
@@ -915,7 +893,7 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
if fh.limit >= 0 && fh.limit < int64(toCopy) { if fh.limit >= 0 && fh.limit < int64(toCopy) {
toCopy = int(fh.limit) toCopy = int(fh.limit)
} }
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufIndex+toCopy]) n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy])
fh.bufIndex += n fh.bufIndex += n
if fh.limit >= 0 { if fh.limit >= 0 {
fh.limit -= int64(n) fh.limit -= int64(n)
@@ -926,8 +904,9 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
return n, nil return n, nil
} }
// calculateUnderlying converts an (offset, limit) in an encrypted file // calculateUnderlying converts an (offset, limit) in a crypted file
// into an (underlyingOffset, underlyingLimit) for the underlying file. // into an (underlyingOffset, underlyingLimit) for the underlying
// file.
// //
// It also returns number of bytes to discard after reading the first // It also returns number of bytes to discard after reading the first
// block and number of blocks this is from the start so the nonce can // block and number of blocks this is from the start so the nonce can
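The mapping that calculateUnderlying's comment describes reduces to block arithmetic over the constants defined at the top of this file. A hedged reconstruction; the helper name and exact shape are assumptions, not the verbatim implementation:

// Sketch: translate a plaintext offset into a ciphertext seek position.
func underlyingStart(offset int64) (underlyingOffset, discard, blocks int64) {
	blocks = offset / blockDataSize  // whole blocks before the offset
	discard = offset % blockDataSize // plaintext bytes to drop after the first block is read
	underlyingOffset = int64(fileHeaderSize) + blocks*blockSize // header + preceding sealed blocks
	return underlyingOffset, discard, blocks
}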


@@ -8,6 +8,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"strings" "strings"
"testing" "testing"
@@ -27,14 +28,14 @@ func TestNewNameEncryptionMode(t *testing.T) {
{"off", NameEncryptionOff, ""}, {"off", NameEncryptionOff, ""},
{"standard", NameEncryptionStandard, ""}, {"standard", NameEncryptionStandard, ""},
{"obfuscate", NameEncryptionObfuscated, ""}, {"obfuscate", NameEncryptionObfuscated, ""},
{"potato", NameEncryptionOff, "unknown file name encryption mode \"potato\""}, {"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
} { } {
actual, actualErr := NewNameEncryptionMode(test.in) actual, actualErr := NewNameEncryptionMode(test.in)
assert.Equal(t, actual, test.expected) assert.Equal(t, actual, test.expected)
if test.expectedErr == "" { if test.expectedErr == "" {
assert.NoError(t, actualErr) assert.NoError(t, actualErr)
} else { } else {
assert.EqualError(t, actualErr, test.expectedErr) assert.Error(t, actualErr, test.expectedErr)
} }
} }
} }
@@ -405,13 +406,6 @@ func TestNonStandardEncryptFileName(t *testing.T) {
// Off mode // Off mode
c, _ := newCipher(NameEncryptionOff, "", "", true, nil) c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123")) assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
// Off mode with custom suffix
c, _ = newCipher(NameEncryptionOff, "", "", true, nil)
c.setEncryptedSuffix(".jpg")
assert.Equal(t, "1/12/123.jpg", c.EncryptFileName("1/12/123"))
// Off mode with empty suffix
c.setEncryptedSuffix("none")
assert.Equal(t, "1/12/123", c.EncryptFileName("1/12/123"))
// Obfuscation mode // Obfuscation mode
c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil) c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello")) assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
@@ -490,27 +484,21 @@ func TestNonStandardDecryptFileName(t *testing.T) {
in string in string
expected string expected string
expectedErr error expectedErr error
customSuffix string
}{ }{
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil, ""}, {NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile, ""}, {NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile, ""}, {NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil, ""}, {NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil, ""}, {NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil, ""}, {NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
{NameEncryptionOff, true, "1/12/123.jpg", "1/12/123", nil, ".jpg"}, {NameEncryptionObfuscated, true, "!.hello", "hello", nil},
{NameEncryptionOff, true, "1/12/123", "1/12/123", nil, "none"}, {NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil, ""}, {NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile, ""}, {NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil, ""}, {NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil, ""}, {NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil, ""},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil, ""},
} { } {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc) c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
if test.customSuffix != "" {
c.setEncryptedSuffix(test.customSuffix)
}
actual, actualErr := c.DecryptFileName(test.in) actual, actualErr := c.DecryptFileName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode) what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what) assert.Equal(t, test.expected, actual, what)
@@ -739,7 +727,7 @@ func TestNonceFromReader(t *testing.T) {
assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x) assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x)
buf = bytes.NewBufferString("123456789abcdefghijklmn") buf = bytes.NewBufferString("123456789abcdefghijklmn")
err = x.fromReader(buf) err = x.fromReader(buf)
assert.EqualError(t, err, "short read of nonce: EOF") assert.Error(t, err, "short read of nonce")
} }
func TestNonceFromBuf(t *testing.T) { func TestNonceFromBuf(t *testing.T) {
@@ -1063,7 +1051,7 @@ func TestRandomSource(t *testing.T) {
_, _ = source.Read(buf) _, _ = source.Read(buf)
sink = newRandomSource(1e8) sink = newRandomSource(1e8)
_, err = io.Copy(sink, source) _, err = io.Copy(sink, source)
assert.EqualError(t, err, "Error in stream at 1") assert.Error(t, err, "Error in stream")
} }
type zeroes struct{} type zeroes struct{}
@@ -1085,7 +1073,7 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
source := newRandomSource(copySize) source := newRandomSource(copySize)
encrypted, err := c.newEncrypter(source, nil) encrypted, err := c.newEncrypter(source, nil)
assert.NoError(t, err) assert.NoError(t, err)
decrypted, err := c.newDecrypter(io.NopCloser(encrypted)) decrypted, err := c.newDecrypter(ioutil.NopCloser(encrypted))
assert.NoError(t, err) assert.NoError(t, err)
sink := newRandomSource(copySize) sink := newRandomSource(copySize)
n, err := io.CopyBuffer(sink, decrypted, buf) n, err := io.CopyBuffer(sink, decrypted, buf)
@@ -1156,15 +1144,15 @@ func TestEncryptData(t *testing.T) {
buf := bytes.NewBuffer(test.in) buf := bytes.NewBuffer(test.in)
encrypted, err := c.EncryptData(buf) encrypted, err := c.EncryptData(buf)
assert.NoError(t, err) assert.NoError(t, err)
out, err := io.ReadAll(encrypted) out, err := ioutil.ReadAll(encrypted)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, test.expected, out) assert.Equal(t, test.expected, out)
// Check we can decode the data properly too... // Check we can decode the data properly too...
buf = bytes.NewBuffer(out) buf = bytes.NewBuffer(out)
decrypted, err := c.DecryptData(io.NopCloser(buf)) decrypted, err := c.DecryptData(ioutil.NopCloser(buf))
assert.NoError(t, err) assert.NoError(t, err)
out, err = io.ReadAll(decrypted) out, err = ioutil.ReadAll(decrypted)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, test.in, out) assert.Equal(t, test.in, out)
} }
@@ -1180,13 +1168,13 @@ func TestNewEncrypter(t *testing.T) {
fh, err := c.newEncrypter(z, nil) fh, err := c.newEncrypter(z, nil)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, nonce{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.nonce) assert.Equal(t, nonce{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.nonce)
assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, (*fh.buf)[:32]) assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.buf[:32])
// Test error path // Test error path
c.cryptoRand = bytes.NewBufferString("123456789abcdefghijklmn") c.cryptoRand = bytes.NewBufferString("123456789abcdefghijklmn")
fh, err = c.newEncrypter(z, nil) fh, err = c.newEncrypter(z, nil)
assert.Nil(t, fh) assert.Nil(t, fh)
assert.EqualError(t, err, "short read of nonce: EOF") assert.Error(t, err, "short read of nonce")
} }
// Test the stream returning 0, io.ErrUnexpectedEOF - this used to // Test the stream returning 0, io.ErrUnexpectedEOF - this used to
@@ -1199,7 +1187,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
fh, err := c.newEncrypter(in, nil) fh, err := c.newEncrypter(in, nil)
assert.NoError(t, err) assert.NoError(t, err)
n, err := io.CopyN(io.Discard, fh, 1e6) n, err := io.CopyN(ioutil.Discard, fh, 1e6)
assert.Equal(t, io.ErrUnexpectedEOF, err) assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(32), n) assert.Equal(t, int64(32), n)
} }
@@ -1237,7 +1225,7 @@ func TestNewDecrypter(t *testing.T) {
cd := newCloseDetector(bytes.NewBuffer(file0[:i])) cd := newCloseDetector(bytes.NewBuffer(file0[:i]))
fh, err = c.newDecrypter(cd) fh, err = c.newDecrypter(cd)
assert.Nil(t, fh) assert.Nil(t, fh)
assert.EqualError(t, err, ErrorEncryptedFileTooShort.Error()) assert.Error(t, err, ErrorEncryptedFileTooShort.Error())
assert.Equal(t, 1, cd.closed) assert.Equal(t, 1, cd.closed)
} }
@@ -1245,7 +1233,7 @@ func TestNewDecrypter(t *testing.T) {
cd = newCloseDetector(er) cd = newCloseDetector(er)
fh, err = c.newDecrypter(cd) fh, err = c.newDecrypter(cd)
assert.Nil(t, fh) assert.Nil(t, fh)
assert.EqualError(t, err, "potato") assert.Error(t, err, "potato")
assert.Equal(t, 1, cd.closed) assert.Equal(t, 1, cd.closed)
// bad magic // bad magic
@@ -1256,7 +1244,7 @@ func TestNewDecrypter(t *testing.T) {
cd := newCloseDetector(bytes.NewBuffer(file0copy)) cd := newCloseDetector(bytes.NewBuffer(file0copy))
fh, err := c.newDecrypter(cd) fh, err := c.newDecrypter(cd)
assert.Nil(t, fh) assert.Nil(t, fh)
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error()) assert.Error(t, err, ErrorEncryptedBadMagic.Error())
file0copy[i] ^= 0x1 file0copy[i] ^= 0x1
assert.Equal(t, 1, cd.closed) assert.Equal(t, 1, cd.closed)
} }
@@ -1269,12 +1257,12 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF} in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
in1 := bytes.NewBuffer(file16) in1 := bytes.NewBuffer(file16)
in := io.NopCloser(io.MultiReader(in1, in2)) in := ioutil.NopCloser(io.MultiReader(in1, in2))
fh, err := c.newDecrypter(in) fh, err := c.newDecrypter(in)
assert.NoError(t, err) assert.NoError(t, err)
n, err := io.CopyN(io.Discard, fh, 1e6) n, err := io.CopyN(ioutil.Discard, fh, 1e6)
assert.Equal(t, io.ErrUnexpectedEOF, err) assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(16), n) assert.Equal(t, int64(16), n)
} }
@@ -1286,14 +1274,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
// Make random data // Make random data
const dataSize = 150000 const dataSize = 150000
plaintext, err := io.ReadAll(newRandomSource(dataSize)) plaintext, err := ioutil.ReadAll(newRandomSource(dataSize))
assert.NoError(t, err) assert.NoError(t, err)
// Encrypt the data // Encrypt the data
buf := bytes.NewBuffer(plaintext) buf := bytes.NewBuffer(plaintext)
encrypted, err := c.EncryptData(buf) encrypted, err := c.EncryptData(buf)
assert.NoError(t, err) assert.NoError(t, err)
ciphertext, err := io.ReadAll(encrypted) ciphertext, err := ioutil.ReadAll(encrypted)
assert.NoError(t, err) assert.NoError(t, err)
trials := []int{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65, trials := []int{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65,
@@ -1307,9 +1295,12 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) { open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
end := len(ciphertext) end := len(ciphertext)
if underlyingLimit >= 0 { if underlyingLimit >= 0 {
end = min(int(underlyingOffset+underlyingLimit), len(ciphertext)) end = int(underlyingOffset + underlyingLimit)
if end > len(ciphertext) {
end = len(ciphertext)
} }
reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end])) }
reader = ioutil.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
return reader, nil return reader, nil
} }
@@ -1487,7 +1478,7 @@ func TestDecrypterRead(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
// Test truncating the file at each possible point // Test truncating the file at each possible point
for i := range len(file16) - 1 { for i := 0; i < len(file16)-1; i++ {
what := fmt.Sprintf("truncating to %d/%d", i, len(file16)) what := fmt.Sprintf("truncating to %d/%d", i, len(file16))
cd := newCloseDetector(bytes.NewBuffer(file16[:i])) cd := newCloseDetector(bytes.NewBuffer(file16[:i]))
fh, err := c.newDecrypter(cd) fh, err := c.newDecrypter(cd)
@@ -1499,16 +1490,14 @@ func TestDecrypterRead(t *testing.T) {
assert.NoError(t, err, what) assert.NoError(t, err, what)
continue continue
} }
_, err = io.ReadAll(fh) _, err = ioutil.ReadAll(fh)
var expectedErr error var expectedErr error
switch { switch {
case i == fileHeaderSize: case i == fileHeaderSize:
// This would normally produce an error *except* on the first block // This would normally produce an error *except* on the first block
expectedErr = nil expectedErr = nil
case i <= fileHeaderSize+blockHeaderSize:
expectedErr = ErrorEncryptedFileBadHeader
default: default:
expectedErr = ErrorEncryptedBadBlock expectedErr = io.ErrUnexpectedEOF
} }
if expectedErr != nil { if expectedErr != nil {
assert.EqualError(t, err, expectedErr.Error(), what) assert.EqualError(t, err, expectedErr.Error(), what)
@@ -1525,8 +1514,8 @@ func TestDecrypterRead(t *testing.T) {
cd := newCloseDetector(in) cd := newCloseDetector(in)
fh, err := c.newDecrypter(cd) fh, err := c.newDecrypter(cd)
assert.NoError(t, err) assert.NoError(t, err)
_, err = io.ReadAll(fh) _, err = ioutil.ReadAll(fh)
assert.EqualError(t, err, "potato") assert.Error(t, err, "potato")
assert.Equal(t, 0, cd.closed) assert.Equal(t, 0, cd.closed)
// Test corrupting the input // Test corrupting the input
@@ -1535,28 +1524,17 @@ func TestDecrypterRead(t *testing.T) {
copy(file16copy, file16) copy(file16copy, file16)
for i := range file16copy { for i := range file16copy {
file16copy[i] ^= 0xFF file16copy[i] ^= 0xFF
fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy))) fh, err := c.newDecrypter(ioutil.NopCloser(bytes.NewBuffer(file16copy)))
if i < fileMagicSize { if i < fileMagicSize {
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error()) assert.Error(t, err, ErrorEncryptedBadMagic.Error())
assert.Nil(t, fh) assert.Nil(t, fh)
} else { } else {
assert.NoError(t, err) assert.NoError(t, err)
_, err = io.ReadAll(fh) _, err = ioutil.ReadAll(fh)
assert.EqualError(t, err, ErrorEncryptedBadBlock.Error()) assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
} }
file16copy[i] ^= 0xFF file16copy[i] ^= 0xFF
} }
// Test that we can corrupt a byte and read zeroes if
// passBadBlocks is set
copy(file16copy, file16)
file16copy[len(file16copy)-1] ^= 0xFF
c.passBadBlocks = true
fh, err = c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
assert.NoError(t, err)
buf, err := io.ReadAll(fh)
assert.NoError(t, err)
assert.Equal(t, make([]byte, 16), buf)
} }
func TestDecrypterClose(t *testing.T) { func TestDecrypterClose(t *testing.T) {
@@ -1577,7 +1555,7 @@ func TestDecrypterClose(t *testing.T) {
// double close // double close
err = fh.Close() err = fh.Close()
assert.EqualError(t, err, ErrorFileClosed.Error()) assert.Error(t, err, ErrorFileClosed.Error())
assert.Equal(t, 1, cd.closed) assert.Equal(t, 1, cd.closed)
// try again reading the file this time // try again reading the file this time
@@ -1587,7 +1565,7 @@ func TestDecrypterClose(t *testing.T) {
assert.Equal(t, 0, cd.closed) assert.Equal(t, 0, cd.closed)
// close after reading // close after reading
out, err := io.ReadAll(fh) out, err := ioutil.ReadAll(fh)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, []byte{1}, out) assert.Equal(t, []byte{1}, out)
assert.Equal(t, io.EOF, fh.err) assert.Equal(t, io.EOF, fh.err)
@@ -1604,6 +1582,8 @@ func TestPutGetBlock(t *testing.T) {
block := c.getBlock() block := c.getBlock()
c.putBlock(block) c.putBlock(block)
c.putBlock(block) c.putBlock(block)
assert.Panics(t, func() { c.putBlock(block[:len(block)-1]) })
} }
func TestKey(t *testing.T) { func TestKey(t *testing.T) {


@@ -18,7 +18,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
) )
// Globals // Globals
@@ -49,7 +48,7 @@ func init() {
Help: "Very simple filename obfuscation.", Help: "Very simple filename obfuscation.",
}, { }, {
Value: "off", Value: "off",
Help: "Don't encrypt the file names.\nAdds a \".bin\", or \"suffix\" extension only.", Help: "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
}, },
}, },
}, { }, {
@@ -80,9 +79,7 @@ NB If filename_encryption is "off" then this option will do nothing.`,
}, { }, {
Name: "server_side_across_configs", Name: "server_side_across_configs",
Default: false, Default: false,
Help: `Deprecated: use --server-side-across-configs instead. Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.
Allow server-side operations (e.g. copy) to work across different crypt configs.
Normally this option is not what you want, but if you have two crypts Normally this option is not what you want, but if you have two crypts
pointing to the same backend you can use it. pointing to the same backend you can use it.
@@ -122,25 +119,6 @@ names, or for debugging purposes.`,
Help: "Encrypt file data.", Help: "Encrypt file data.",
}, },
}, },
}, {
Name: "pass_bad_blocks",
Help: `If set this will pass bad blocks through as all 0.
This should not be set in normal operation, it should only be set if
trying to recover an encrypted file with errors and it is desired to
recover as much of the file as possible.`,
Default: false,
Advanced: true,
}, {
Name: "strict_names",
Help: `If set, this will raise an error when crypt comes across a filename that can't be decrypted.
(By default, rclone will just log a NOTICE and continue as normal.)
This can happen if encrypted and unencrypted files are stored in the same
directory (which is not recommended.) It may also indicate a more serious
problem that should be investigated.`,
Default: false,
Advanced: true,
}, { }, {
Name: "filename_encoding", Name: "filename_encoding",
Help: `How to encode the encrypted filename to text string. Help: `How to encode the encrypted filename to text string.
@@ -160,18 +138,10 @@ length and if it's case sensitive.`,
}, },
{ {
Value: "base32768", Value: "base32768",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)", Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)",
}, },
}, },
Advanced: true, Advanced: true,
}, {
Name: "suffix",
Help: `If this is set it will override the default suffix of ".bin".
Setting suffix to "none" will result in an empty suffix. This may be useful
when the path length is critical.`,
Default: ".bin",
Advanced: true,
}}, }},
}) })
} }
@@ -204,8 +174,6 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to make cipher: %w", err) return nil, fmt.Errorf("failed to make cipher: %w", err)
} }
cipher.setEncryptedSuffix(opt.Suffix)
cipher.setPassBadBlocks(opt.PassBadBlocks)
return cipher, nil return cipher, nil
} }
@@ -264,17 +232,10 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
cipher: cipher, cipher: cipher,
} }
cache.PinUntilFinalized(f.Fs, f) cache.PinUntilFinalized(f.Fs, f)
// Correct root if definitely pointing to a file
if err == fs.ErrorIsFile {
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
// the features here are ones we could support, and they are // the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs // ANDed with the ones from wrappedFs
f.features = (&fs.Features{ f.features = (&fs.Features{
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff, CaseInsensitive: cipher.NameEncryptionMode() == NameEncryptionOff,
DuplicateFiles: true, DuplicateFiles: true,
ReadMimeType: false, // MimeTypes not supported with crypt ReadMimeType: false, // MimeTypes not supported with crypt
WriteMimeType: false, WriteMimeType: false,
@@ -286,17 +247,8 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
ReadMetadata: true, ReadMetadata: true,
WriteMetadata: true, WriteMetadata: true,
UserMetadata: true, UserMetadata: true,
ReadDirMetadata: true,
WriteDirMetadata: true,
WriteDirSetModTime: true,
UserDirMetadata: true,
DirModTimeUpdatesOnWrite: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs) }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// Enable ListP always
f.features.ListP = f.ListP
return f, err return f, err
} }
@@ -310,10 +262,7 @@ type Options struct {
Password2 string `config:"password2"` Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"` ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ShowMapping bool `config:"show_mapping"` ShowMapping bool `config:"show_mapping"`
PassBadBlocks bool `config:"pass_bad_blocks"`
FilenameEncoding string `config:"filename_encoding"` FilenameEncoding string `config:"filename_encoding"`
Suffix string `config:"suffix"`
StrictNames bool `config:"strict_names"`
} }
// Fs represents a wrapped fs.Fs // Fs represents a wrapped fs.Fs
@@ -348,64 +297,45 @@ func (f *Fs) String() string {
} }
// Encrypt an object file name to entries. // Encrypt an object file name to entries.
func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) error { func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
remote := obj.Remote() remote := obj.Remote()
decryptedRemote, err := f.cipher.DecryptFileName(remote) decryptedRemote, err := f.cipher.DecryptFileName(remote)
if err != nil { if err != nil {
if f.opt.StrictNames { fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
return fmt.Errorf("%s: undecryptable file name detected: %v", remote, err) return
}
fs.Logf(remote, "Skipping undecryptable file name: %v", err)
return nil
} }
if f.opt.ShowMapping { if f.opt.ShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote) fs.Logf(decryptedRemote, "Encrypts to %q", remote)
} }
*entries = append(*entries, f.newObject(obj)) *entries = append(*entries, f.newObject(obj))
return nil
} }
// Encrypt a directory file name to entries. // Encrypt a directory file name to entries.
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) error { func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
remote := dir.Remote() remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote) decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil { if err != nil {
if f.opt.StrictNames { fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
return fmt.Errorf("%s: undecryptable dir name detected: %v", remote, err) return
}
fs.Logf(remote, "Skipping undecryptable dir name: %v", err)
return nil
} }
if f.opt.ShowMapping { if f.opt.ShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote) fs.Logf(decryptedRemote, "Encrypts to %q", remote)
} }
*entries = append(*entries, f.newDir(ctx, dir)) *entries = append(*entries, f.newDir(ctx, dir))
return nil
} }
// Encrypt some directory entries. This alters entries returning it as newEntries. // Encrypt some directory entries. This alters entries returning it as newEntries.
func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) { func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
newEntries = entries[:0] // in place filter newEntries = entries[:0] // in place filter
errors := 0
var firsterr error
for _, entry := range entries { for _, entry := range entries {
switch x := entry.(type) { switch x := entry.(type) {
case fs.Object: case fs.Object:
err = f.add(&newEntries, x) f.add(&newEntries, x)
case fs.Directory: case fs.Directory:
err = f.addDir(ctx, &newEntries, x) f.addDir(ctx, &newEntries, x)
default: default:
return nil, fmt.Errorf("unknown object type %T", entry) return nil, fmt.Errorf("unknown object type %T", entry)
} }
if err != nil {
errors++
if firsterr == nil {
firsterr = err
}
}
}
if firsterr != nil {
return nil, fmt.Errorf("there were %v undecryptable name errors. first error: %v", errors, firsterr)
} }
return newEntries, nil return newEntries, nil
} }
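
// An aside on the pattern above: encryptEntries filters the listing in place,
// and with strict_names it counts undecryptable names and surfaces the first
// error. A minimal self-contained sketch of that shape (the helper name and
// signature are illustrative, not rclone API):
package sketch

import "fmt"

// filterDecryptable keeps the names that decrypt cleanly. In strict mode any
// failure is recorded and reported at the end; otherwise bad names are skipped.
func filterDecryptable(names []string, decrypt func(string) (string, error), strict bool) ([]string, error) {
	out := names[:0] // filter in place, reusing the backing array
	errs := 0
	var firsterr error
	for _, name := range names {
		plain, err := decrypt(name)
		if err != nil {
			if strict {
				errs++
				if firsterr == nil {
					firsterr = err
				}
			}
			continue // skip the undecryptable name
		}
		out = append(out, plain)
	}
	if firsterr != nil {
		return nil, fmt.Errorf("there were %v undecryptable name errors. first error: %v", errs, firsterr)
	}
	return out, nil
}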
@@ -420,40 +350,11 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	return list.WithListP(ctx, dir, f)
-}
-
-// ListP lists the objects and directories of the Fs starting
-// from dir non recursively into out.
-//
-// dir should be "" to start from the root, and should not
-// have trailing slashes.
-//
-// This should return ErrDirNotFound if the directory isn't
-// found.
-//
-// It should call callback for each tranche of entries read.
-// These need not be returned in any particular order. If
-// callback returns an error then the listing will stop
-// immediately.
-func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
-	wrappedCallback := func(entries fs.DirEntries) error {
-		entries, err := f.encryptEntries(ctx, entries)
-		if err != nil {
-			return err
-		}
-		return callback(entries)
-	}
-	listP := f.Fs.Features().ListP
-	encryptedDir := f.cipher.EncryptDirName(dir)
-	if listP == nil {
-		entries, err := f.Fs.List(ctx, encryptedDir)
-		if err != nil {
-			return err
-		}
-		return wrappedCallback(entries)
-	}
-	return listP(ctx, encryptedDir, wrappedCallback)
+	entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
+	if err != nil {
+		return nil, err
+	}
+	return f.encryptEntries(ctx, entries)
 }
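
// An aside on the ListP removal above: the left-hand version prefers a paged
// listing when the wrapped backend offers one and otherwise adapts the
// one-shot List into a single callback. A standalone sketch of that fallback
// (all interface and function names here are illustrative):
package sketch

type lister interface {
	List(dir string) ([]string, error)
}

// pagedLister is the optional interface a backend may additionally implement.
type pagedLister interface {
	ListP(dir string, callback func(page []string) error) error
}

// listAll funnels both the paged and the one-shot paths through one callback.
func listAll(l lister, dir string, callback func(page []string) error) error {
	if pl, ok := l.(pagedLister); ok {
		return pl.ListP(dir, callback)
	}
	entries, err := l.List(dir)
	if err != nil {
		return err
	}
	return callback(entries)
}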
 // ListR lists the objects and directories of the Fs starting
@@ -495,8 +396,6 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
 
 // put implements Put or PutStream
 func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
-	ci := fs.GetConfig(ctx)
-
 	if f.opt.NoDataEncryption {
 		o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
 		if err == nil && o != nil {
@@ -514,9 +413,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
 	// Find a hash the destination supports to compute a hash of
 	// the encrypted data
 	ht := f.Fs.Hashes().GetOne()
-	if ci.IgnoreChecksum {
-		ht = hash.None
-	}
 	var hasher *hash.MultiHasher
 	if ht != hash.None {
 		hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
@@ -553,7 +449,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
 		if err != nil {
 			fs.Errorf(o, "Failed to remove corrupted object: %v", err)
 		}
-		return nil, fmt.Errorf("corrupted on transfer: %v encrypted hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash)
+		return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
 	}
 	fs.Debugf(src, "%v = %s OK", ht, srcHash)
 }
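
// An aside on the corruption check above: put hashes the encrypted stream as
// it is uploaded and compares the result with the hash the destination
// reports, removing the object on a mismatch. The hash-while-copying idea in
// isolation, using MD5 (names here are illustrative):
package sketch

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
)

// copyAndHash copies src to dst and returns the MD5 of the bytes that passed
// through, so the caller can compare it with the hash the remote reports.
func copyAndHash(dst io.Writer, src io.Reader) (string, error) {
	h := md5.New()
	if _, err := io.Copy(dst, io.TeeReader(src, h)); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

// verify mirrors the check in put: differing hashes mean a corrupted transfer.
func verify(srcHash, dstHash string) error {
	if srcHash != dstHash {
		return fmt.Errorf("corrupted on transfer: md5 hashes differ src %q vs dst %q", srcHash, dstHash)
	}
	return nil
}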
@@ -588,37 +484,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
 }
 
-// MkdirMetadata makes the root directory of the Fs object
-func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
-	do := f.Fs.Features().MkdirMetadata
-	if do == nil {
-		return nil, fs.ErrorNotImplemented
-	}
-	newDir, err := do(ctx, f.cipher.EncryptDirName(dir), metadata)
-	if err != nil {
-		return nil, err
-	}
-	var entries = make(fs.DirEntries, 0, 1)
-	err = f.addDir(ctx, &entries, newDir)
-	if err != nil {
-		return nil, err
-	}
-	newDir, ok := entries[0].(fs.Directory)
-	if !ok {
-		return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
-	}
-	return newDir, nil
-}
-
-// DirSetModTime sets the directory modtime for dir
-func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
-	do := f.Fs.Features().DirSetModTime
-	if do == nil {
-		return fs.ErrorNotImplemented
-	}
-	return do(ctx, f.cipher.EncryptDirName(dir), modTime)
-}
-
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
@@ -860,7 +725,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
 	}
 	out := make([]fs.Directory, len(dirs))
 	for i, dir := range dirs {
-		out[i] = fs.NewDirWrapper(f.cipher.EncryptDirName(dir.Remote()), dir)
+		out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
 	}
 	return do(ctx, out)
 }
@@ -957,7 +822,7 @@ Usage Example:
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
 	switch name {
 	case "decode":
 		out := make([]string, 0, len(arg))
@@ -1096,14 +961,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 // newDir returns a dir with the Name decrypted
 func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
+	newDir := fs.NewDirCopy(ctx, dir)
 	remote := dir.Remote()
 	decryptedRemote, err := f.cipher.DecryptDirName(remote)
 	if err != nil {
 		fs.Debugf(remote, "Undecryptable dir name: %v", err)
 	} else {
-		remote = decryptedRemote
+		newDir.SetRemote(decryptedRemote)
 	}
-	newDir := fs.NewDirWrapper(remote, dir)
 	return newDir
 }
@@ -1182,11 +1047,10 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
 	// Get the underlying object if there is one
 	if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
 		// Prefer direct interface assertion
-	} else if do, ok := o.ObjectInfo.(*fs.OverrideRemote); ok {
-		// Unwrap if it is an operations.OverrideRemote
+	} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
+		// Otherwise likely is an operations.OverrideRemote
 		srcObj = do.UnWrap()
 	} else {
-		// Otherwise don't unwrap any further
 		return "", nil
 	}
 	// if this is wrapping a local object then we work out the hash
@@ -1281,17 +1145,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
 	return do.Metadata(ctx)
 }
 
-// SetMetadata sets metadata for an Object
-//
-// It should return fs.ErrorNotImplemented if it can't set metadata
-func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
-	do, ok := o.Object.(fs.SetMetadataer)
-	if !ok {
-		return fs.ErrorNotImplemented
-	}
-	return do.SetMetadata(ctx, metadata)
-}
-
 // MimeType returns the content type of the Object if
 // known, or "" if not
 //
@@ -1317,8 +1170,6 @@ var (
 	_ fs.Abouter         = (*Fs)(nil)
 	_ fs.Wrapper         = (*Fs)(nil)
 	_ fs.MergeDirser     = (*Fs)(nil)
-	_ fs.DirSetModTimer  = (*Fs)(nil)
-	_ fs.MkdirMetadataer = (*Fs)(nil)
 	_ fs.DirCacheFlusher = (*Fs)(nil)
 	_ fs.ChangeNotifier  = (*Fs)(nil)
 	_ fs.PublicLinker    = (*Fs)(nil)
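
// An aside on the var block above: assigning a typed nil to the blank
// identifier is Go's usual compile-time proof that a type satisfies an
// interface. The pattern in isolation:
package sketch

import "io"

type nopWriter struct{}

func (nopWriter) Write(p []byte) (int, error) { return len(p), nil }

// Costs nothing at runtime, but the build fails if nopWriter ever stops
// implementing io.Writer.
var _ io.Writer = (*nopWriter)(nil)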

View File

@@ -17,28 +17,41 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
+type testWrapper struct {
+	fs.ObjectInfo
+}
+
+// UnWrap returns the Object that this Object is wrapping or nil if it
+// isn't wrapping anything
+func (o testWrapper) UnWrap() fs.Object {
+	if o, ok := o.ObjectInfo.(fs.Object); ok {
+		return o
+	}
+	return nil
+}
+
 // Create a temporary local fs to upload things from
-func makeTempLocalFs(t *testing.T) (localFs fs.Fs) {
+func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
 	localFs, err := fs.TemporaryLocalFs(context.Background())
 	require.NoError(t, err)
-	t.Cleanup(func() {
+	cleanup = func() {
 		require.NoError(t, localFs.Rmdir(context.Background(), ""))
-	})
-	return localFs
+	}
+	return localFs, cleanup
 }
 
 // Upload a file to a remote
-func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object) {
+func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
 	inBuf := bytes.NewBufferString(contents)
 	t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
 	upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
 	obj, err := f.Put(context.Background(), inBuf, upSrc)
 	require.NoError(t, err)
-	t.Cleanup(func() {
+	cleanup = func() {
 		require.NoError(t, obj.Remove(context.Background()))
-	})
-	return obj
+	}
+	return obj, cleanup
 }
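
// An aside on the test-helper change above: the left-hand side registers
// teardown with t.Cleanup so callers need no explicit defer, while the
// right-hand side returns cleanup funcs instead. A tiny self-contained sketch
// of the t.Cleanup style (the helper and file pattern are illustrative):
package sketch

import (
	"os"
	"testing"
)

// makeTempFile creates a file whose handle is closed automatically when the
// test and its subtests finish; t.TempDir removes the file itself.
func makeTempFile(t *testing.T) *os.File {
	t.Helper()
	f, err := os.CreateTemp(t.TempDir(), "example-*")
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() {
		if err := f.Close(); err != nil {
			t.Errorf("close: %v", err)
		}
	})
	return f
}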
 // Test the ObjectInfo
@@ -52,9 +65,11 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
 		path = "_wrap"
 	}
 
-	localFs := makeTempLocalFs(t)
+	localFs, cleanupLocalFs := makeTempLocalFs(t)
+	defer cleanupLocalFs()
 
-	obj := uploadFile(t, localFs, path, contents)
+	obj, cleanupObj := uploadFile(t, localFs, path, contents)
+	defer cleanupObj()
 
 	// encrypt the data
 	inBuf := bytes.NewBufferString(contents)
@@ -68,7 +83,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
 	var oi fs.ObjectInfo = obj
 	if wrap {
 		// wrap the object in an fs.ObjectUnwrapper if required
-		oi = fs.NewOverrideRemote(oi, "new_remote")
+		oi = testWrapper{oi}
 	}
 
 	// wrap the object in a crypt for upload using the nonce we
@@ -101,13 +116,16 @@ func testComputeHash(t *testing.T, f *Fs) {
 		t.Skipf("%v: does not support hashes", f.Fs)
 	}
 
-	localFs := makeTempLocalFs(t)
+	localFs, cleanupLocalFs := makeTempLocalFs(t)
+	defer cleanupLocalFs()
 
 	// Upload a file to localFs as a test object
-	localObj := uploadFile(t, localFs, path, contents)
+	localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
+	defer cleanupLocalObj()
 
 	// Upload the same data to the remote Fs also
-	remoteObj := uploadFile(t, f, path, contents)
+	remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
+	defer cleanupRemoteObj()
 
 	// Calculate the expected Hash of the remote object
 	computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)

View File

@@ -24,7 +24,7 @@ func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName:                   *fstest.RemoteName,
 		NilObject:                    (*crypt.Object)(nil),
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 	})
 }
@@ -45,7 +45,7 @@ func TestStandardBase32(t *testing.T) {
 			{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
 			{Name: name, Key: "filename_encryption", Value: "standard"},
 		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
@@ -67,7 +67,7 @@ func TestStandardBase64(t *testing.T) {
 			{Name: name, Key: "filename_encryption", Value: "standard"},
 			{Name: name, Key: "filename_encoding", Value: "base64"},
 		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
@@ -89,7 +89,7 @@ func TestStandardBase32768(t *testing.T) {
 			{Name: name, Key: "filename_encryption", Value: "standard"},
 			{Name: name, Key: "filename_encoding", Value: "base32768"},
 		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
@@ -111,7 +111,7 @@ func TestOff(t *testing.T) {
 			{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
 			{Name: name, Key: "filename_encryption", Value: "off"},
 		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
@@ -137,7 +137,7 @@ func TestObfuscate(t *testing.T) {
 			{Name: name, Key: "filename_encryption", Value: "obfuscate"},
 		},
 		SkipBadWindowsCharacters:     true,
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
@@ -164,7 +164,7 @@ func TestNoDataObfuscate(t *testing.T) {
 			{Name: name, Key: "no_data_encryption", Value: "true"},
 		},
 		SkipBadWindowsCharacters:     true,
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})

View File

@@ -25,7 +25,7 @@ func Pad(n int, buf []byte) []byte {
 	}
 	length := len(buf)
 	padding := n - (length % n)
-	for range padding {
+	for i := 0; i < padding; i++ {
 		buf = append(buf, byte(padding))
 	}
 	if (len(buf) % n) != 0 {
@@ -54,7 +54,7 @@ func Unpad(n int, buf []byte) ([]byte, error) {
 	if padding == 0 {
 		return nil, ErrorPaddingTooShort
 	}
-	for i := range padding {
+	for i := 0; i < padding; i++ {
 		if buf[length-1-i] != byte(padding) {
 			return nil, ErrorPaddingNotAllTheSame
 		}
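
// An aside on Pad/Unpad above: they implement PKCS#7-style padding, where the
// pad byte equals the pad length. A quick self-contained round trip with a
// multiplier of 16, as used for AES blocks:
package main

import "fmt"

func main() {
	buf := []byte("hello")
	padding := 16 - len(buf)%16 // 11 here, so eleven 0x0b bytes are appended
	for i := 0; i < padding; i++ {
		buf = append(buf, byte(padding))
	}
	fmt.Printf("padded to %d bytes, last byte 0x%02x\n", len(buf), buf[len(buf)-1])
	// Unpadding reads the final byte and strips that many bytes; Unpad also
	// checks they all carry the same value before trusting them.
	n := int(buf[len(buf)-1])
	fmt.Printf("%q\n", buf[:len(buf)-n]) // "hello"
}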

View File

@@ -1,38 +0,0 @@
// Type definitions specific to Dataverse
package api
// DataverseDatasetResponse is returned by the Dataverse dataset API
type DataverseDatasetResponse struct {
Status string `json:"status"`
Data DataverseDataset `json:"data"`
}
// DataverseDataset is the representation of a dataset
type DataverseDataset struct {
LatestVersion DataverseDatasetVersion `json:"latestVersion"`
}
// DataverseDatasetVersion is the representation of a dataset version
type DataverseDatasetVersion struct {
LastUpdateTime string `json:"lastUpdateTime"`
Files []DataverseFile `json:"files"`
}
// DataverseFile is the representation of a file found in a dataset
type DataverseFile struct {
DirectoryLabel string `json:"directoryLabel"`
DataFile DataverseDataFile `json:"dataFile"`
}
// DataverseDataFile represents file metadata details
type DataverseDataFile struct {
ID int64 `json:"id"`
Filename string `json:"filename"`
ContentType string `json:"contentType"`
FileSize int64 `json:"filesize"`
OriginalFileFormat string `json:"originalFileFormat"`
OriginalFileSize int64 `json:"originalFileSize"`
OriginalFileName string `json:"originalFileName"`
MD5 string `json:"md5"`
}

View File

@@ -1,33 +0,0 @@
// Type definitions specific to InvenioRDM
package api
// InvenioRecordResponse is the representation of a record stored in InvenioRDM
type InvenioRecordResponse struct {
Links InvenioRecordResponseLinks `json:"links"`
}
// InvenioRecordResponseLinks represents a record's links
type InvenioRecordResponseLinks struct {
Self string `json:"self"`
}
// InvenioFilesResponse is the representation of a record's files
type InvenioFilesResponse struct {
Entries []InvenioFilesResponseEntry `json:"entries"`
}
// InvenioFilesResponseEntry is the representation of a file entry
type InvenioFilesResponseEntry struct {
Key string `json:"key"`
Checksum string `json:"checksum"`
Size int64 `json:"size"`
Updated string `json:"updated"`
MimeType string `json:"mimetype"`
Links InvenioFilesResponseEntryLinks `json:"links"`
}
// InvenioFilesResponseEntryLinks represents file links details
type InvenioFilesResponseEntryLinks struct {
Content string `json:"content"`
}

View File

@@ -1,26 +0,0 @@
// Package api has general type definitions for doi
package api
// DoiResolverResponse is returned by the DOI resolver API
//
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
type DoiResolverResponse struct {
ResponseCode int `json:"responseCode"`
Handle string `json:"handle"`
Values []DoiResolverResponseValue `json:"values"`
}
// DoiResolverResponseValue is a single handle record value
type DoiResolverResponseValue struct {
Index int `json:"index"`
Type string `json:"type"`
Data DoiResolverResponseValueData `json:"data"`
TTL int `json:"ttl"`
Timestamp string `json:"timestamp"`
}
// DoiResolverResponseValueData is the data held in a handle value
type DoiResolverResponseValueData struct {
Format string `json:"format"`
Value any `json:"value"`
}

View File

@@ -1,112 +0,0 @@
// Implementation for Dataverse
package doi
import (
"context"
"fmt"
"net/http"
"net/url"
"path"
"strings"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
// Returns true if resolvedURL is likely a DOI hosted on a Dataverse installation
func activateDataverse(resolvedURL *url.URL) (isActive bool) {
queryValues := resolvedURL.Query()
persistentID := queryValues.Get("persistentId")
return persistentID != ""
}
// Resolve the main API endpoint for a DOI hosted on a Dataverse installation
func resolveDataverseEndpoint(resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
queryValues := resolvedURL.Query()
persistentID := queryValues.Get("persistentId")
query := url.Values{}
query.Add("persistentId", persistentID)
endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/datasets/:persistentId/", RawQuery: query.Encode()})
return Dataverse, endpointURL, nil
}
// dataverseProvider implements the doiProvider interface for Dataverse installations
type dataverseProvider struct {
f *Fs
}
// ListEntries returns the full list of entries found at the remote, regardless of root
func (dp *dataverseProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
// Use the cache if populated
cachedEntries, found := dp.f.cache.GetMaybe("files")
if found {
parsedEntries, ok := cachedEntries.([]Object)
if ok {
for _, entry := range parsedEntries {
newEntry := entry
entries = append(entries, &newEntry)
}
return entries, nil
}
}
filesURL := dp.f.endpoint
var res *http.Response
var result api.DataverseDatasetResponse
opts := rest.Opts{
Method: "GET",
Path: strings.TrimLeft(filesURL.EscapedPath(), "/"),
Parameters: filesURL.Query(),
}
err = dp.f.pacer.Call(func() (bool, error) {
res, err = dp.f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("readDir failed: %w", err)
}
modTime, modTimeErr := time.Parse(time.RFC3339, result.Data.LatestVersion.LastUpdateTime)
if modTimeErr != nil {
fs.Logf(dp.f, "error: could not parse last update time %v", modTimeErr)
modTime = timeUnset
}
for _, file := range result.Data.LatestVersion.Files {
contentURLPath := fmt.Sprintf("/api/access/datafile/%d", file.DataFile.ID)
query := url.Values{}
query.Add("format", "original")
contentURL := dp.f.endpoint.ResolveReference(&url.URL{Path: contentURLPath, RawQuery: query.Encode()})
entry := &Object{
fs: dp.f,
remote: path.Join(file.DirectoryLabel, file.DataFile.Filename),
contentURL: contentURL.String(),
size: file.DataFile.FileSize,
modTime: modTime,
md5: file.DataFile.MD5,
contentType: file.DataFile.ContentType,
}
if file.DataFile.OriginalFileName != "" {
entry.remote = path.Join(file.DirectoryLabel, file.DataFile.OriginalFileName)
entry.size = file.DataFile.OriginalFileSize
entry.contentType = file.DataFile.OriginalFileFormat
}
entries = append(entries, entry)
}
// Populate the cache
cacheEntries := []Object{}
for _, entry := range entries {
cacheEntries = append(cacheEntries, *entry)
}
dp.f.cache.Put("files", cacheEntries)
return entries, nil
}
func newDataverseProvider(f *Fs) doiProvider {
return &dataverseProvider{
f: f,
}
}

View File

@@ -1,649 +0,0 @@
// Package doi provides a filesystem interface for digital objects identified by DOIs.
//
// See: https://www.doi.org/the-identifier/what-is-a-doi/
package doi
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"path"
"strings"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/cache"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
const (
// the URL of the DOI resolver
//
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
doiResolverAPIURL = "https://doi.org/api"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
var (
errorReadOnly = errors.New("doi remotes are read only")
timeUnset = time.Unix(0, 0)
)
func init() {
fsi := &fs.RegInfo{
Name: "doi",
Description: "DOI datasets",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "doi",
Help: "The DOI or the doi.org URL.",
Required: true,
}, {
Name: fs.ConfigProvider,
Help: `DOI provider.
The DOI provider can be set when rclone does not automatically recognize a supported DOI provider.`,
Examples: []fs.OptionExample{
{
Value: "auto",
Help: "Auto-detect provider",
},
{
Value: string(Zenodo),
Help: "Zenodo",
}, {
Value: string(Dataverse),
Help: "Dataverse",
}, {
Value: string(Invenio),
Help: "Invenio",
}},
Required: false,
Advanced: true,
}, {
Name: "doi_resolver_api_url",
Help: `The URL of the DOI resolver API to use.
The DOI resolver can be set for testing or for cases when the canonical DOI resolver API cannot be used.
Defaults to "https://doi.org/api".`,
Required: false,
Advanced: true,
}},
}
fs.Register(fsi)
}
// Provider defines the type of provider hosting the DOI
type Provider string
const (
// Zenodo provider, see https://zenodo.org
Zenodo Provider = "zenodo"
// Dataverse provider, see https://dataverse.harvard.edu
Dataverse Provider = "dataverse"
// Invenio provider, see https://inveniordm.docs.cern.ch
Invenio Provider = "invenio"
)
// Options defines the configuration for this backend
type Options struct {
Doi string `config:"doi"` // The DOI, a digital identifier of an object, usually a dataset
Provider string `config:"provider"` // The DOI provider
DoiResolverAPIURL string `config:"doi_resolver_api_url"` // The URL of the DOI resolver API to use.
}
// Fs stores the interface to the remote HTTP files
type Fs struct {
name string // name of this remote
root string // the path we are working on
provider Provider // the DOI provider
doiProvider doiProvider // the interface used to interact with the DOI provider
features *fs.Features // optional features
opt Options // options for this backend
ci *fs.ConfigInfo // global config
endpoint *url.URL // the main API endpoint for this remote
endpointURL string // endpoint as a string
srv *rest.Client // the connection to the server
pacer *fs.Pacer // pacer for API calls
cache *cache.Cache // a cache for the remote metadata
}
// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
type Object struct {
fs *Fs // what this object is part of
remote string // the remote path
contentURL string // the URL where the contents of the file can be downloaded
size int64 // size of the object
modTime time.Time // modification time of the object
contentType string // content type of the object
md5 string // MD5 hash of the object content
}
// doiProvider is the interface used to list objects in a DOI
type doiProvider interface {
// ListEntries returns the full list of entries found at the remote, regardless of root
ListEntries(ctx context.Context) (entries []*Object, err error)
}
// Parse the input string as a DOI
// Examples:
// 10.1000/182 -> 10.1000/182
// https://doi.org/10.1000/182 -> 10.1000/182
// doi:10.1000/182 -> 10.1000/182
func parseDoi(doi string) string {
doiURL, err := url.Parse(doi)
if err != nil {
return doi
}
if doiURL.Scheme == "doi" {
return strings.TrimLeft(strings.TrimPrefix(doi, "doi:"), "/")
}
if strings.HasSuffix(doiURL.Hostname(), "doi.org") {
return strings.TrimLeft(doiURL.Path, "/")
}
return doi
}
// Resolve a DOI to a URL
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
func resolveDoiURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (doiURL *url.URL, err error) {
resolverURL := opt.DoiResolverAPIURL
if resolverURL == "" {
resolverURL = doiResolverAPIURL
}
var result api.DoiResolverResponse
params := url.Values{}
params.Add("index", "1")
opts := rest.Opts{
Method: "GET",
RootURL: resolverURL,
Path: "/handles/" + opt.Doi,
Parameters: params,
}
err = pacer.Call(func() (bool, error) {
res, err := srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, err
}
if result.ResponseCode != 1 {
return nil, fmt.Errorf("could not resolve DOI (error code %d)", result.ResponseCode)
}
resolvedURLStr := ""
for _, value := range result.Values {
if value.Type == "URL" && value.Data.Format == "string" {
valueStr, ok := value.Data.Value.(string)
if !ok {
return nil, fmt.Errorf("could not resolve DOI (incorrect response format)")
}
resolvedURLStr = valueStr
}
}
resolvedURL, err := url.Parse(resolvedURLStr)
if err != nil {
return nil, err
}
return resolvedURL, nil
}
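
// An aside on resolveDoiURL above: behind rclone's rest client this is one
// GET against the public doi.org handle API. A self-contained sketch with
// net/http, with the response structs trimmed to the fields used here:
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type handleValue struct {
	Type string `json:"type"`
	Data struct {
		Format string `json:"format"`
		Value  any    `json:"value"`
	} `json:"data"`
}

type handleResponse struct {
	ResponseCode int           `json:"responseCode"`
	Values       []handleValue `json:"values"`
}

func main() {
	res, err := http.Get("https://doi.org/api/handles/10.5281/zenodo.2600782?index=1")
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	var parsed handleResponse
	if err := json.NewDecoder(res.Body).Decode(&parsed); err != nil {
		panic(err)
	}
	// responseCode 1 means the handle resolved; the URL lives in the values.
	for _, v := range parsed.Values {
		if v.Type == "URL" && v.Data.Format == "string" {
			fmt.Println("resolves to:", v.Data.Value)
		}
	}
}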
// Resolve the passed configuration into a provider and endpoint
func resolveEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (provider Provider, endpoint *url.URL, err error) {
resolvedURL, err := resolveDoiURL(ctx, srv, pacer, opt)
if err != nil {
return "", nil, err
}
switch opt.Provider {
case string(Dataverse):
return resolveDataverseEndpoint(resolvedURL)
case string(Invenio):
return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
case string(Zenodo):
return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
}
hostname := strings.ToLower(resolvedURL.Hostname())
if hostname == "dataverse.harvard.edu" || activateDataverse(resolvedURL) {
return resolveDataverseEndpoint(resolvedURL)
}
if hostname == "zenodo.org" || strings.HasSuffix(hostname, ".zenodo.org") {
return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
}
if activateInvenio(ctx, srv, pacer, resolvedURL) {
return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
}
return "", nil, fmt.Errorf("provider '%s' is not supported", resolvedURL.Hostname())
}
// Make the http connection from the passed options
func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err error) {
provider, endpoint, err := resolveEndpoint(ctx, f.srv, f.pacer, opt)
if err != nil {
return false, err
}
// Update f with the new parameters
f.srv.SetRoot(endpoint.ResolveReference(&url.URL{Path: "/"}).String())
f.endpoint = endpoint
f.endpointURL = endpoint.String()
f.provider = provider
f.opt.Provider = string(provider)
switch f.provider {
case Dataverse:
f.doiProvider = newDataverseProvider(f)
case Invenio, Zenodo:
f.doiProvider = newInvenioProvider(f)
default:
return false, fmt.Errorf("provider type '%s' not supported", f.provider)
}
// Determine if the root is a file
entries, err := f.doiProvider.ListEntries(ctx)
if err != nil {
return false, err
}
for _, entry := range entries {
if entry.remote == f.root {
isFile = true
break
}
}
return isFile, nil
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this res and err
// deserve to be retried. It returns the err as a convenience.
func shouldRetry(ctx context.Context, res *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
}
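
// An aside on shouldRetry above: fed into rclone's pacer it amounts to a
// bounded retry loop with backoff on the listed status codes. A standalone
// sketch of that loop (names and constants are illustrative):
package sketch

import (
	"net/http"
	"time"
)

var retryable = map[int]bool{429: true, 500: true, 502: true, 503: true, 504: true, 509: true}

// getWithRetry retries a GET with exponential backoff and hands back the
// first non-retryable response, or whatever the final attempt produced.
func getWithRetry(url string, attempts int) (*http.Response, error) {
	var res *http.Response
	var err error
	sleep := 10 * time.Millisecond
	for i := 0; i < attempts; i++ {
		res, err = http.Get(url)
		if err == nil && !retryable[res.StatusCode] {
			return res, nil
		}
		if i == attempts-1 {
			break // out of attempts, return what we have
		}
		if err == nil {
			res.Body.Close() // discard the retryable response before retrying
		}
		time.Sleep(sleep)
		sleep *= 2 // back off, roughly like the pacer's decay
	}
	return res, err
}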
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
root = strings.Trim(root, "/")
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
opt.Doi = parseDoi(opt.Doi)
client := fshttp.NewClient(ctx)
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
srv: rest.NewClient(client),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
cache: cache.New(),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
isFile, err := f.httpConnection(ctx, opt)
if err != nil {
return nil, err
}
if isFile {
// return an error with an fs which points to the parent
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.root = newRoot
return f, fs.ErrorIsFile
}
return f, nil
}
// Name returns the configured name of the file system
func (f *Fs) Name() string {
return f.name
}
// Root returns the root for the filesystem
func (f *Fs) Root() string {
return f.root
}
// String returns the URL for the filesystem
func (f *Fs) String() string {
return fmt.Sprintf("DOI %s", f.opt.Doi)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision is the remote http file system's modtime precision, which we have no way of knowing. We estimate at 1s
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Hashes returns hash.MD5, the one hash type the supported providers report
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return errorReadOnly
}
// Remove a remote http file object
func (o *Object) Remove(ctx context.Context) error {
return errorReadOnly
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return errorReadOnly
}
// NewObject creates a new remote http file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
entries, err := f.doiProvider.ListEntries(ctx)
if err != nil {
return nil, err
}
remoteFullPath := remote
if f.root != "" {
remoteFullPath = path.Join(f.root, remote)
}
for _, entry := range entries {
if entry.Remote() == remoteFullPath {
return entry, nil
}
}
return nil, fs.ErrorObjectNotFound
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
fileEntries, err := f.doiProvider.ListEntries(ctx)
if err != nil {
return nil, fmt.Errorf("error listing %q: %w", dir, err)
}
fullDir := path.Join(f.root, dir)
if fullDir != "" {
fullDir += "/"
}
dirPaths := map[string]bool{}
for _, entry := range fileEntries {
// First, filter out files not in `fullDir`
if !strings.HasPrefix(entry.remote, fullDir) {
continue
}
// Then, find entries in subfolders
remotePath := entry.remote
if fullDir != "" {
remotePath = strings.TrimLeft(strings.TrimPrefix(remotePath, fullDir), "/")
}
parts := strings.SplitN(remotePath, "/", 2)
if len(parts) == 1 {
newEntry := *entry
newEntry.remote = path.Join(dir, remotePath)
entries = append(entries, &newEntry)
} else {
dirPaths[path.Join(dir, parts[0])] = true
}
}
for dirPath := range dirPaths {
entry := fs.NewDir(dirPath, time.Time{})
entries = append(entries, entry)
}
return entries, nil
}
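
// An aside on List above: the providers return a flat list of full paths, so
// directories are synthesized by splitting off the first path segment under
// the requested prefix. The core trick in isolation (illustrative helper):
package sketch

import (
	"path"
	"strings"
)

// childrenOf returns the immediate files and subdirectories of dir, given a
// flat set of full paths, mirroring the SplitN logic used by List.
func childrenOf(paths []string, dir string) (files, dirs []string) {
	prefix := ""
	if dir != "" {
		prefix = dir + "/"
	}
	seen := map[string]bool{}
	for _, p := range paths {
		if !strings.HasPrefix(p, prefix) {
			continue
		}
		rest := strings.TrimPrefix(p, prefix)
		parts := strings.SplitN(rest, "/", 2)
		if len(parts) == 1 {
			files = append(files, path.Join(dir, parts[0])) // a file at this level
		} else if !seen[parts[0]] {
			seen[parts[0]] = true
			dirs = append(dirs, path.Join(dir, parts[0])) // first sight of a subdir
		}
	}
	return files, dirs
}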
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
// Fs is the filesystem this remote http file object is located within
func (o *Object) Fs() fs.Info {
return o.fs
}
// String returns a description of the Object
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the name of the remote HTTP file, relative to the fs root
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the MD5 checksum of the object content, as reported by the provider
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5, nil
}
// Size returns the size in bytes of the remote http file
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the remote http file
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// SetModTime sets the modification and access time to the specified time
//
// it also updates the info field
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return errorReadOnly
}
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
func (o *Object) Storable() bool {
return true
}
// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.size)
opts := rest.Opts{
Method: "GET",
RootURL: o.contentURL,
Options: options,
}
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
// Handle non-compliant redirects
if res.Header.Get("Location") != "" {
newURL, err := res.Location()
if err == nil {
opts.RootURL = newURL.String()
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
}
}
return res.Body, nil
}
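
// An aside on Open above: some providers answer with a Location header that
// the HTTP client has not followed, so the request is reissued against the
// new URL once. The same idea as a standalone sketch (illustrative helper):
package sketch

import (
	"fmt"
	"io"
	"net/http"
)

// fetchFollowingLocation performs a GET and, if the response still carries a
// Location header, retries once against that URL before returning the body.
func fetchFollowingLocation(client *http.Client, url string) (io.ReadCloser, error) {
	res, err := client.Get(url)
	if err != nil {
		return nil, fmt.Errorf("open failed: %w", err)
	}
	if res.Header.Get("Location") != "" {
		next, locErr := res.Location()
		if locErr == nil {
			res.Body.Close() // drop the redirect response before re-requesting
			res, err = client.Get(next.String())
			if err != nil {
				return nil, fmt.Errorf("open failed: %w", err)
			}
		}
	}
	return res.Body, nil
}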
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return errorReadOnly
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.contentType
}
var commandHelp = []fs.CommandHelp{{
Name: "metadata",
Short: "Show metadata about the DOI.",
Long: `This command returns a JSON object with some information about the DOI.
rclone backend metadata doi:
It returns a JSON object representing metadata about the DOI.
`,
}, {
Name: "set",
Short: "Set command for updating the config parameters.",
Long: `This set command can be used to update the config parameters
for a running doi backend.
Usage Examples:
rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
The option keys are named as they are in the config file.
This rebuilds the connection to the doi backend when it is called with
the new parameters. Only new parameters need be passed as the values
will default to those currently in use.
It doesn't return anything.
`,
}}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "metadata":
return f.ShowMetadata(ctx)
case "set":
newOpt := f.opt
err := configstruct.Set(configmap.Simple(opt), &newOpt)
if err != nil {
return nil, fmt.Errorf("reading config: %w", err)
}
_, err = f.httpConnection(ctx, &newOpt)
if err != nil {
return nil, fmt.Errorf("updating session: %w", err)
}
f.opt = newOpt
keys := []string{}
for k := range opt {
keys = append(keys, k)
}
fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
return nil, nil
default:
return nil, fs.ErrorCommandNotFound
}
}
// ShowMetadata returns some metadata about the corresponding DOI
func (f *Fs) ShowMetadata(ctx context.Context) (metadata interface{}, err error) {
doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
if err != nil {
return nil, err
}
info := map[string]any{}
info["DOI"] = f.opt.Doi
info["URL"] = doiURL.String()
info["metadataURL"] = f.endpointURL
info["provider"] = f.provider
return info, nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)

View File

@@ -1,260 +0,0 @@
package doi
import (
"context"
"crypto/md5"
"encoding/hex"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"net/url"
"sort"
"strings"
"testing"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/hash"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var remoteName = "TestDoi"
func TestParseDoi(t *testing.T) {
// 10.1000/182 -> 10.1000/182
doi := "10.1000/182"
parsed := parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// https://doi.org/10.1000/182 -> 10.1000/182
doi = "https://doi.org/10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// https://dx.doi.org/10.1000/182 -> 10.1000/182
doi = "https://dx.doi.org/10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// doi:10.1000/182 -> 10.1000/182
doi = "doi:10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// doi://10.1000/182 -> 10.1000/182
doi = "doi://10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
}
// prepareMockDoiResolverServer prepares a test server to resolve DOIs
func prepareMockDoiResolverServer(t *testing.T, resolvedURL string) (doiResolverAPIURL string) {
mux := http.NewServeMux()
// Handle requests for resolving DOIs
mux.HandleFunc("GET /api/handles/{handle...}", func(w http.ResponseWriter, r *http.Request) {
// Check that we are resolving a DOI
handle := strings.TrimPrefix(r.URL.Path, "/api/handles/")
assert.NotEmpty(t, handle)
index := r.URL.Query().Get("index")
assert.Equal(t, "1", index)
// Return the most basic response
result := api.DoiResolverResponse{
ResponseCode: 1,
Handle: handle,
Values: []api.DoiResolverResponseValue{
{
Index: 1,
Type: "URL",
Data: api.DoiResolverResponseValueData{
Format: "string",
Value: resolvedURL,
},
},
},
}
resultBytes, err := json.Marshal(result)
require.NoError(t, err)
w.Header().Add("Content-Type", "application/json")
_, err = w.Write(resultBytes)
require.NoError(t, err)
})
// Make the test server
ts := httptest.NewServer(mux)
// Close the server at the end of the test
t.Cleanup(ts.Close)
return ts.URL + "/api"
}
func md5Sum(text string) string {
hash := md5.Sum([]byte(text))
return hex.EncodeToString(hash[:])
}
// prepareMockZenodoServer prepares a test server that mocks Zenodo.org
func prepareMockZenodoServer(t *testing.T, files map[string]string) *httptest.Server {
mux := http.NewServeMux()
// Handle requests for a single record
mux.HandleFunc("GET /api/records/{recordID...}", func(w http.ResponseWriter, r *http.Request) {
// Check that we are returning data about a single record
recordID := strings.TrimPrefix(r.URL.Path, "/api/records/")
assert.NotEmpty(t, recordID)
// Return the most basic response
selfURL, err := url.Parse("http://" + r.Host)
require.NoError(t, err)
selfURL = selfURL.JoinPath(r.URL.String())
result := api.InvenioRecordResponse{
Links: api.InvenioRecordResponseLinks{
Self: selfURL.String(),
},
}
resultBytes, err := json.Marshal(result)
require.NoError(t, err)
w.Header().Add("Content-Type", "application/json")
_, err = w.Write(resultBytes)
require.NoError(t, err)
})
// Handle requests for listing files in a record
mux.HandleFunc("GET /api/records/{record}/files", func(w http.ResponseWriter, r *http.Request) {
// Return the most basic response
filesBaseURL, err := url.Parse("http://" + r.Host)
require.NoError(t, err)
filesBaseURL = filesBaseURL.JoinPath("/api/files/")
entries := []api.InvenioFilesResponseEntry{}
for filename, contents := range files {
entries = append(entries,
api.InvenioFilesResponseEntry{
Key: filename,
Checksum: md5Sum(contents),
Size: int64(len(contents)),
Updated: time.Now().UTC().Format(time.RFC3339),
MimeType: "text/plain; charset=utf-8",
Links: api.InvenioFilesResponseEntryLinks{
Content: filesBaseURL.JoinPath(filename).String(),
},
},
)
}
result := api.InvenioFilesResponse{
Entries: entries,
}
resultBytes, err := json.Marshal(result)
require.NoError(t, err)
w.Header().Add("Content-Type", "application/json")
_, err = w.Write(resultBytes)
require.NoError(t, err)
})
// Handle requests for file contents
mux.HandleFunc("/api/files/{file}", func(w http.ResponseWriter, r *http.Request) {
// Check that we are returning the contents of a file
filename := strings.TrimPrefix(r.URL.Path, "/api/files/")
assert.NotEmpty(t, filename)
contents, found := files[filename]
if !found {
w.WriteHeader(404)
return
}
// Return the most basic response
_, err := w.Write([]byte(contents))
require.NoError(t, err)
})
// Make the test server
ts := httptest.NewServer(mux)
// Close the server at the end of the test
t.Cleanup(ts.Close)
return ts
}
func TestZenodoRemote(t *testing.T) {
recordID := "2600782"
doi := "10.5281/zenodo.2600782"
// The files in the dataset
files := map[string]string{
"README.md": "This is a dataset.",
"data.txt": "Some data",
}
ts := prepareMockZenodoServer(t, files)
resolvedURL := ts.URL + "/record/" + recordID
doiResolverAPIURL := prepareMockDoiResolverServer(t, resolvedURL)
testConfig := configmap.Simple{
"type": "doi",
"doi": doi,
"provider": "zenodo",
"doi_resolver_api_url": doiResolverAPIURL,
}
f, err := NewFs(context.Background(), remoteName, "", testConfig)
require.NoError(t, err)
// Test listing the DOI files
entries, err := f.List(context.Background(), "")
require.NoError(t, err)
sort.Sort(entries)
require.Equal(t, len(files), len(entries))
e := entries[0]
assert.Equal(t, "README.md", e.Remote())
assert.Equal(t, int64(18), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
e = entries[1]
assert.Equal(t, "data.txt", e.Remote())
assert.Equal(t, int64(9), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
// Test reading the DOI files
o, err := f.NewObject(context.Background(), "README.md")
require.NoError(t, err)
assert.Equal(t, int64(18), o.Size())
md5Hash, err := o.Hash(context.Background(), hash.MD5)
require.NoError(t, err)
assert.Equal(t, "464352b1cab5240e44528a56fda33d9d", md5Hash)
fd, err := o.Open(context.Background())
require.NoError(t, err)
data, err := io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, []byte(files["README.md"]), data)
do, ok := o.(fs.MimeTyper)
require.True(t, ok)
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
o, err = f.NewObject(context.Background(), "data.txt")
require.NoError(t, err)
assert.Equal(t, int64(9), o.Size())
md5Hash, err = o.Hash(context.Background(), hash.MD5)
require.NoError(t, err)
assert.Equal(t, "5b82f8bf4df2bfb0e66ccaa7306fd024", md5Hash)
fd, err = o.Open(context.Background())
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, []byte(files["data.txt"]), data)
do, ok = o.(fs.MimeTyper)
require.True(t, ok)
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
}

View File

@@ -1,16 +0,0 @@
// Test DOI filesystem interface
package doi
import (
"testing"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestDoi:",
NilObject: (*Object)(nil),
})
}

View File

@@ -1,164 +0,0 @@
// Implementation for InvenioRDM
package doi
import (
"context"
"fmt"
"net/http"
"net/url"
"regexp"
"strings"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
var invenioRecordRegex = regexp.MustCompile(`\/records?\/(.+)`)
// Returns true if resolvedURL is likely a DOI hosted on an InvenioRDM installation
func activateInvenio(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (isActive bool) {
_, _, err := resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
return err == nil
}
// Resolve the main API endpoint for a DOI hosted on an InvenioRDM installation
func resolveInvenioEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
var res *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: resolvedURL.String(),
}
err = pacer.Call(func() (bool, error) {
res, err = srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err)
})
if err != nil {
return "", nil, err
}
// First, attempt to grab the API URL from the headers
var linksetURL *url.URL
links := parseLinkHeader(res.Header.Get("Link"))
for _, link := range links {
if link.Rel == "linkset" && link.Type == "application/linkset+json" {
parsed, err := url.Parse(link.Href)
if err == nil {
linksetURL = parsed
break
}
}
}
if linksetURL != nil {
endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, linksetURL)
if err == nil {
return Invenio, endpoint, nil
}
fs.Logf(nil, "using linkset URL failed: %s", err.Error())
}
// If there is no linkset header, try to grab the record ID from the URL
recordID := ""
resURL := res.Request.URL
match := invenioRecordRegex.FindStringSubmatch(resURL.EscapedPath())
if match != nil {
recordID = match[1]
guessedURL := res.Request.URL.ResolveReference(&url.URL{
Path: "/api/records/" + recordID,
})
endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, guessedURL)
if err == nil {
return Invenio, endpoint, nil
}
fs.Logf(nil, "guessing the URL failed: %s", err.Error())
}
return "", nil, fmt.Errorf("could not resolve the Invenio API endpoint for '%s'", resolvedURL.String())
}
func checkInvenioAPIURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (endpoint *url.URL, err error) {
var result api.InvenioRecordResponse
opts := rest.Opts{
Method: "GET",
RootURL: resolvedURL.String(),
}
err = pacer.Call(func() (bool, error) {
res, err := srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, err
}
if result.Links.Self == "" {
return nil, fmt.Errorf("could not parse API response from '%s'", resolvedURL.String())
}
return url.Parse(result.Links.Self)
}
// invenioProvider implements the doiProvider interface for InvenioRDM installations
type invenioProvider struct {
f *Fs
}
// ListEntries returns the full list of entries found at the remote, regardless of root
func (ip *invenioProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
// Use the cache if populated
cachedEntries, found := ip.f.cache.GetMaybe("files")
if found {
parsedEntries, ok := cachedEntries.([]Object)
if ok {
for _, entry := range parsedEntries {
newEntry := entry
entries = append(entries, &newEntry)
}
return entries, nil
}
}
filesURL := ip.f.endpoint.JoinPath("files")
var result api.InvenioFilesResponse
opts := rest.Opts{
Method: "GET",
Path: strings.TrimLeft(filesURL.EscapedPath(), "/"),
}
err = ip.f.pacer.Call(func() (bool, error) {
res, err := ip.f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("readDir failed: %w", err)
}
for _, file := range result.Entries {
modTime, modTimeErr := time.Parse(time.RFC3339, file.Updated)
if modTimeErr != nil {
fs.Logf(ip.f, "error: could not parse last update time %v", modTimeErr)
modTime = timeUnset
}
entry := &Object{
fs: ip.f,
remote: file.Key,
contentURL: file.Links.Content,
size: file.Size,
modTime: modTime,
contentType: file.MimeType,
md5: strings.TrimPrefix(file.Checksum, "md5:"),
}
entries = append(entries, entry)
}
// Populate the cache
cacheEntries := []Object{}
for _, entry := range entries {
cacheEntries = append(cacheEntries, *entry)
}
ip.f.cache.Put("files", cacheEntries)
return entries, nil
}
func newInvenioProvider(f *Fs) doiProvider {
return &invenioProvider{
f: f,
}
}
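
The resolution strategy above is: follow the DOI redirect, prefer the linkset Link header if present, otherwise guess /api/records/{id} from the final URL, and in both cases validate the candidate by checking that the JSON response carries links.self. Below is a minimal standalone sketch of that validation step using only the standard library instead of rclone's rest client and pacer; the record URL is just an example taken from the link header test further down.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// recordResponse mirrors the small part of api.InvenioRecordResponse needed here
type recordResponse struct {
	Links struct {
		Self string `json:"self"`
	} `json:"links"`
}

func main() {
	// Example InvenioRDM record endpoint (same record as in the tests below)
	resp, err := http.Get("https://zenodo.org/api/records/15063252")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var rec recordResponse
	if err := json.NewDecoder(resp.Body).Decode(&rec); err != nil {
		panic(err)
	}
	if rec.Links.Self == "" {
		panic("response did not look like an InvenioRDM record")
	}
	fmt.Println("resolved API endpoint:", rec.Links.Self)
}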

View File

@@ -1,75 +0,0 @@
package doi
import (
"regexp"
"strings"
)
var linkRegex = regexp.MustCompile(`^<(.+)>$`)
var valueRegex = regexp.MustCompile(`^"(.+)"$`)
// headerLink represents a link as presented in HTTP headers
// MDN Reference: https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Link
type headerLink struct {
Href string
Rel string
Type string
Extras map[string]string
}
func parseLinkHeader(header string) (links []headerLink) {
for _, link := range strings.Split(header, ",") {
link = strings.TrimSpace(link)
parsed := parseLink(link)
if parsed != nil {
links = append(links, *parsed)
}
}
return links
}
func parseLink(link string) (parsedLink *headerLink) {
var parts []string
for _, part := range strings.Split(link, ";") {
parts = append(parts, strings.TrimSpace(part))
}
match := linkRegex.FindStringSubmatch(parts[0])
if match == nil {
return nil
}
result := &headerLink{
Href: match[1],
Extras: map[string]string{},
}
for _, keyValue := range parts[1:] {
parsed := parseKeyValue(keyValue)
if parsed != nil {
key, value := parsed[0], parsed[1]
switch strings.ToLower(key) {
case "rel":
result.Rel = value
case "type":
result.Type = value
default:
result.Extras[key] = value
}
}
}
return result
}
func parseKeyValue(keyValue string) []string {
parts := strings.SplitN(keyValue, "=", 2)
if parts[0] == "" || len(parts) < 2 {
return nil
}
match := valueRegex.FindStringSubmatch(parts[1])
if match != nil {
parts[1] = match[1]
return parts
}
return parts
}

View File

@@ -1,44 +0,0 @@
package doi
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestParseLinkHeader(t *testing.T) {
header := "<https://zenodo.org/api/records/15063252> ; rel=\"linkset\" ; type=\"application/linkset+json\""
links := parseLinkHeader(header)
expected := headerLink{
Href: "https://zenodo.org/api/records/15063252",
Rel: "linkset",
Type: "application/linkset+json",
Extras: map[string]string{},
}
assert.Contains(t, links, expected)
header = "<https://api.example.com/issues?page=2>; rel=\"prev\", <https://api.example.com/issues?page=4>; rel=\"next\", <https://api.example.com/issues?page=10>; rel=\"last\", <https://api.example.com/issues?page=1>; rel=\"first\""
links = parseLinkHeader(header)
expectedList := []headerLink{{
Href: "https://api.example.com/issues?page=2",
Rel: "prev",
Type: "",
Extras: map[string]string{},
}, {
Href: "https://api.example.com/issues?page=4",
Rel: "next",
Type: "",
Extras: map[string]string{},
}, {
Href: "https://api.example.com/issues?page=10",
Rel: "last",
Type: "",
Extras: map[string]string{},
}, {
Href: "https://api.example.com/issues?page=1",
Rel: "first",
Type: "",
Extras: map[string]string{},
}}
assert.Equal(t, links, expectedList)
}

View File

@@ -1,47 +0,0 @@
// Implementation for Zenodo
package doi
import (
"context"
"fmt"
"net/url"
"regexp"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
var zenodoRecordRegex = regexp.MustCompile(`zenodo[.](.+)`)
// Resolve the main API endpoint for a DOI hosted on Zenodo
func resolveZenodoEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL, doi string) (provider Provider, endpoint *url.URL, err error) {
match := zenodoRecordRegex.FindStringSubmatch(doi)
if match == nil {
return "", nil, fmt.Errorf("could not derive API endpoint URL from '%s'", resolvedURL.String())
}
recordID := match[1]
endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/records/" + recordID})
var result api.InvenioRecordResponse
opts := rest.Opts{
Method: "GET",
RootURL: endpointURL.String(),
}
err = pacer.Call(func() (bool, error) {
res, err := srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return "", nil, err
}
endpointURL, err = url.Parse(result.Links.Self)
if err != nil {
return "", nil, err
}
return Zenodo, endpointURL, nil
}

File diff suppressed because it is too large

View File

@@ -7,6 +7,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"mime" "mime"
"os" "os"
"path" "path"
@@ -77,7 +78,7 @@ var additionalMimeTypes = map[string]string{
// Load the example export formats into exportFormats for testing // Load the example export formats into exportFormats for testing
func TestInternalLoadExampleFormats(t *testing.T) { func TestInternalLoadExampleFormats(t *testing.T) {
fetchFormatsOnce.Do(func() {}) fetchFormatsOnce.Do(func() {})
buf, err := os.ReadFile(filepath.FromSlash("test/about.json")) buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
var about struct { var about struct {
ExportFormats map[string][]string `json:"exportFormats,omitempty"` ExportFormats map[string][]string `json:"exportFormats,omitempty"`
ImportFormats map[string][]string `json:"importFormats,omitempty"` ImportFormats map[string][]string `json:"importFormats,omitempty"`
@@ -95,7 +96,7 @@ func TestInternalParseExtensions(t *testing.T) {
wantErr error wantErr error
}{ }{
{"doc", []string{".doc"}, nil}, {"doc", []string{".doc"}, nil},
{" docx ,XLSX, pptx,svg,md", []string{".docx", ".xlsx", ".pptx", ".svg", ".md"}, nil}, {" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
{"docx,svg,Docx", []string{".docx", ".svg"}, nil}, {"docx,svg,Docx", []string{".docx", ".svg"}, nil},
{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)}, {"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
} { } {
@@ -243,15 +244,6 @@ func (f *Fs) InternalTestShouldRetry(t *testing.T) {
quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403) quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, quotaExceededRetry) assert.False(t, quotaExceededRetry)
assert.Equal(t, quotaExceededError, expectedQuotaError) assert.Equal(t, quotaExceededError, expectedQuotaError)
sqEItem := googleapi.ErrorItem{
Reason: "storageQuotaExceeded",
}
generic403.Errors[0] = sqEItem
expectedStorageQuotaError := fserrors.FatalError(&generic403)
storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, storageQuotaExceededRetry)
assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError)
} }
func (f *Fs) InternalTestDocumentImport(t *testing.T) { func (f *Fs) InternalTestDocumentImport(t *testing.T) {
@@ -479,8 +471,8 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
require.NoError(t, f.Purge(ctx, "trashDir")) require.NoError(t, f.Purge(ctx, "trashDir"))
} }
// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyOrMoveID // TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) { func (f *Fs) InternalTestCopyID(t *testing.T) {
ctx := context.Background() ctx := context.Background()
obj, err := f.NewObject(ctx, existingFile) obj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err) require.NoError(t, err)
@@ -498,7 +490,7 @@ func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
} }
t.Run("BadID", func(t *testing.T) { t.Run("BadID", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "moveid", "ID-NOT-FOUND", dir+"/") err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
require.Error(t, err) require.Error(t, err)
assert.Contains(t, err.Error(), "couldn't find id") assert.Contains(t, err.Error(), "couldn't find id")
}) })
@@ -506,71 +498,22 @@ func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
t.Run("Directory", func(t *testing.T) { t.Run("Directory", func(t *testing.T) {
rootID, err := f.dirCache.RootID(ctx, false) rootID, err := f.dirCache.RootID(ctx, false)
require.NoError(t, err) require.NoError(t, err)
err = f.copyOrMoveID(ctx, "moveid", rootID, dir+"/") err = f.copyID(ctx, rootID, dir+"/")
require.Error(t, err) require.Error(t, err)
assert.Contains(t, err.Error(), "can't moveid directory") assert.Contains(t, err.Error(), "can't copy directory")
}) })
t.Run("MoveWithoutDestName", func(t *testing.T) { t.Run("WithoutDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/") err = f.copyID(ctx, o.id, dir+"/")
require.NoError(t, err) require.NoError(t, err)
checkFile(path.Base(existingFile)) checkFile(path.Base(existingFile))
}) })
t.Run("CopyWithoutDestName", func(t *testing.T) { t.Run("WithDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/") err = f.copyID(ctx, o.id, dir+"/potato.txt")
require.NoError(t, err)
checkFile(path.Base(existingFile))
})
t.Run("MoveWithDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/potato.txt")
require.NoError(t, err) require.NoError(t, err)
checkFile("potato.txt") checkFile("potato.txt")
}) })
t.Run("CopyWithDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/potato.txt")
require.NoError(t, err)
checkFile("potato.txt")
})
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/Query
func (f *Fs) InternalTestQuery(t *testing.T) {
ctx := context.Background()
var err error
t.Run("BadQuery", func(t *testing.T) {
_, err = f.query(ctx, "this is a bad query")
require.Error(t, err)
assert.Contains(t, err.Error(), "failed to execute query")
})
t.Run("NoMatch", func(t *testing.T) {
results, err := f.query(ctx, fmt.Sprintf("name='%s' and name!='%s'", existingSubDir, existingSubDir))
require.NoError(t, err)
assert.Len(t, results, 0)
})
t.Run("GoodQuery", func(t *testing.T) {
pathSegments := strings.Split(existingFile, "/")
var parent string
for _, item := range pathSegments {
// the file name contains ' characters which must be escaped
escapedItem := f.opt.Enc.FromStandardName(item)
escapedItem = strings.ReplaceAll(escapedItem, `\`, `\\`)
escapedItem = strings.ReplaceAll(escapedItem, `'`, `\'`)
results, err := f.query(ctx, fmt.Sprintf("%strashed=false and name='%s'", parent, escapedItem))
require.NoError(t, err)
require.True(t, len(results) > 0)
for _, result := range results {
assert.True(t, len(result.Id) > 0)
assert.Equal(t, result.Name, item)
}
parent = fmt.Sprintf("'%s' in parents and ", results[0].Id)
}
})
} }
// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery // TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
@@ -578,7 +521,7 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
// Check set up for filtering // Check set up for filtering
assert.True(t, f.Features().FilterAware) assert.True(t, f.Features().FilterAware)
opt := &filter.Options{} opt := &filter.Opt{}
err := opt.MaxAge.Set("1h") err := opt.MaxAge.Set("1h")
assert.NoError(t, err) assert.NoError(t, err)
flt, err := filter.NewFilter(opt) flt, err := filter.NewFilter(opt)
@@ -659,8 +602,7 @@ func (f *Fs) InternalTest(t *testing.T) {
}) })
t.Run("Shortcuts", f.InternalTestShortcuts) t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash) t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyOrMoveID", f.InternalTestCopyOrMoveID) t.Run("CopyID", f.InternalTestCopyID)
t.Run("Query", f.InternalTestQuery)
t.Run("AgeQuery", f.InternalTestAgeQuery) t.Run("AgeQuery", f.InternalTestAgeQuery)
t.Run("ShouldRetry", f.InternalTestShouldRetry) t.Run("ShouldRetry", f.InternalTestShouldRetry)
} }

View File

@@ -1,639 +0,0 @@
package drive
import (
"context"
"encoding/json"
"fmt"
"maps"
"strconv"
"strings"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/errcount"
"golang.org/x/sync/errgroup"
drive "google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
)
// system metadata keys which this backend owns
var systemMetadataInfo = map[string]fs.MetadataHelp{
"content-type": {
Help: "The MIME type of the file.",
Type: "string",
Example: "text/plain",
},
"mtime": {
Help: "Time of last modification with mS accuracy.",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999Z07:00",
},
"btime": {
Help: "Time of file birth (creation) with mS accuracy. Note that this is only writable on fresh uploads - it can't be written for updates.",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999Z07:00",
},
"copy-requires-writer-permission": {
Help: "Whether the options to copy, print, or download this file, should be disabled for readers and commenters.",
Type: "boolean",
Example: "true",
},
"writers-can-share": {
Help: "Whether users with only writer permission can modify the file's permissions. Not populated and ignored when setting for items in shared drives.",
Type: "boolean",
Example: "false",
},
"viewed-by-me": {
Help: "Whether the file has been viewed by this user.",
Type: "boolean",
Example: "true",
ReadOnly: true,
},
"owner": {
Help: "The owner of the file. Usually an email address. Enable with --drive-metadata-owner.",
Type: "string",
Example: "user@example.com",
},
"permissions": {
Help: "Permissions in a JSON dump of Google drive format. On shared drives these will only be present if they aren't inherited. Enable with --drive-metadata-permissions.",
Type: "JSON",
Example: "{}",
},
"folder-color-rgb": {
Help: "The color for a folder or a shortcut to a folder as an RGB hex string.",
Type: "string",
Example: "881133",
},
"description": {
Help: "A short description of the file.",
Type: "string",
Example: "Contract for signing",
},
"starred": {
Help: "Whether the user has starred the file.",
Type: "boolean",
Example: "false",
},
"labels": {
Help: "Labels attached to this file in a JSON dump of Googled drive format. Enable with --drive-metadata-labels.",
Type: "JSON",
Example: "[]",
},
}
// Extra fields we need to fetch to implement the system metadata above
var metadataFields = googleapi.Field(strings.Join([]string{
"copyRequiresWriterPermission",
"description",
"folderColorRgb",
"hasAugmentedPermissions",
"owners",
"permissionIds",
"permissions",
"properties",
"starred",
"viewedByMe",
"viewedByMeTime",
"writersCanShare",
}, ","))
// Fields we need to read from permissions
var permissionsFields = googleapi.Field(strings.Join([]string{
"*",
"permissionDetails/*",
}, ","))
// getPermission returns permissions for the fileID and permissionID passed in
func (f *Fs) getPermission(ctx context.Context, fileID, permissionID string, useCache bool) (perm *drive.Permission, inherited bool, err error) {
f.permissionsMu.Lock()
defer f.permissionsMu.Unlock()
if useCache {
perm = f.permissions[permissionID]
if perm != nil {
return perm, false, nil
}
}
fs.Debugf(f, "Fetching permission %q", permissionID)
err = f.pacer.Call(func() (bool, error) {
perm, err = f.svc.Permissions.Get(fileID, permissionID).
Fields(permissionsFields).
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, false, err
}
inherited = len(perm.PermissionDetails) > 0 && perm.PermissionDetails[0].Inherited
cleanPermission(perm)
// cache the permission
f.permissions[permissionID] = perm
return perm, inherited, err
}
// Set the permissions on the info
func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions []*drive.Permission) (err error) {
errs := errcount.New()
for _, perm := range permissions {
if perm.Role == "owner" {
// ignore owner permissions - these are set with owner
continue
}
cleanPermissionForWrite(perm)
err := f.pacer.Call(func() (bool, error) {
_, err := f.svc.Permissions.Create(info.Id, perm).
SupportsAllDrives(true).
SendNotificationEmail(false).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
fs.Errorf(f, "Failed to set permission %s for %q: %v", perm.Role, perm.EmailAddress, err)
errs.Add(err)
}
}
err = errs.Err("failed to set permission")
if err != nil {
err = fserrors.NoRetryError(err)
}
return err
}
// Clean attributes from permissions which we can't write
func cleanPermissionForWrite(perm *drive.Permission) {
perm.Deleted = false
perm.DisplayName = ""
perm.Id = ""
perm.Kind = ""
perm.PermissionDetails = nil
perm.TeamDrivePermissionDetails = nil
}
// Clean and cache the permission if not already cached
func (f *Fs) cleanAndCachePermission(perm *drive.Permission) {
f.permissionsMu.Lock()
defer f.permissionsMu.Unlock()
cleanPermission(perm)
if _, found := f.permissions[perm.Id]; !found {
f.permissions[perm.Id] = perm
}
}
// Clean fields we don't need to keep from the permission
func cleanPermission(perm *drive.Permission) {
// DisplayName: Output only. The "pretty" name of the value of the
// permission. The following is a list of examples for each type of
// permission: * `user` - User's full name, as defined for their Google
// account, such as "Joe Smith." * `group` - Name of the Google Group,
// such as "The Company Administrators." * `domain` - String domain
// name, such as "thecompany.com." * `anyone` - No `displayName` is
// present.
perm.DisplayName = ""
// Kind: Output only. Identifies what kind of resource this is. Value:
// the fixed string "drive#permission".
perm.Kind = ""
// PermissionDetails: Output only. Details of whether the permissions on
// this shared drive item are inherited or directly on this item. This
// is an output-only field which is present only for shared drive items.
perm.PermissionDetails = nil
// PhotoLink: Output only. A link to the user's profile photo, if
// available.
perm.PhotoLink = ""
// TeamDrivePermissionDetails: Output only. Deprecated: Output only. Use
// `permissionDetails` instead.
perm.TeamDrivePermissionDetails = nil
}
// Fields we need to read from labels
var labelsFields = googleapi.Field(strings.Join([]string{
"*",
}, ","))
// getLabels returns labels for the fileID passed in
func (f *Fs) getLabels(ctx context.Context, fileID string) (labels []*drive.Label, err error) {
fs.Debugf(f, "Fetching labels for %q", fileID)
listLabels := f.svc.Files.ListLabels(fileID).
Fields(labelsFields).
Context(ctx)
for {
var info *drive.LabelList
err = f.pacer.Call(func() (bool, error) {
info, err = listLabels.Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
labels = append(labels, info.Labels...)
if info.NextPageToken == "" {
break
}
listLabels.PageToken(info.NextPageToken)
}
for _, label := range labels {
cleanLabel(label)
}
return labels, nil
}
// Set the labels on the info
func (f *Fs) setLabels(ctx context.Context, info *drive.File, labels []*drive.Label) (err error) {
if len(labels) == 0 {
return nil
}
req := drive.ModifyLabelsRequest{}
for _, label := range labels {
req.LabelModifications = append(req.LabelModifications, &drive.LabelModification{
FieldModifications: labelFieldsToFieldModifications(label.Fields),
LabelId: label.Id,
})
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Files.ModifyLabels(info.Id, &req).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to set labels: %w", err)
}
return nil
}
// Convert label fields into something which can set the fields
func labelFieldsToFieldModifications(fields map[string]drive.LabelField) (out []*drive.LabelFieldModification) {
for id, field := range fields {
var emails []string
for _, user := range field.User {
emails = append(emails, user.EmailAddress)
}
out = append(out, &drive.LabelFieldModification{
// FieldId: The ID of the field to be modified.
FieldId: id,
// SetDateValues: Replaces the value of a dateString Field with these
// new values. The string must be in the RFC 3339 full-date format:
// YYYY-MM-DD.
SetDateValues: field.DateString,
// SetIntegerValues: Replaces the value of an `integer` field with these
// new values.
SetIntegerValues: field.Integer,
// SetSelectionValues: Replaces a `selection` field with these new
// values.
SetSelectionValues: field.Selection,
// SetTextValues: Sets the value of a `text` field.
SetTextValues: field.Text,
// SetUserValues: Replaces a `user` field with these new values. The
// values must be valid email addresses.
SetUserValues: emails,
})
}
return out
}
// Clean fields we don't need to keep from the label
func cleanLabel(label *drive.Label) {
// Kind: This is always drive#label
label.Kind = ""
for name, field := range label.Fields {
// Kind: This is always drive#labelField.
field.Kind = ""
// Note the fields are copies so we need to write them
// back to the map
label.Fields[name] = field
}
}
// Parse the metadata from drive item
//
// It should return nil if there is no Metadata
func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err error) {
metadata := make(fs.Metadata, 16)
// Dump user metadata first as it overrides system metadata
maps.Copy(metadata, info.Properties)
// System metadata
metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission)
metadata["writers-can-share"] = fmt.Sprint(info.WritersCanShare)
metadata["viewed-by-me"] = fmt.Sprint(info.ViewedByMe)
metadata["content-type"] = info.MimeType
// Owners: Output only. The owner of this file. Only certain legacy
// files may have more than one owner. This field isn't populated for
// items in shared drives.
if o.fs.opt.MetadataOwner.IsSet(rwRead) && len(info.Owners) > 0 {
user := info.Owners[0]
if len(info.Owners) > 1 {
fs.Logf(o, "Ignoring more than 1 owner")
}
if user != nil {
id := user.EmailAddress
if id == "" {
id = user.DisplayName
}
metadata["owner"] = id
}
}
if o.fs.opt.MetadataPermissions.IsSet(rwRead) {
// We only write permissions out if they are not inherited.
//
// On My Drives permissions seem to be attached to every item
// so they will always be written out.
//
// On Shared Drives only non-inherited permissions will be
// written out.
// To read the inherited permissions flag will mean we need to
// read the permissions for each object and the cache will be
// useless. However shared drives don't return permissions
// only permissionIds so will need to fetch them for each
// object. We use HasAugmentedPermissions to see if there are
// special permissions before fetching them to save transactions.
// HasAugmentedPermissions: Output only. Whether there are permissions
// directly on this file. This field is only populated for items in
// shared drives.
if o.fs.isTeamDrive && !info.HasAugmentedPermissions {
// Don't process permissions if there aren't any specifically set
fs.Debugf(o, "Ignoring %d permissions and %d permissionIds as is shared drive with hasAugmentedPermissions false", len(info.Permissions), len(info.PermissionIds))
info.Permissions = nil
info.PermissionIds = nil
}
// PermissionIds: Output only. List of permission IDs for users with
// access to this file.
//
// Only process these if we have no Permissions
if len(info.PermissionIds) > 0 && len(info.Permissions) == 0 {
info.Permissions = make([]*drive.Permission, 0, len(info.PermissionIds))
g, gCtx := errgroup.WithContext(ctx)
g.SetLimit(o.fs.ci.Checkers)
var mu sync.Mutex // protect the info.Permissions from concurrent writes
for _, permissionID := range info.PermissionIds {
permissionID := permissionID
g.Go(func() error {
// must fetch the team drive ones individually to check the inherited flag
perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)
if err != nil {
return fmt.Errorf("failed to read permission: %w", err)
}
// Don't write inherited permissions out
if inherited {
return nil
}
// Don't write owner role out - these are covered by the owner metadata
if perm.Role == "owner" {
return nil
}
mu.Lock()
info.Permissions = append(info.Permissions, perm)
mu.Unlock()
return nil
})
}
err = g.Wait()
if err != nil {
return err
}
} else {
// Clean the fetched permissions
for _, perm := range info.Permissions {
o.fs.cleanAndCachePermission(perm)
}
}
// Permissions: Output only. The full list of permissions for the file.
// This is only available if the requesting user can share the file. Not
// populated for items in shared drives.
if len(info.Permissions) > 0 {
buf, err := json.Marshal(info.Permissions)
if err != nil {
return fmt.Errorf("failed to marshal permissions: %w", err)
}
metadata["permissions"] = string(buf)
}
// Permission propagation
// https://developers.google.com/drive/api/guides/manage-sharing#permission-propagation
// Leads me to believe that in non-shared drives, permissions
// are added to each item when you set permissions for a
// folder whereas in shared drives they are inherited and
// placed on the item directly.
}
if info.FolderColorRgb != "" {
metadata["folder-color-rgb"] = info.FolderColorRgb
}
if info.Description != "" {
metadata["description"] = info.Description
}
metadata["starred"] = fmt.Sprint(info.Starred)
metadata["btime"] = info.CreatedTime
metadata["mtime"] = info.ModifiedTime
if o.fs.opt.MetadataLabels.IsSet(rwRead) {
// FIXME would be really nice if we knew if files had labels
// before listing but we need to know all possible label IDs
// to get it in the listing.
labels, err := o.fs.getLabels(ctx, actualID(info.Id))
if err != nil {
return fmt.Errorf("failed to fetch labels: %w", err)
}
buf, err := json.Marshal(labels)
if err != nil {
return fmt.Errorf("failed to marshal labels: %w", err)
}
metadata["labels"] = string(buf)
}
o.metadata = &metadata
return nil
}
// Set the owner on the info
func (f *Fs) setOwner(ctx context.Context, info *drive.File, owner string) (err error) {
perm := drive.Permission{
Role: "owner",
EmailAddress: owner,
// Type: The type of the grantee. Valid values are: * `user` * `group` *
// `domain` * `anyone` When creating a permission, if `type` is `user`
// or `group`, you must provide an `emailAddress` for the user or group.
// When `type` is `domain`, you must provide a `domain`. There isn't
// extra information required for an `anyone` type.
Type: "user",
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Permissions.Create(info.Id, &perm).
SupportsAllDrives(true).
TransferOwnership(true).
// SendNotificationEmail(false). - required apparently!
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to set owner: %w", err)
}
return nil
}
// Call back to set metadata that can't be set on the upload/update
//
// The *drive.File passed in holds the current state of the drive.File
// and this should update it with any modifications.
type updateMetadataFn func(context.Context, *drive.File) error
// read the metadata from meta and write it into updateInfo
//
// update should be true if this is being used to create metadata for
// an update/PATCH call as the rules on what can be updated are
// slightly different there.
//
// It returns a callback which should be called to finish the updates
// after the data is uploaded.
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update, isFolder bool) (callback updateMetadataFn, err error) {
callbackFns := []updateMetadataFn{}
callback = func(ctx context.Context, info *drive.File) error {
for _, fn := range callbackFns {
err := fn(ctx, info)
if err != nil {
return err
}
}
return nil
}
// merge metadata into request and user metadata
for k, v := range meta {
k, v := k, v
// parse a boolean from v and write into out
parseBool := func(out *bool) error {
b, err := strconv.ParseBool(v)
if err != nil {
return fmt.Errorf("can't parse metadata %q = %q: %w", k, v, err)
}
*out = b
return nil
}
switch k {
case "copy-requires-writer-permission":
if isFolder {
fs.Debugf(f, "Ignoring %s=%s as can't set on folders", k, v)
} else if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
return nil, err
}
case "writers-can-share":
if !f.isTeamDrive {
if err := parseBool(&updateInfo.WritersCanShare); err != nil {
return nil, err
}
} else {
fs.Debugf(f, "Ignoring %s=%s as can't set on shared drives", k, v)
}
case "viewed-by-me":
// Can't write this
case "content-type":
updateInfo.MimeType = v
case "owner":
if !f.opt.MetadataOwner.IsSet(rwWrite) {
continue
}
// Can't set Owner on upload so need to set afterwards
callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
err := f.setOwner(ctx, info, v)
if err != nil && f.opt.MetadataOwner.IsSet(rwFailOK) {
fs.Errorf(f, "Ignoring error as failok is set: %v", err)
return nil
}
return err
})
case "permissions":
if !f.opt.MetadataPermissions.IsSet(rwWrite) {
continue
}
var perms []*drive.Permission
err := json.Unmarshal([]byte(v), &perms)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal permissions: %w", err)
}
// Can't set Permissions on upload so need to set afterwards
callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
err := f.setPermissions(ctx, info, perms)
if err != nil && f.opt.MetadataPermissions.IsSet(rwFailOK) {
// We've already logged the permissions errors individually here
fs.Debugf(f, "Ignoring error as failok is set: %v", err)
return nil
}
return err
})
case "labels":
if !f.opt.MetadataLabels.IsSet(rwWrite) {
continue
}
var labels []*drive.Label
err := json.Unmarshal([]byte(v), &labels)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal labels: %w", err)
}
// Can't set Labels on upload so need to set afterwards
callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
err := f.setLabels(ctx, info, labels)
if err != nil && f.opt.MetadataLabels.IsSet(rwFailOK) {
fs.Errorf(f, "Ignoring error as failok is set: %v", err)
return nil
}
return err
})
case "folder-color-rgb":
updateInfo.FolderColorRgb = v
case "description":
updateInfo.Description = v
case "starred":
if err := parseBool(&updateInfo.Starred); err != nil {
return nil, err
}
case "btime":
if update {
fs.Debugf(f, "Skipping btime metadata as can't update it on an existing file: %v", v)
} else {
updateInfo.CreatedTime = v
}
case "mtime":
updateInfo.ModifiedTime = v
default:
if updateInfo.Properties == nil {
updateInfo.Properties = make(map[string]string, 1)
}
updateInfo.Properties[k] = v
}
}
return callback, nil
}
// Fetch metadata and update updateInfo if --metadata is in use
func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, updateInfo *drive.File, update bool) (callback updateMetadataFn, err error) {
meta, err := fs.GetMetadataOptions(ctx, f, src, options)
if err != nil {
return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
}
callback, err = f.updateMetadata(ctx, updateInfo, meta, update, false)
if err != nil {
return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
}
return callback, nil
}
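
In practice these keys surface through rclone's generic metadata flags rather than anything drive-specific; for example (the remote and file names here are illustrative):

	# read system and user metadata for one file
	rclone lsjson --stat -M drive:file.txt

	# set writable system metadata on upload (requires -M)
	rclone copyto -M --metadata-set description="Contract for signing" local.txt drive:file.txt

Keys not listed in systemMetadataInfo fall through to the default case in updateMetadata and are stored as Google Drive file properties, i.e. user metadata.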

View File

@@ -177,7 +177,10 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
if start >= rx.ContentLength { if start >= rx.ContentLength {
break break
} }
reqSize = min(rx.ContentLength-start, int64(rx.f.opt.ChunkSize)) reqSize = rx.ContentLength - start
if reqSize >= int64(rx.f.opt.ChunkSize) {
reqSize = int64(rx.f.opt.ChunkSize)
}
chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize) chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
} else { } else {
// If size unknown read into buffer // If size unknown read into buffer

View File

@@ -8,22 +8,130 @@ package dropbox
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"sync"
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/atexit"
) )
const (
maxBatchSize = 1000 // max size the batch can be
defaultTimeoutSync = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
defaultTimeoutAsync = 10 * time.Second // kick off the batch if nothing added for this long (async)
defaultBatchSizeAsync = 100 // default batch size if async
)
// batcher holds info about the current items waiting for upload
type batcher struct {
f *Fs // Fs this batch is part of
mode string // configured batch mode
size int // maximum size for batch
timeout time.Duration // idle timeout for batch
async bool // whether we are using async batching
in chan batcherRequest // incoming items to batch
closed chan struct{} // close to indicate batcher shut down
atexit atexit.FnHandle // atexit handle
shutOnce sync.Once // make sure we shutdown once only
wg sync.WaitGroup // wait for shutdown
}
// batcherRequest holds an incoming request with a place for a reply
type batcherRequest struct {
commitInfo *files.UploadSessionFinishArg
result chan<- batcherResponse
}
// Return true if batcherRequest is the quit request
func (br *batcherRequest) isQuit() bool {
return br.commitInfo == nil
}
// Send this to get the engine to quit
var quitRequest = batcherRequest{}
// batcherResponse holds a response to be delivered to clients waiting
// for a batch to complete.
type batcherResponse struct {
err error
entry *files.FileMetadata
}
// newBatcher creates a new batcher structure
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
if size > maxBatchSize || size < 0 {
return nil, fmt.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
}
async := false
switch mode {
case "sync":
if size <= 0 {
ci := fs.GetConfig(ctx)
size = ci.Transfers
}
if timeout <= 0 {
timeout = defaultTimeoutSync
}
case "async":
if size <= 0 {
size = defaultBatchSizeAsync
}
if timeout <= 0 {
timeout = defaultTimeoutAsync
}
async = true
case "off":
size = 0
default:
return nil, fmt.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
}
b := &batcher{
f: f,
mode: mode,
size: size,
timeout: timeout,
async: async,
in: make(chan batcherRequest, size),
closed: make(chan struct{}),
}
if b.Batching() {
b.atexit = atexit.Register(b.Shutdown)
b.wg.Add(1)
go b.commitLoop(context.Background())
}
return b, nil
}
// Batching returns true if batching is active
func (b *batcher) Batching() bool {
return b.size > 0
}
// finishBatch commits the batch, returning a batch status to poll or maybe complete // finishBatch commits the batch, returning a batch status to poll or maybe complete
func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) { func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
var arg = &files.UploadSessionFinishBatchArg{ var arg = &files.UploadSessionFinishBatchArg{
Entries: items, Entries: items,
} }
err = f.pacer.Call(func() (bool, error) { err = b.f.pacer.Call(func() (bool, error) {
complete, err = f.srv.UploadSessionFinishBatchV2(arg) complete, err = b.f.srv.UploadSessionFinishBatchV2(arg)
if retry, err := shouldRetryExclude(ctx, err); !retry { // If error is insufficient space then don't retry
return retry, err if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
} }
// after the first chunk is uploaded, we retry everything except the excluded errors }
// after the first chunk is uploaded, we retry everything
return err != nil, err return err != nil, err
}) })
if err != nil { if err != nil {
@@ -32,10 +140,66 @@ func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinish
return complete, nil return complete, nil
} }
// Called by the batcher to commit a batch // finishBatchJobStatus waits for the batch to complete returning completed entries
func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []*files.FileMetadata, errors []error) (err error) { func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
if launchBatchStatus.AsyncJobId == "" {
return nil, errors.New("wait for batch completion: empty job ID")
}
var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := 100 * time.Millisecond
const maxSleepTime = 1 * time.Second
startTime := time.Now()
try := 1
for {
remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
if remaining < 0 {
break
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: launchBatchStatus.AsyncJobId,
})
return shouldRetry(ctx, err)
})
if err != nil {
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
} else {
if batchStatus.Tag == "complete" {
fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
return batchStatus.Complete, nil
}
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
}
time.Sleep(sleepTime)
sleepTime *= 2
if sleepTime > maxSleepTime {
sleepTime = maxSleepTime
}
try++
}
if err == nil {
err = errors.New("batch didn't complete")
}
return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
}
// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}
}
}
}()
desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
fs.Debugf(b.f, "Committing %s", desc)
// finalise the batch getting either a result or a job id to poll // finalise the batch getting either a result or a job id to poll
complete, err := f.finishBatch(ctx, items) complete, err := b.finishBatch(ctx, items)
if err != nil { if err != nil {
return err return err
} }
@@ -46,13 +210,19 @@ func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinish
return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries)) return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
} }
// Format results for return // Report results to clients
var (
errorTag = ""
errorCount = 0
)
for i := range results { for i := range results {
item := entries[i] item := entries[i]
resp := batcherResponse{}
if item.Tag == "success" { if item.Tag == "success" {
results[i] = item.Success resp.entry = item.Success
} else { } else {
errorTag := item.Tag errorCount++
errorTag = item.Tag
if item.Failure != nil { if item.Failure != nil {
errorTag = item.Failure.Tag errorTag = item.Failure.Tag
if item.Failure.LookupFailed != nil { if item.Failure.LookupFailed != nil {
@@ -65,9 +235,112 @@ func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinish
errorTag += "/" + item.Failure.PropertiesError.Tag errorTag += "/" + item.Failure.PropertiesError.Tag
} }
} }
errors[i] = fmt.Errorf("upload failed: %s", errorTag) resp.err = fmt.Errorf("batch upload failed: %s", errorTag)
}
if !b.async {
results[i] <- resp
}
}
// Show signalled so no need to report error to clients from now on
signalled = true
// Report an error if any failed in the batch
if errorTag != "" {
return fmt.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
}
fs.Debugf(b.f, "Committed %s", desc)
return nil
}
// commitLoop runs the commit engine in the background
func (b *batcher) commitLoop(ctx context.Context) {
var (
items []*files.UploadSessionFinishArg // current batch of uncommitted files
results []chan<- batcherResponse // current batch of clients awaiting results
idleTimer = time.NewTimer(b.timeout)
commit = func() {
err := b.commitBatch(ctx, items, results)
if err != nil {
fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
}
items, results = nil, nil
}
)
defer b.wg.Done()
defer idleTimer.Stop()
idleTimer.Stop()
outer:
for {
select {
case req := <-b.in:
if req.isQuit() {
break outer
}
items = append(items, req.commitInfo)
results = append(results, req.result)
idleTimer.Stop()
if len(items) >= b.size {
commit()
} else {
idleTimer.Reset(b.timeout)
}
case <-idleTimer.C:
if len(items) > 0 {
fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
commit()
} }
} }
return nil }
// commit any remaining items
if len(items) > 0 {
commit()
}
}
// Shutdown finishes any pending batches then shuts everything down
//
// Can be called from atexit handler
func (b *batcher) Shutdown() {
if !b.Batching() {
return
}
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Committing uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message
//
// Note that we don't close b.in because that will
// cause write to closed channel in Commit when we are
// exiting due to a signal.
b.in <- quitRequest
b.wg.Wait()
})
}
// Commit commits the file using a batch call, first adding it to the
// batch and then waiting for the batch to complete in a synchronous
// way if async is not set.
func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
select {
case <-b.closed:
return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
default:
}
fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
resp := make(chan batcherResponse, 1)
b.in <- batcherRequest{
commitInfo: commitInfo,
result: resp,
}
// If running async then don't wait for the result
if b.async {
return nil, nil
}
result := <-resp
return result.entry, result.err
} }
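
The shape of this batcher is a classic request/response channel pattern: Commit sends the item plus a one-shot reply channel into b.in, and commitLoop flushes either when the batch reaches b.size or when the idle timer fires. A stripped-down sketch of the same pattern, with the Dropbox specifics removed (all names here are illustrative):

package main

import (
	"fmt"
	"time"
)

type request struct {
	item   string
	result chan error
}

func commitLoop(in <-chan request, size int, timeout time.Duration) {
	var batch []request
	idle := time.NewTimer(timeout)
	idle.Stop() // nothing batched yet
	flush := func() {
		// commit the whole batch in one call, then signal every waiter
		for _, r := range batch {
			r.result <- nil
		}
		batch = nil
	}
	for {
		select {
		case r, ok := <-in:
			if !ok { // input closed: flush what's left and stop
				flush()
				return
			}
			batch = append(batch, r)
			idle.Stop()
			if len(batch) >= size {
				flush()
			} else {
				idle.Reset(timeout)
			}
		case <-idle.C:
			flush()
		}
	}
}

func main() {
	in := make(chan request)
	go commitLoop(in, 3, 100*time.Millisecond)
	res := make(chan error, 1)
	in <- request{item: "a", result: res}
	fmt.Println("commit error:", <-res) // nil, delivered after the idle flush
	close(in)
}

On the command line this engine is driven by the --dropbox-batch-mode, --dropbox-batch-size and --dropbox-batch-timeout options documented in the help text below.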

View File

@@ -55,7 +55,10 @@ func (d *digest) Write(p []byte) (n int, err error) {
n = len(p) n = len(p)
for len(p) > 0 { for len(p) > 0 {
d.writtenMore = true d.writtenMore = true
toWrite := min(bytesPerBlock-d.n, len(p)) toWrite := bytesPerBlock - d.n
if toWrite > len(p) {
toWrite = len(p)
}
_, err = d.blockHash.Write(p[:toWrite]) _, err = d.blockHash.Write(p[:toWrite])
if err != nil { if err != nil {
panic(hashReturnedError) panic(hashReturnedError)
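
For context, dbhash implements Dropbox's published content_hash scheme (https://www.dropbox.com/developers/reference/content-hash): SHA-256 each 4 MiB block, then SHA-256 the concatenation of the block digests; the Write method above is feeding bytesPerBlock-sized slices into the per-block hash. A minimal standalone sketch of the same scheme, independent of the dbhash package:

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

const blockSize = 4 * 1024 * 1024 // 4 MiB, as in the Dropbox spec

// contentHash hashes each block with SHA-256, then hashes the
// concatenated block digests to produce the final content hash.
func contentHash(r io.Reader) (string, error) {
	overall := sha256.New()
	buf := make([]byte, blockSize)
	for {
		n, err := io.ReadFull(r, buf)
		if n > 0 {
			block := sha256.Sum256(buf[:n])
			overall.Write(block[:])
		}
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			break // end of input, final (possibly short) block done
		}
		if err != nil {
			return "", err
		}
	}
	return fmt.Sprintf("%x", overall.Sum(nil)), nil
}

func main() {
	sum, err := contentHash(strings.NewReader("hello dropbox"))
	if err != nil {
		panic(err)
	}
	fmt.Println(sum)
}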

View File

@@ -11,7 +11,7 @@ import (
func testChunk(t *testing.T, chunk int) { func testChunk(t *testing.T, chunk int) {
data := make([]byte, chunk) data := make([]byte, chunk)
for i := range chunk { for i := 0; i < chunk; i++ {
data[i] = 'A' data[i] = 'A'
} }
for _, test := range []struct { for _, test := range []struct {

View File

@@ -47,8 +47,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/batcher"
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
@@ -60,7 +58,7 @@ import (
const ( const (
rcloneClientID = "5jcck7diasz0rqy" rcloneClientID = "5jcck7diasz0rqy"
rcloneEncryptedClientSecret = "fRS5vVLr2v6FbyXYnIgjwBuUAt0osq_QZTXAEcmZ7g" rcloneEncryptedClientSecret = "fRS5vVLr2v6FbyXYnIgjwBuUAt0osq_QZTXAEcmZ7g"
defaultMinSleep = fs.Duration(10 * time.Millisecond) minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential decayConstant = 2 // bigger for slower decay, exponential
// Upload chunk size - setting too small makes uploads slow. // Upload chunk size - setting too small makes uploads slow.
@@ -92,12 +90,9 @@ const (
maxFileNameLength = 255 maxFileNameLength = 255
) )
type exportAPIFormat string
type exportExtension string // dotless
var ( var (
// Description of how to auth for this app // Description of how to auth for this app
dropboxConfig = &oauthutil.Config{ dropboxConfig = &oauth2.Config{
Scopes: []string{ Scopes: []string{
"files.metadata.write", "files.metadata.write",
"files.content.write", "files.content.write",
@@ -112,8 +107,7 @@ var (
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize", // AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token", // TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
// }, // },
AuthURL: dropbox.OAuthEndpoint("").AuthURL, Endpoint: dropbox.OAuthEndpoint(""),
TokenURL: dropbox.OAuthEndpoint("").TokenURL,
ClientID: rcloneClientID, ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL, RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -127,28 +121,10 @@ var (
// Errors // Errors
errNotSupportedInSharedMode = fserrors.NoRetryError(errors.New("not supported in shared files mode")) errNotSupportedInSharedMode = fserrors.NoRetryError(errors.New("not supported in shared files mode"))
// Configure the batcher
defaultBatcherOptions = batcher.Options{
MaxBatchSize: 1000,
DefaultTimeoutSync: 500 * time.Millisecond,
DefaultTimeoutAsync: 10 * time.Second,
DefaultBatchSizeAsync: 100,
}
exportKnownAPIFormats = map[exportAPIFormat]exportExtension{
"markdown": "md",
"html": "html",
}
// Populated based on exportKnownAPIFormats
exportKnownExtensions = map[exportExtension]exportAPIFormat{}
paperExtension = ".paper"
paperTemplateExtension = ".papert"
) )
// Gets an oauth config with the right scopes // Gets an oauth config with the right scopes
func getOauthConfig(m configmap.Mapper) *oauthutil.Config { func getOauthConfig(m configmap.Mapper) *oauth2.Config {
// If not impersonating, use standard scopes // If not impersonating, use standard scopes
if impersonate, _ := m.Get("impersonate"); impersonate == "" { if impersonate, _ := m.Get("impersonate"); impersonate == "" {
return dropboxConfig return dropboxConfig
@@ -176,7 +152,7 @@ func init() {
}, },
}) })
}, },
Options: append(append(oauthutil.SharedOptions, []fs.Option{{ Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "chunk_size", Name: "chunk_size",
Help: fmt.Sprintf(`Upload chunk size (< %v). Help: fmt.Sprintf(`Upload chunk size (< %v).
@@ -208,7 +184,6 @@ v1.55 or later is in use everywhere.
`, `,
Default: "", Default: "",
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "shared_files", Name: "shared_files",
Help: `Instructs rclone to work on individual shared files. Help: `Instructs rclone to work on individual shared files.
@@ -231,16 +206,70 @@ are supported.
Note that we don't unmount the shared folder afterwards so the Note that we don't unmount the shared folder afterwards so the
--dropbox-shared-folders can be omitted after the first use of a particular --dropbox-shared-folders can be omitted after the first use of a particular
shared folder. shared folder.`,
See also --dropbox-root-namespace for an alternative way to work with shared
folders.`,
Default: false, Default: false,
Advanced: true, Advanced: true,
}, { }, {
Name: "pacer_min_sleep", Name: "batch_mode",
Default: defaultMinSleep, Help: `Upload file batching sync|async|off.
Help: "Minimum time to sleep between API calls.",
This sets the batch mode used by rclone.
For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)
This has 3 possible values
- off - no batching
- sync - batch uploads and check completion (default)
- async - batch upload and don't check completion
Rclone will close any outstanding batches when it exits which may cause
a delay on quit.
`,
Default: "sync",
Advanced: true,
}, {
Name: "batch_size",
Help: `Max number of files in upload batch.
This sets the batch size of files to upload. It has to be less than 1000.
By default this is 0 which means rclone will calculate the batch size
depending on the setting of batch_mode.
- batch_mode: async - default batch_size is 100
- batch_mode: sync - default batch_size is the same as --transfers
- batch_mode: off - not in use
Rclone will close any outstanding batches when it exits which may cause
a delay on quit.
Setting this is a great idea if you are uploading lots of small files
as it will make them a lot quicker. You can use --transfers 32 to
maximise throughput.
`,
Default: 0,
Advanced: true,
}, {
Name: "batch_timeout",
Help: `Max time to allow an idle upload batch before uploading.
If an upload batch is idle for more than this long then it will be
uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.
- batch_mode: async - default batch_timeout is 10s
- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: off - not in use
`,
Default: fs.Duration(0),
Advanced: true,
}, {
Name: "batch_commit_timeout",
Help: `Max time to wait for a batch to finish committing`,
Default: fs.Duration(10 * time.Minute),
Advanced: true, Advanced: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
@@ -255,48 +284,8 @@ folders.`,
encoder.EncodeDel | encoder.EncodeDel |
encoder.EncodeRightSpace | encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8, encoder.EncodeInvalidUtf8,
}, { }}...),
Name: "root_namespace",
Help: "Specify a different Dropbox namespace ID to use as the root for all paths.",
Default: "",
Advanced: true,
}, {
Name: "export_formats",
Help: `Comma separated list of preferred formats for exporting files
Certain Dropbox files can only be accessed by exporting them to another format.
These include Dropbox Paper documents.
For each such file, rclone will choose the first format on this list that Dropbox
considers valid. If none is valid, it will choose Dropbox's default format.
Known formats include: "html", "md" (markdown)`,
Default: fs.CommaSepList{"html", "md"},
Advanced: true,
}, {
Name: "skip_exports",
Help: "Skip exportable files in all listings.\n\nIf given, exportable files practically become invisible to rclone.",
Default: false,
Advanced: true,
}, {
Name: "show_all_exports",
Default: false,
Help: `Show all exportable files in listings.
Adding this flag will allow all exportable files to be server side copied.
Note that rclone doesn't add extensions to the exportable file names in this mode.
Do **not** use this flag when trying to download exportable files - rclone
will fail to download them.
`,
Advanced: true,
},
}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
}) })
for apiFormat, ext := range exportKnownAPIFormats {
exportKnownExtensions[ext] = apiFormat
}
} }
// Options defines the configuration for this backend // Options defines the configuration for this backend
@@ -308,13 +297,9 @@ type Options struct {
BatchMode string `config:"batch_mode"` BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"` BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"` BatchTimeout fs.Duration `config:"batch_timeout"`
BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
AsyncBatch bool `config:"async_batch"` AsyncBatch bool `config:"async_batch"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
RootNsid string `config:"root_namespace"`
ExportFormats fs.CommaSepList `config:"export_formats"`
SkipExports bool `config:"skip_exports"`
ShowAllExports bool `config:"show_all_exports"`
} }
// Fs represents a remote dropbox server // Fs represents a remote dropbox server
@@ -333,19 +318,9 @@ type Fs struct {
slashRootSlash string // root with "/" prefix and postfix, lowercase slashRootSlash string // root with "/" prefix and postfix, lowercase
pacer *fs.Pacer // To pace the API calls pacer *fs.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none ns string // The namespace we are using or "" for none
batcher *batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata] batcher *batcher // batch builder
exportExts []exportExtension
} }
type exportType int
const (
notExport exportType = iota // a regular file
exportHide // should be hidden
exportListOnly // listable, but can't export
exportExportable // can export
)
// Object describes a dropbox object // Object describes a dropbox object
// //
// Dropbox Objects always have full metadata // Dropbox Objects always have full metadata
@@ -357,9 +332,6 @@ type Object struct {
bytes int64 // size of the object bytes int64 // size of the object
modTime time.Time // time it was last modified modTime time.Time // time it was last modified
hash string // content_hash of the object hash string // content_hash of the object
exportType exportType
exportAPIFormat exportAPIFormat
} }
// Name of the remote (as passed into NewFs) // Name of the remote (as passed into NewFs)
@@ -382,46 +354,32 @@ func (f *Fs) Features() *fs.Features {
return f.features return f.features
} }
// Some specific errors which should be excluded from retries // shouldRetry returns a boolean as to whether this err deserves to be
func shouldRetryExclude(ctx context.Context, err error) (bool, error) { // retried. It returns the err as a convenience
if err == nil { func shouldRetry(ctx context.Context, err error) (bool, error) {
return false, err
}
if fserrors.ContextError(ctx, &err) { if fserrors.ContextError(ctx, &err) {
return false, err return false, err
} }
// First check for specific errors if err == nil {
// return false, err
// These come back from the SDK in a whole host of different }
// error types, but there doesn't seem to be a consistent way
// of reading the error cause, so here we just check using the
// error string which isn't perfect but does the job.
errString := err.Error() errString := err.Error()
// First check for specific errors
if strings.Contains(errString, "insufficient_space") { if strings.Contains(errString, "insufficient_space") {
return false, fserrors.FatalError(err) return false, fserrors.FatalError(err)
} else if strings.Contains(errString, "malformed_path") { } else if strings.Contains(errString, "malformed_path") {
return false, fserrors.NoRetryError(err) return false, fserrors.NoRetryError(err)
} }
return true, err
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if retry, err := shouldRetryExclude(ctx, err); !retry {
return retry, err
}
// Then handle any official Retry-After header from Dropbox's SDK // Then handle any official Retry-After header from Dropbox's SDK
switch e := err.(type) { switch e := err.(type) {
case auth.RateLimitAPIError: case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 { if e.RateLimitError.RetryAfter > 0 {
fs.Logf(nil, "Error %v. Too many requests or write operations. Trying again in %d seconds.", err, e.RateLimitError.RetryAfter) fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second) err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
} }
return true, err return true, err
} }
// Keep old behavior for backward compatibility // Keep old behavior for backward compatibility
errString := err.Error()
if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" { if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
return true, err return true, err
} }
@@ -466,7 +424,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
oldToken = strings.TrimSpace(oldToken) oldToken = strings.TrimSpace(oldToken)
if ok && oldToken != "" && oldToken[0] != '{' { if ok && oldToken != "" && oldToken[0] != '{' {
fs.Infof(name, "Converting token to new format") fs.Infof(name, "Converting token to new format")
newToken := fmt.Sprintf(`{"access_token":%q,"token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken) newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
err := config.SetValueAndSave(name, config.ConfigToken, newToken) err := config.SetValueAndSave(name, config.ConfigToken, newToken)
if err != nil { if err != nil {
return nil, fmt.Errorf("NewFS convert token: %w", err) return nil, fmt.Errorf("NewFS convert token: %w", err)
@@ -484,13 +442,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
name: name, name: name,
opt: *opt, opt: *opt,
ci: ci, ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
} }
batcherOptions := defaultBatcherOptions f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
batcherOptions.Mode = f.opt.BatchMode
batcherOptions.Size = f.opt.BatchSize
batcherOptions.Timeout = time.Duration(f.opt.BatchTimeout)
f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions)
if err != nil { if err != nil {
return nil, err return nil, err
} }
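
For context, the new batcher is the generic batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata] declared in the Fs struct above, configured here through Mode/Size/Timeout options. The toy below sketches the general idea of a size-triggered generic batcher; it is not the rclone lib/batcher API, and generics need Go 1.18+:

package main

import "fmt"

// Batcher collects items and commits them together once the batch is
// full - the same shape as batcher.Batcher[Item, Result] in the diff.
type Batcher[Item, Result any] struct {
	size   int
	items  []Item
	commit func([]Item) ([]Result, error)
}

func New[Item, Result any](size int, commit func([]Item) ([]Result, error)) *Batcher[Item, Result] {
	return &Batcher[Item, Result]{size: size, commit: commit}
}

// Add queues an item and commits the batch when it reaches the configured size.
func (b *Batcher[Item, Result]) Add(item Item) ([]Result, error) {
	b.items = append(b.items, item)
	if len(b.items) < b.size {
		return nil, nil
	}
	items := b.items
	b.items = nil
	return b.commit(items)
}

func main() {
	b := New(2, func(items []string) ([]int, error) {
		fmt.Println("committing batch:", items)
		return []int{len(items)}, nil
	})
	_, _ = b.Add("upload-1")
	res, _ := b.Add("upload-2") // batch full - commit fires
	fmt.Println(res)
}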
@@ -500,14 +454,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
HeaderGenerator: f.headerGenerator, HeaderGenerator: f.headerGenerator,
} }
for _, e := range opt.ExportFormats {
ext := exportExtension(e)
if exportKnownExtensions[ext] == "" {
return nil, fmt.Errorf("dropbox: unknown export format '%s'", e)
}
f.exportExts = append(f.exportExts, ext)
}
// unauthorized config for endpoints that fail with auth // unauthorized config for endpoints that fail with auth
ucfg := dropbox.Config{ ucfg := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
@@ -525,15 +471,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
members := []*team.UserSelectorArg{&user} members := []*team.UserSelectorArg{&user}
args := team.NewMembersGetInfoArgs(members) args := team.NewMembersGetInfoArgs(members)
memberIDs, err := f.team.MembersGetInfo(args) memberIds, err := f.team.MembersGetInfo(args)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err) return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
} }
if len(memberIDs) == 0 || memberIDs[0].MemberInfo == nil || memberIDs[0].MemberInfo.Profile == nil { if len(memberIds) == 0 || memberIds[0].MemberInfo == nil || memberIds[0].MemberInfo.Profile == nil {
return nil, fmt.Errorf("dropbox team member not found: %q", opt.Impersonate) return nil, fmt.Errorf("dropbox team member not found: %q", opt.Impersonate)
} }
cfg.AsMemberID = memberIDs[0].MemberInfo.Profile.MemberProfile.TeamMemberId cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
} }
f.srv = files.New(cfg) f.srv = files.New(cfg)
@@ -590,7 +536,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
default: default:
return nil, err return nil, err
} }
// if the mount failed we have to abort here // if the moint failed we have to abort here
} }
// if the mount succeeded it's now a normal folder in the users root namespace // if the mount succeeded it's now a normal folder in the users root namespace
// we disable shared folder mode and proceed normally // we disable shared folder mode and proceed normally
@@ -599,11 +545,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.features.Fill(ctx, f) f.features.Fill(ctx, f)
if f.opt.RootNsid != "" {
f.ns = f.opt.RootNsid
fs.Debugf(f, "Overriding root namespace to %q", f.ns)
} else if strings.HasPrefix(root, "/") {
// If root starts with / then use the actual root // If root starts with / then use the actual root
if strings.HasPrefix(root, "/") {
var acc *users.FullAccount var acc *users.FullAccount
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
acc, err = f.users.GetCurrentAccount() acc, err = f.users.GetCurrentAccount()
@@ -660,126 +603,38 @@ func (f *Fs) setRoot(root string) {
} }
} }
type getMetadataResult struct {
entry files.IsMetadata
notFound bool
err error
}
// getMetadata gets the metadata for a file or directory // getMetadata gets the metadata for a file or directory
func (f *Fs) getMetadata(ctx context.Context, objPath string) (res getMetadataResult) { func (f *Fs) getMetadata(ctx context.Context, objPath string) (entry files.IsMetadata, notFound bool, err error) {
res.err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
res.entry, res.err = f.srv.GetMetadata(&files.GetMetadataArg{ entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
Path: f.opt.Enc.FromStandardPath(objPath), Path: f.opt.Enc.FromStandardPath(objPath),
}) })
return shouldRetry(ctx, res.err) return shouldRetry(ctx, err)
}) })
if res.err != nil { if err != nil {
switch e := res.err.(type) { switch e := err.(type) {
case files.GetMetadataAPIError: case files.GetMetadataAPIError:
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound { if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
res.notFound = true notFound = true
res.err = nil err = nil
} }
} }
} }
return return
} }
// Get metadata such that the result would be exported with the given extension
// Return a channel that will eventually receive the metadata
func (f *Fs) getMetadataForExt(ctx context.Context, filePath string, wantExportExtension exportExtension) chan getMetadataResult {
ch := make(chan getMetadataResult, 1)
wantDownloadable := (wantExportExtension == "")
go func() {
defer close(ch)
res := f.getMetadata(ctx, filePath)
info, ok := res.entry.(*files.FileMetadata)
if !ok { // Can't check anything about file, just return what we have
ch <- res
return
}
// Return notFound if downloadability or extension doesn't match
if wantDownloadable != info.IsDownloadable {
ch <- getMetadataResult{notFound: true}
return
}
if !info.IsDownloadable {
_, ext := f.chooseExportFormat(info)
if ext != wantExportExtension {
ch <- getMetadataResult{notFound: true}
return
}
}
// Return our real result or error
ch <- res
}()
return ch
}
// For a given rclone path, figure out what the Dropbox path may be, in order of preference.
// Multiple paths might be plausible due to export path munging.
func (f *Fs) possibleMetadatas(ctx context.Context, filePath string) (ret []<-chan getMetadataResult) {
ret = []<-chan getMetadataResult{}
// Prefer an exact match
ret = append(ret, f.getMetadataForExt(ctx, filePath, ""))
// Check if we're plausibly an export path, otherwise we're done
if f.opt.SkipExports || f.opt.ShowAllExports {
return
}
dotted := path.Ext(filePath)
if dotted == "" {
return
}
ext := exportExtension(dotted[1:])
if exportKnownExtensions[ext] == "" {
return
}
// We might be an export path! Try all possibilities
base := strings.TrimSuffix(filePath, dotted)
// `foo.papert.md` will only come from `foo.papert`. Never check something like `foo.papert.paper`
if strings.HasSuffix(base, paperTemplateExtension) {
ret = append(ret, f.getMetadataForExt(ctx, base, ext))
return
}
// Otherwise, try both `foo.md` coming from `foo`, or from `foo.paper`
ret = append(ret, f.getMetadataForExt(ctx, base, ext))
ret = append(ret, f.getMetadataForExt(ctx, base+paperExtension, ext))
return
}
// getFileMetadata gets the metadata for a file // getFileMetadata gets the metadata for a file
func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (*files.FileMetadata, error) { func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *files.FileMetadata, err error) {
var res getMetadataResult entry, notFound, err := f.getMetadata(ctx, filePath)
if err != nil {
// Try all possible metadatas return nil, err
possibleMetadatas := f.possibleMetadatas(ctx, filePath)
for _, ch := range possibleMetadatas {
res = <-ch
if res.err != nil {
return nil, res.err
} }
if !res.notFound { if notFound {
break
}
}
if res.notFound {
return nil, fs.ErrorObjectNotFound return nil, fs.ErrorObjectNotFound
} }
fileInfo, ok := entry.(*files.FileMetadata)
fileInfo, ok := res.entry.(*files.FileMetadata)
if !ok { if !ok {
if _, ok = res.entry.(*files.FolderMetadata); ok { if _, ok = entry.(*files.FolderMetadata); ok {
return nil, fs.ErrorIsDir return nil, fs.ErrorIsDir
} }
return nil, fs.ErrorNotAFile return nil, fs.ErrorNotAFile
@@ -788,15 +643,15 @@ func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (*files.FileM
} }
// getDirMetadata gets the metadata for a directory // getDirMetadata gets the metadata for a directory
func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (*files.FolderMetadata, error) { func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (dirInfo *files.FolderMetadata, err error) {
res := f.getMetadata(ctx, dirPath) entry, notFound, err := f.getMetadata(ctx, dirPath)
if res.err != nil { if err != nil {
return nil, res.err return nil, err
} }
if res.notFound { if notFound {
return nil, fs.ErrorDirNotFound return nil, fs.ErrorDirNotFound
} }
dirInfo, ok := res.entry.(*files.FolderMetadata) dirInfo, ok := entry.(*files.FolderMetadata)
if !ok { if !ok {
return nil, fs.ErrorIsFile return nil, fs.ErrorIsFile
} }
@@ -832,7 +687,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil) return f.newObjectWithInfo(ctx, remote, nil)
} }
// listSharedFolders lists all available shared folders mounted and not mounted // listSharedFoldersApi lists all available shared folders mounted and not mounted
// we'll need the id later so we have to return them in original format // we'll need the id later so we have to return them in original format
func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err error) { func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err error) {
started := false started := false
@@ -864,7 +719,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
} }
for _, entry := range res.Entries { for _, entry := range res.Entries {
leaf := f.opt.Enc.ToStandardName(entry.Name) leaf := f.opt.Enc.ToStandardName(entry.Name)
d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId) d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
entries = append(entries, d) entries = append(entries, d)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -996,15 +851,16 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
var res *files.ListFolderResult var res *files.ListFolderResult
for { for {
if !started { if !started {
arg := files.NewListFolderArg(f.opt.Enc.FromStandardPath(root)) arg := files.ListFolderArg{
arg.Recursive = false Path: f.opt.Enc.FromStandardPath(root),
arg.Limit = 1000 Recursive: false,
Limit: 1000,
}
if root == "/" { if root == "/" {
arg.Path = "" // Specify root folder as empty string arg.Path = "" // Specify root folder as empty string
} }
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(arg) res, err = f.srv.ListFolder(&arg)
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
@@ -1050,18 +906,16 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
leaf := f.opt.Enc.ToStandardName(path.Base(entryPath)) leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
remote := path.Join(dir, leaf) remote := path.Join(dir, leaf)
if folderInfo != nil { if folderInfo != nil {
d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id) d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
entries = append(entries, d) entries = append(entries, d)
} else if fileInfo != nil { } else if fileInfo != nil {
o, err := f.newObjectWithInfo(ctx, remote, fileInfo) o, err := f.newObjectWithInfo(ctx, remote, fileInfo)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if o.(*Object).exportType.listable() {
entries = append(entries, o) entries = append(entries, o)
} }
} }
}
if !res.HasMore { if !res.HasMore {
break break
} }
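
This hunk swaps struct literals for the SDK's generated files.NewListFolderArg constructor, then overrides individual fields. The usual motivation, presumably also the one here, is that generated constructors fill in the API's default field values, which a zero-valued literal silently omits. A self-contained sketch with made-up types and defaults:

package main

import "fmt"

// ListFolderArg stands in for a generated SDK request type whose zero
// value is not a valid request (assumed here for illustration).
type ListFolderArg struct {
	Path      string
	Recursive bool
	Limit     uint32
}

// NewListFolderArg mirrors a generated constructor: it applies the API
// defaults so callers override only the fields they care about.
func NewListFolderArg(path string) *ListFolderArg {
	return &ListFolderArg{Path: path, Limit: 500} // 500 is a made-up default
}

func main() {
	arg := NewListFolderArg("/photos")
	arg.Limit = 1000 // override just one default
	fmt.Printf("%+v\n", *arg)
}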
@@ -1135,7 +989,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
if root == "/" { if root == "/" {
return errors.New("can't remove root directory") return errors.New("can't remove root directory")
} }
encRoot := f.opt.Enc.FromStandardPath(root)
if check { if check {
// check directory exists // check directory exists
@@ -1144,15 +997,18 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return fmt.Errorf("Rmdir: %w", err) return fmt.Errorf("Rmdir: %w", err)
} }
root = f.opt.Enc.FromStandardPath(root)
// check directory empty // check directory empty
arg := files.NewListFolderArg(encRoot) arg := files.ListFolderArg{
arg.Recursive = false Path: root,
Recursive: false,
}
if root == "/" { if root == "/" {
arg.Path = "" // Specify root folder as empty string arg.Path = "" // Specify root folder as empty string
} }
var res *files.ListFolderResult var res *files.ListFolderResult
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(arg) res, err = f.srv.ListFolder(&arg)
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
@@ -1165,7 +1021,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
// remove it // remove it
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.DeleteV2(&files.DeleteArg{Path: encRoot}) _, err = f.srv.DeleteV2(&files.DeleteArg{Path: root})
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
return err return err
@@ -1195,20 +1051,13 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantCopy // If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) { func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
if !ok { if !ok {
fs.Debugf(src, "Can't copy - not same remote type") fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy return nil, fs.ErrorCantCopy
} }
// Find and remove existing object
cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
if err != nil {
return nil, err
}
defer cleanup(&err)
// Temporary Object under construction // Temporary Object under construction
dstObj := &Object{ dstObj := &Object{
fs: f, fs: f,
@@ -1222,6 +1071,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()), ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()),
}, },
} }
var err error
var result *files.RelocationResult var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
result, err = f.srv.CopyV2(&arg) result, err = f.srv.CopyV2(&arg)
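
The new preamble delegates overwrite handling to a cleanup callback that inspects Copy's named error return when the deferred call fires. A sketch of that defer-with-error-pointer pattern, with a stand-in for operations.RemoveExisting (which, roughly, moves an existing object aside and restores or removes it depending on the outcome):

package main

import (
	"errors"
	"fmt"
)

// removeExisting is a stand-in for operations.RemoveExisting above: it
// returns a cleanup that reacts to whether *errp is set when it runs.
func removeExisting() (cleanup func(errp *error), err error) {
	fmt.Println("moving existing object aside")
	return func(errp *error) {
		if *errp != nil {
			fmt.Println("operation failed: restoring original")
			return
		}
		fmt.Println("operation succeeded: removing original")
	}, nil
}

func doCopy(fail bool) (err error) {
	cleanup, err := removeExisting()
	if err != nil {
		return err
	}
	defer cleanup(&err) // sees the final value of the named return
	if fail {
		return errors.New("copy failed")
	}
	return nil
}

func main() {
	_ = doCopy(false)
	_ = doCopy(true)
}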
@@ -1333,16 +1183,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil && createArg.Settings.Expires != nil && strings.Contains(err.Error(), sharing.SharedLinkSettingsErrorNotAuthorized) {
// Some plans can't create links with expiry
fs.Debugf(absPath, "can't create link with expiry, trying without")
createArg.Settings.Expires = nil
err = f.pacer.Call(func() (bool, error) {
linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
return shouldRetry(ctx, err)
})
}
if err != nil && strings.Contains(err.Error(), if err != nil && strings.Contains(err.Error(),
sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) { sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
fs.Debugf(absPath, "has a public link already, attempting to retrieve it") fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
@@ -1434,21 +1274,18 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return nil, err return nil, err
} }
var total uint64 var total uint64
used := q.Used
if q.Allocation != nil { if q.Allocation != nil {
if q.Allocation.Individual != nil { if q.Allocation.Individual != nil {
total += q.Allocation.Individual.Allocated total += q.Allocation.Individual.Allocated
} }
if q.Allocation.Team != nil { if q.Allocation.Team != nil {
total += q.Allocation.Team.Allocated total += q.Allocation.Team.Allocated
// Override used with Team.Used as this includes q.Used already
used = q.Allocation.Team.Used
} }
} }
usage = &fs.Usage{ usage = &fs.Usage{
Total: fs.NewUsageValue(int64(total)), // quota of bytes that can be used Total: fs.NewUsageValue(int64(total)), // quota of bytes that can be used
Used: fs.NewUsageValue(int64(used)), // bytes in use Used: fs.NewUsageValue(int64(q.Used)), // bytes in use
Free: fs.NewUsageValue(int64(total - used)), // bytes which can be uploaded before reaching the quota Free: fs.NewUsageValue(int64(total - q.Used)), // bytes which can be uploaded before reaching the quota
} }
return usage, nil return usage, nil
} }
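
Worked example of the corrected accounting: for a team member with q.Used = 100 GiB, Team.Allocated = 5 TiB and Team.Used = 1 TiB, the new code reports Total = 5 TiB, Used = 1 TiB (the team figure, which already includes the member's 100 GiB) and Free = 4 TiB. The old code would have reported Free = 4.9 TiB against a quota that is shared by the whole team.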
@@ -1507,14 +1344,16 @@ func (f *Fs) changeNotifyCursor(ctx context.Context) (cursor string, err error)
var startCursor *files.ListFolderGetLatestCursorResult var startCursor *files.ListFolderGetLatestCursorResult
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
arg := files.NewListFolderArg(f.opt.Enc.FromStandardPath(f.slashRoot)) arg := files.ListFolderArg{
arg.Recursive = true Path: f.opt.Enc.FromStandardPath(f.slashRoot),
Recursive: true,
}
if arg.Path == "/" { if arg.Path == "/" {
arg.Path = "" arg.Path = ""
} }
startCursor, err = f.srv.ListFolderGetLatestCursor(arg) startCursor, err = f.srv.ListFolderGetLatestCursor(&arg)
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
@@ -1618,50 +1457,8 @@ func (f *Fs) Shutdown(ctx context.Context) error {
return nil return nil
} }
func (f *Fs) chooseExportFormat(info *files.FileMetadata) (exportAPIFormat, exportExtension) {
// Find API export formats Dropbox supports for this file
// Sometimes Dropbox lists a format in ExportAs but not ExportOptions, so check both
ei := info.ExportInfo
dropboxFormatStrings := append([]string{ei.ExportAs}, ei.ExportOptions...)
// Find which extensions these correspond to
exportExtensions := map[exportExtension]exportAPIFormat{}
var dropboxPreferredAPIFormat exportAPIFormat
var dropboxPreferredExtension exportExtension
for _, format := range dropboxFormatStrings {
apiFormat := exportAPIFormat(format)
// Only consider formats we know about
if ext, ok := exportKnownAPIFormats[apiFormat]; ok {
if dropboxPreferredAPIFormat == "" {
dropboxPreferredAPIFormat = apiFormat
dropboxPreferredExtension = ext
}
exportExtensions[ext] = apiFormat
}
}
// See if the user picked a valid extension
for _, ext := range f.exportExts {
if apiFormat, ok := exportExtensions[ext]; ok {
return apiFormat, ext
}
}
// If no matches, prefer the first valid format Dropbox lists
return dropboxPreferredAPIFormat, dropboxPreferredExtension
}
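
As an illustration of the selection logic: if Dropbox reports ExportAs = "markdown" with ExportOptions = ["markdown", "html"] for a Paper doc, and the user configured the extensions ["html"], the loop over f.exportExts picks the HTML format; with no configured extensions (or none that match) the fallback returns markdown, the first format Dropbox listed that rclone knows about.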
// ------------------------------------------------------------ // ------------------------------------------------------------
func (et exportType) listable() bool {
return et != exportHide
}
// something we should _try_ to export
func (et exportType) exportable() bool {
return et == exportExportable || et == exportListOnly
}
// Fs returns the parent Fs // Fs returns the parent Fs
func (o *Object) Fs() fs.Info { func (o *Object) Fs() fs.Info {
return o.fs return o.fs
@@ -1705,32 +1502,6 @@ func (o *Object) Size() int64 {
return o.bytes return o.bytes
} }
func (o *Object) setMetadataForExport(info *files.FileMetadata) {
o.bytes = -1
o.hash = ""
if o.fs.opt.SkipExports {
o.exportType = exportHide
return
}
if o.fs.opt.ShowAllExports {
o.exportType = exportListOnly
return
}
var exportExt exportExtension
o.exportAPIFormat, exportExt = o.fs.chooseExportFormat(info)
if o.exportAPIFormat == "" {
o.exportType = exportHide
} else {
o.exportType = exportExportable
// get rid of any paper extension, if present
o.remote = strings.TrimSuffix(o.remote, paperExtension)
// add the export extension
o.remote += "." + string(exportExt)
}
}
// setMetadataFromEntry sets the fs data from a files.FileMetadata // setMetadataFromEntry sets the fs data from a files.FileMetadata
// //
// This isn't a complete set of metadata and has an inaccurate date // This isn't a complete set of metadata and has an inaccurate date
@@ -1739,10 +1510,6 @@ func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
o.bytes = int64(info.Size) o.bytes = int64(info.Size)
o.modTime = info.ClientModified o.modTime = info.ClientModified
o.hash = info.ContentHash o.hash = info.ContentHash
if !info.IsDownloadable {
o.setMetadataForExport(info)
}
return nil return nil
} }
@@ -1806,27 +1573,6 @@ func (o *Object) Storable() bool {
return true return true
} }
func (o *Object) export(ctx context.Context) (in io.ReadCloser, err error) {
if o.exportType == exportListOnly || o.exportAPIFormat == "" {
fs.Debugf(o.remote, "No export format found")
return nil, fs.ErrorObjectNotFound
}
arg := files.ExportArg{Path: o.id, ExportFormat: string(o.exportAPIFormat)}
var exportResult *files.ExportResult
err = o.fs.pacer.Call(func() (bool, error) {
exportResult, in, err = o.fs.srv.Export(&arg)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
o.bytes = int64(exportResult.ExportMetadata.Size)
o.hash = exportResult.ExportMetadata.ExportHash
return
}
// Open an object for read // Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.fs.opt.SharedFiles { if o.fs.opt.SharedFiles {
@@ -1846,10 +1592,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return return
} }
if o.exportType.exportable() {
return o.export(ctx)
}
fs.FixRangeOption(options, o.bytes) fs.FixRangeOption(options, o.bytes)
headers := fs.OpenOptionHeaders(options) headers := fs.OpenOptionHeaders(options)
arg := files.DownloadArg{ arg := files.DownloadArg{
@@ -1973,15 +1715,19 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
// If we are batching then we should have written all the data now // If we are batching then we should have written all the data now
// store the commit info now for a batch commit // store the commit info now for a batch commit
if o.fs.batcher.Batching() { if o.fs.batcher.Batching() {
return o.fs.batcher.Commit(ctx, o.remote, args) return o.fs.batcher.Commit(ctx, args)
} }
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
entry, err = o.fs.srv.UploadSessionFinish(args, nil) entry, err = o.fs.srv.UploadSessionFinish(args, nil)
if retry, err := shouldRetryExclude(ctx, err); !retry { // If error is insufficient space then don't retry
return retry, err if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
} }
// after the first chunk is uploaded, we retry everything except the excluded errors }
// after the first chunk is uploaded, we retry everything
return err != nil, err return err != nil, err
}) })
if err != nil { if err != nil {

View File

@@ -1,16 +1,9 @@
package dropbox package dropbox
import ( import (
"context"
"io"
"strings"
"testing" "testing"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
func TestInternalCheckPathLength(t *testing.T) { func TestInternalCheckPathLength(t *testing.T) {
@@ -49,54 +42,3 @@ func TestInternalCheckPathLength(t *testing.T) {
assert.Equal(t, test.ok, err == nil, test.in) assert.Equal(t, test.ok, err == nil, test.in)
} }
} }
func (f *Fs) importPaperForTest(t *testing.T) {
content := `# test doc
Lorem ipsum __dolor__ sit amet
[link](http://google.com)
`
arg := files.PaperCreateArg{
Path: f.slashRootSlash + "export.paper",
ImportFormat: &files.ImportFormat{Tagged: dropbox.Tagged{Tag: files.ImportFormatMarkdown}},
}
var err error
err = f.pacer.Call(func() (bool, error) {
reader := strings.NewReader(content)
_, err = f.srv.PaperCreate(&arg, reader)
return shouldRetry(context.Background(), err)
})
require.NoError(t, err)
}
func (f *Fs) InternalTestPaperExport(t *testing.T) {
ctx := context.Background()
f.importPaperForTest(t)
f.exportExts = []exportExtension{"html"}
obj, err := f.NewObject(ctx, "export.html")
require.NoError(t, err)
rc, err := obj.Open(ctx)
require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }()
buf, err := io.ReadAll(rc)
require.NoError(t, err)
text := string(buf)
for _, excerpt := range []string{
"Lorem ipsum",
"<b>dolor</b>",
`href="http://google.com"`,
} {
require.Contains(t, text, excerpt)
}
}
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PaperExport", f.InternalTestPaperExport)
}
var _ fstests.InternalTester = (*Fs)(nil)
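
The compile-time assertion on the last line is what opts the backend into this extra coverage: the fstests integration framework checks whether the Fs under test implements InternalTester and, if it does, runs InternalTest as part of the suite.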

View File

@@ -28,14 +28,14 @@ var retryErrorCodes = []int{
509, // Bandwidth Limit Exceeded 509, // Bandwidth Limit Exceeded
} }
var errorRegex = regexp.MustCompile(`#(\d{1,3})`) var errorRegex = regexp.MustCompile(`#\d{1,3}`)
func parseFichierError(err error) int { func parseFichierError(err error) int {
matches := errorRegex.FindStringSubmatch(err.Error()) matches := errorRegex.FindStringSubmatch(err.Error())
if len(matches) == 0 { if len(matches) == 0 {
return 0 return 0
} }
code, err := strconv.Atoi(matches[1]) code, err := strconv.Atoi(matches[0])
if err != nil { if err != nil {
fs.Debugf(nil, "failed parsing fichier error: %v", err) fs.Debugf(nil, "failed parsing fichier error: %v", err)
return 0 return 0
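
The new capture group matters because FindStringSubmatch's element 0 is the whole match including the leading "#": the old code passed that to strconv.Atoi, which always failed, so every coded 1Fichier error parsed as 0. A runnable demonstration:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var errorRegex = regexp.MustCompile(`#(\d{1,3})`)

func main() {
	matches := errorRegex.FindStringSubmatch("Flood detected: #412 error")
	fmt.Println(matches[0]) // "#412" - what the old code fed to Atoi (always an error)
	code, err := strconv.Atoi(matches[1])
	fmt.Println(code, err) // 412 <nil>
}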
@@ -61,7 +61,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
return false, err // No such user return false, err // No such user
case 186: case 186:
return false, err // IP blocked? return false, err // IP blocked?
case 374, 412: // Flood detected seems to be #412 now case 374:
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err) fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
time.Sleep(30 * time.Second) time.Sleep(30 * time.Second)
default: default:
@@ -118,9 +118,6 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
Single: 1, Single: 1,
Pass: f.opt.FilePassword, Pass: f.opt.FilePassword,
} }
if f.opt.CDN {
request.CDN = 1
}
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
Path: "/download/get_token.cgi", Path: "/download/get_token.cgi",
@@ -408,32 +405,6 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
return response, nil return response, nil
} }
func (f *Fs) moveDir(ctx context.Context, folderID int, newLeaf string, destinationFolderID int) (response *MoveDirResponse, err error) {
request := &MoveDirRequest{
FolderID: folderID,
DestinationFolderID: destinationFolderID,
Rename: newLeaf,
// DestinationUser: destinationUser,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/mv.cgi",
}
response = &MoveDirResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't move dir: %w", err)
}
return response, nil
}
func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) { func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
request := &CopyFileRequest{ request := &CopyFileRequest{
URLs: []string{url}, URLs: []string{url},
@@ -502,7 +473,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return nil, fmt.Errorf("didn't get an upload node: %w", err) return nil, fmt.Errorf("didnt got an upload node: %w", err)
} }
// fs.Debugf(f, "Got Upload node") // fs.Debugf(f, "Got Upload node")

View File

@@ -40,7 +40,6 @@ func init() {
Options: []fs.Option{{ Options: []fs.Option{{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.", Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
Name: "api_key", Name: "api_key",
Sensitive: true,
}, { }, {
Help: "If you want to download a shared folder, add this parameter.", Help: "If you want to download a shared folder, add this parameter.",
Name: "shared_folder", Name: "shared_folder",
@@ -55,11 +54,6 @@ func init() {
Name: "folder_password", Name: "folder_password",
Advanced: true, Advanced: true,
IsPassword: true, IsPassword: true,
}, {
Help: "Set if you wish to use CDN download links.",
Name: "cdn",
Default: false,
Advanced: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -95,7 +89,6 @@ type Options struct {
SharedFolder string `config:"shared_folder"` SharedFolder string `config:"shared_folder"`
FilePassword string `config:"file_password"` FilePassword string `config:"file_password"`
FolderPassword string `config:"folder_password"` FolderPassword string `config:"folder_password"`
CDN bool `config:"cdn"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
} }
@@ -340,7 +333,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// checking to see if there is one already - use Put() for that. // checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(300e9) { if size > int64(300e9) {
return nil, errors.New("File too big, can't upload") return nil, errors.New("File too big, cant upload")
} else if size == 0 { } else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles return nil, fs.ErrorCantUploadEmptyFiles
} }
@@ -441,28 +434,23 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't move - not same remote type") fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove return nil, fs.ErrorCantMove
} }
srcFs := srcObj.fs
// Find current directory ID // Find current directory ID
srcLeaf, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcObj.remote, false) _, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Create temporary object // Create temporary object
dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote) dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// If it is in the correct directory, just rename it // If it is in the correct directory, just rename it
var url string var url string
if srcDirectoryID == dstDirectoryID { if currentDirectoryID == directoryID {
// No rename needed resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
if srcLeaf == dstLeaf {
return src, nil
}
resp, err := f.renameFile(ctx, srcObj.file.URL, dstLeaf)
if err != nil { if err != nil {
return nil, fmt.Errorf("couldn't rename file: %w", err) return nil, fmt.Errorf("couldn't rename file: %w", err)
} }
@@ -471,16 +459,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
url = resp.URLs[0].URL url = resp.URLs[0].URL
} else { } else {
dstFolderID, err := strconv.Atoi(dstDirectoryID) folderID, err := strconv.Atoi(directoryID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
rename := dstLeaf resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
// No rename needed
if srcLeaf == dstLeaf {
rename = ""
}
resp, err := f.moveFile(ctx, srcObj.file.URL, dstFolderID, rename)
if err != nil { if err != nil {
return nil, fmt.Errorf("couldn't move file: %w", err) return nil, fmt.Errorf("couldn't move file: %w", err)
} }
@@ -498,51 +481,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil return dstObj, nil
} }
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove.
//
// If destination exists then return fs.ErrorDirExists.
//
// This is complicated by the fact that we can't use moveDir to move
// to a different directory AND rename at the same time as it can
// overwrite files in the source directory.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
if err != nil {
return err
}
srcIDnumeric, err := strconv.Atoi(srcID)
if err != nil {
return err
}
dstDirectoryIDnumeric, err := strconv.Atoi(dstDirectoryID)
if err != nil {
return err
}
var resp *MoveDirResponse
resp, err = f.moveDir(ctx, srcIDnumeric, dstLeaf, dstDirectoryIDnumeric)
if err != nil {
return fmt.Errorf("couldn't rename leaf: %w", err)
}
if resp.Status != "OK" {
return fmt.Errorf("couldn't rename leaf: %s", resp.Message)
}
srcFs.dirCache.FlushDir(srcRemote)
return nil
}
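
For reference, dirCache.DirMove does the path bookkeeping here: it resolves the source directory to its ID and the destination to a parent-directory ID plus leaf name, which is why the body only needs the two strconv.Atoi conversions before calling moveDir.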
// Copy src to this remote using server side move operations. // Copy src to this remote using server side move operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
@@ -616,7 +554,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var ( var (
_ fs.Fs = (*Fs)(nil) _ fs.Fs = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil) _ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil) _ fs.Copier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil) _ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil) _ fs.PutUncheckeder = (*Fs)(nil)

View File

@@ -20,7 +20,6 @@ type DownloadRequest struct {
URL string `json:"url"` URL string `json:"url"`
Single int `json:"single"` Single int `json:"single"`
Pass string `json:"pass,omitempty"` Pass string `json:"pass,omitempty"`
CDN int `json:"cdn,omitempty"`
} }
// RemoveFolderRequest is the request structure of the corresponding request // RemoveFolderRequest is the request structure of the corresponding request
@@ -70,22 +69,6 @@ type MoveFileResponse struct {
URLs []string `json:"urls"` URLs []string `json:"urls"`
} }
// MoveDirRequest is the request structure of the corresponding request
type MoveDirRequest struct {
FolderID int `json:"folder_id"`
DestinationFolderID int `json:"destination_folder_id,omitempty"`
DestinationUser string `json:"destination_user"`
Rename string `json:"rename,omitempty"`
}
// MoveDirResponse is the response structure of the corresponding request
type MoveDirResponse struct {
Status string `json:"status"`
Message string `json:"message"`
OldName string `json:"old_name"`
NewName string `json:"new_name"`
}
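
Based on the struct tags above, this is the wire format (field values are made up; note that destination_user has no omitempty, so an empty string is always sent, matching the commented-out field in moveDir):

package main

import (
	"encoding/json"
	"os"
)

// Same shape as MoveDirRequest above, reproduced so the example compiles
// on its own.
type MoveDirRequest struct {
	FolderID            int    `json:"folder_id"`
	DestinationFolderID int    `json:"destination_folder_id,omitempty"`
	DestinationUser     string `json:"destination_user"`
	Rename              string `json:"rename,omitempty"`
}

func main() {
	_ = json.NewEncoder(os.Stdout).Encode(MoveDirRequest{
		FolderID:            123,
		DestinationFolderID: 456,
		Rename:              "docs",
	})
	// {"folder_id":123,"destination_folder_id":456,"destination_user":"","rename":"docs"}
}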
// CopyFileRequest is the request structure of the corresponding request // CopyFileRequest is the request structure of the corresponding request
type CopyFileRequest struct { type CopyFileRequest struct {
URLs []string `json:"urls"` URLs []string `json:"urls"`

View File

@@ -216,11 +216,11 @@ var ItemFields = mustFields(Item{})
// fields returns the JSON fields in use by opt as a | separated // fields returns the JSON fields in use by opt as a | separated
// string. // string.
func fields(opt any) (pipeTags string, err error) { func fields(opt interface{}) (pipeTags string, err error) {
var tags []string var tags []string
def := reflect.ValueOf(opt) def := reflect.ValueOf(opt)
defType := def.Type() defType := def.Type()
for i := range def.NumField() { for i := 0; i < def.NumField(); i++ {
field := defType.Field(i) field := defType.Field(i)
tag, ok := field.Tag.Lookup("json") tag, ok := field.Tag.Lookup("json")
if !ok { if !ok {
@@ -239,7 +239,7 @@ func fields(opt any) (pipeTags string, err error) {
// mustFields returns the JSON fields in use by opt as a | separated // mustFields returns the JSON fields in use by opt as a | separated
// string. It panics on failure. // string. It panics on failure.
func mustFields(opt any) string { func mustFields(opt interface{}) string {
tags, err := fields(opt) tags, err := fields(opt)
if err != nil { if err != nil {
panic(err) panic(err)
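
These hunks modernise the syntax: `any` has been an alias for interface{} since Go 1.18, and ranging over an int (`for i := range def.NumField()`) needs Go 1.22+, so the newer side of the diff implies a newer minimum toolchain. Both loop forms are equivalent:

package main

import "fmt"

func main() {
	var v any = "hello"
	for i := range 3 { // Go 1.22+
		fmt.Println(i, v)
	}
	for i := 0; i < 3; i++ { // equivalent on any Go version
		fmt.Println(i, v)
	}
}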
@@ -352,7 +352,7 @@ type SpaceInfo struct {
type DeleteResponse struct { type DeleteResponse struct {
Status Status
Deleted []string `json:"deleted"` Deleted []string `json:"deleted"`
Errors []any `json:"errors"` Errors []interface{} `json:"errors"`
ID string `json:"fi_id"` ID string `json:"fi_id"`
BackgroundTask int `json:"backgroundtask"` BackgroundTask int `json:"backgroundtask"`
UsSize string `json:"us_size"` UsSize string `json:"us_size"`

View File

@@ -20,6 +20,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"path" "path"
@@ -84,7 +85,6 @@ Leave blank normally.
Fill in to make rclone start with directory of a given ID. Fill in to make rclone start with directory of a given ID.
`, `,
Sensitive: true,
}, { }, {
Name: "permanent_token", Name: "permanent_token",
Help: `Permanent Authentication Token. Help: `Permanent Authentication Token.
@@ -98,7 +98,6 @@ These tokens are normally valid for several years.
For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
`, `,
Sensitive: true,
}, { }, {
Name: "token", Name: "token",
Help: `Session Token. Help: `Session Token.
@@ -109,7 +108,6 @@ usually valid for 1 hour.
Don't set this value - rclone will set it automatically. Don't set this value - rclone will set it automatically.
`, `,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "token_expiry", Name: "token_expiry",
Help: `Token expiry time. Help: `Token expiry time.
@@ -158,7 +156,7 @@ type Fs struct {
tokenMu sync.Mutex // hold when reading the token tokenMu sync.Mutex // hold when reading the token
token string // current access token token string // current access token
tokenExpiry time.Time // time the current token expires tokenExpiry time.Time // time the current token expires
tokenExpired atomic.Int32 tokenExpired int32 // read and written with atomic
canCopyWithName bool // set if detected that can use fi_name in copy canCopyWithName bool // set if detected that can use fi_name in copy
precision time.Duration // precision reported precision time.Duration // precision reported
} }
@@ -243,7 +241,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
err = status // return the error from the RPC err = status // return the error from the RPC
code := status.GetCode() code := status.GetCode()
if code == "login_token_expired" { if code == "login_token_expired" {
f.tokenExpired.Add(1) atomic.AddInt32(&f.tokenExpired, 1)
} else { } else {
for _, retryCode := range retryStatusCodes { for _, retryCode := range retryStatusCodes {
if code == retryCode.code { if code == retryCode.code {
@@ -323,12 +321,12 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
var refreshed = false var refreshed = false
defer func() { defer func() {
if refreshed { if refreshed {
f.tokenExpired.Store(0) atomic.StoreInt32(&f.tokenExpired, 0)
} }
f.tokenMu.Unlock() f.tokenMu.Unlock()
}() }()
expired := f.tokenExpired.Load() != 0 expired := atomic.LoadInt32(&f.tokenExpired) != 0
if expired { if expired {
fs.Debugf(f, "Token invalid - refreshing") fs.Debugf(f, "Token invalid - refreshing")
} }
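
tokenExpired becomes an atomic.Int32 (Go 1.19+), which carries its atomic methods with the value so it cannot be read or written non-atomically by accident. A minimal demonstration of the same Load/Store/Add calls used in shouldRetry and getToken:

package main

import (
	"fmt"
	"sync/atomic"
)

type fsFlags struct {
	tokenExpired atomic.Int32 // no way to touch it without the atomic API
}

func main() {
	var f fsFlags
	f.tokenExpired.Add(1)                   // mark expired (as shouldRetry does)
	fmt.Println(f.tokenExpired.Load() != 0) // true
	f.tokenExpired.Store(0)                 // reset after refresh (as getToken does)
	fmt.Println(f.tokenExpired.Load() != 0) // false
}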
@@ -371,7 +369,7 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
} }
// params for rpc // params for rpc
type params map[string]any type params map[string]interface{}
// rpc calls the rpc.php method of the SME file fabric // rpc calls the rpc.php method of the SME file fabric
// //
@@ -1188,7 +1186,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, errors.New("can't download - no id") return nil, errors.New("can't download - no id")
} }
if o.contentType == emptyMimeType { if o.contentType == emptyMimeType {
return io.NopCloser(bytes.NewReader([]byte{})), nil return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
} }
fs.FixRangeOption(options, o.size) fs.FixRangeOption(options, o.size)
resp, err := o.fs.rpc(ctx, "getFile", params{ resp, err := o.fs.rpc(ctx, "getFile", params{
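
io.NopCloser is the drop-in replacement for ioutil.NopCloser, which was deprecated along with the rest of io/ioutil in Go 1.16; behaviour is unchanged:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	// Wrap a Reader so it satisfies io.ReadCloser with a no-op Close,
	// exactly what the removed ioutil.NopCloser call did.
	rc := io.NopCloser(bytes.NewReader([]byte{}))
	b, err := io.ReadAll(rc)
	fmt.Println(len(b), err, rc.Close()) // 0 <nil> <nil>
}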

View File

@@ -1,81 +0,0 @@
// Package api defines types for interacting with the FileLu API.
package api
import "encoding/json"
// CreateFolderResponse represents the response for creating a folder.
type CreateFolderResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result struct {
FldID interface{} `json:"fld_id"`
} `json:"result"`
}
// DeleteFolderResponse represents the response for deleting a folder.
type DeleteFolderResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
}
// FolderListResponse represents the response for listing folders.
type FolderListResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result struct {
Files []struct {
Name string `json:"name"`
FldID json.Number `json:"fld_id"`
Path string `json:"path"`
FileCode string `json:"file_code"`
Size int64 `json:"size"`
} `json:"files"`
Folders []struct {
Name string `json:"name"`
FldID json.Number `json:"fld_id"`
Path string `json:"path"`
} `json:"folders"`
} `json:"result"`
}
// FileDirectLinkResponse represents the response for a direct link to a file.
type FileDirectLinkResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result struct {
URL string `json:"url"`
Size int64 `json:"size"`
} `json:"result"`
}
// FileInfoResponse represents the response for file information.
type FileInfoResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result []struct {
Size string `json:"size"`
Name string `json:"name"`
FileCode string `json:"filecode"`
Hash string `json:"hash"`
Status int `json:"status"`
} `json:"result"`
}
// DeleteFileResponse represents the response for deleting a file.
type DeleteFileResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
}
// AccountInfoResponse represents the response for account information.
type AccountInfoResponse struct {
Status int `json:"status"` // HTTP status code of the response.
Msg string `json:"msg"` // Message describing the response.
Result struct {
PremiumExpire string `json:"premium_expire"` // Expiration date of premium access.
Email string `json:"email"` // User's email address.
UType string `json:"utype"` // User type (e.g., premium or free).
Storage string `json:"storage"` // Total storage available to the user.
StorageUsed string `json:"storage_used"` // Amount of storage used.
} `json:"result"` // Nested result structure containing account details.
}

Some files were not shown because too many files have changed in this diff