Mirror of https://github.com/rclone/rclone.git, synced 2025-12-06 00:03:32 +00:00

Compare commits: feat/cache ... v1.64-stab (41 commits)
| SHA1 |
|---|
| 0b2e17b396 |
| 4325a7c362 |
| 2730e9ff08 |
| 929d8b8a6d |
| 583eee635f |
| ba836e729e |
| a6db2e7320 |
| 31486aa7e3 |
| d24e5bc7e4 |
| 077c1a0f57 |
| 3bb82b4dd5 |
| 1591592936 |
| cc036884d4 |
| f6b9fdf7c6 |
| c9fe2f75a8 |
| 340a67c012 |
| 264b3f0c90 |
| a7978cea56 |
| bebd82c586 |
| af02c3b2a7 |
| 77dfe5f1fd |
| e9a95a78de |
| 82ca5295f4 |
| 9d8a40b813 |
| 12d80c5219 |
| 038a87c569 |
| 3ef97993ad |
| 04bba67cd5 |
| 29dd29b9f3 |
| 532248352b |
| ab803942de |
| f933e80258 |
| 1c6f0101a5 |
| c6f161de90 |
| bdcf7fe28c |
| 776dc47eb8 |
| 167046e21a |
| 98d50d545a |
| 48242c5357 |
| e437e6c209 |
| b813a01718 |
.gitattributes (vendored): 4 changes

@@ -1,7 +1,3 @@
-# Go writes go.mod and go.sum with lf even on windows
-go.mod text eol=lf
-go.sum text eol=lf
-
 # Ignore generated files in GitHub language statistics and diffs
 /MANUAL.* linguist-generated=true
 /rclone.1 linguist-generated=true
.github/workflows/build.yml (vendored): 156 changes

@@ -17,24 +17,22 @@ on:
 manual:
 description: Manual run (bypass default conditions)
 type: boolean
+required: true
 default: true
 
 jobs:
 build:
-if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
+if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
 timeout-minutes: 60
-defaults:
-run:
-shell: bash
 strategy:
 fail-fast: false
 matrix:
-job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.23']
+job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.19', 'go1.20']
 
 include:
 - job_name: linux
 os: ubuntu-latest
-go: '>=1.24.0-rc.1'
+go: '1.21'
 gotags: cmount
 build_flags: '-include "^linux/"'
 check: true
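The `if:` change in this hunk swaps the older string test `github.event.inputs.manual == 'true'` for the typed boolean `inputs.manual`. A minimal sketch of how such a boolean `workflow_dispatch` input is declared and then consumed at job level; the workflow and job names here are illustrative, not lines from the repository:

```yaml
name: manual-input-example
on:
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true
jobs:
  build:
    # With `type: boolean` the inputs context yields a real boolean,
    # so no string comparison against 'true' is needed.
    if: inputs.manual || github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    steps:
      - run: echo "manual=${{ inputs.manual }}"
```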
@@ -45,14 +43,14 @@ jobs:
 
 - job_name: linux_386
 os: ubuntu-latest
-go: '>=1.24.0-rc.1'
+go: '1.21'
 goarch: 386
 gotags: cmount
 quicktest: true
 
 - job_name: mac_amd64
-os: macos-latest
-go: '>=1.24.0-rc.1'
+os: macos-11
+go: '1.21'
 gotags: 'cmount'
 build_flags: '-include "^darwin/amd64" -cgo'
 quicktest: true
@@ -60,15 +58,15 @@ jobs:
 deploy: true
 
 - job_name: mac_arm64
-os: macos-latest
-go: '>=1.24.0-rc.1'
+os: macos-11
+go: '1.21'
 gotags: 'cmount'
 build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
 deploy: true
 
 - job_name: windows
 os: windows-latest
-go: '>=1.24.0-rc.1'
+go: '1.21'
 gotags: cmount
 cgo: '0'
 build_flags: '-include "^windows/"'
@@ -78,14 +76,20 @@ jobs:
 
 - job_name: other_os
 os: ubuntu-latest
-go: '>=1.24.0-rc.1'
+go: '1.21'
 build_flags: '-exclude "^(windows/|darwin/|linux/)"'
 compile_all: true
 deploy: true
 
-- job_name: go1.23
+- job_name: go1.19
 os: ubuntu-latest
-go: '1.23'
+go: '1.19'
+quicktest: true
+racequicktest: true
+
+- job_name: go1.20
+os: ubuntu-latest
+go: '1.20'
 quicktest: true
 racequicktest: true
 
@@ -100,12 +104,13 @@ jobs:
 fetch-depth: 0
 
 - name: Install Go
-uses: actions/setup-go@v5
+uses: actions/setup-go@v4
 with:
 go-version: ${{ matrix.go }}
 check-latest: true
 
 - name: Set environment variables
+shell: bash
 run: |
 echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
 echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
@@ -114,15 +119,16 @@ jobs:
 if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
 
 - name: Install Libraries on Linux
+shell: bash
 run: |
 sudo modprobe fuse
 sudo chmod 666 /dev/fuse
 sudo chown root:$USER /etc/fuse.conf
-sudo apt-get update
-sudo apt-get install -y fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
+sudo apt-get install fuse3 libfuse-dev rpm pkg-config
 if: matrix.os == 'ubuntu-latest'
 
 - name: Install Libraries on macOS
+shell: bash
 run: |
 # https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
 # https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
@@ -131,8 +137,7 @@ jobs:
 brew untap --force homebrew/cask
 brew update
 brew install --cask macfuse
-brew install git-annex git-annex-remote-rclone
-if: matrix.os == 'macos-latest'
+if: matrix.os == 'macos-11'
 
 - name: Install Libraries on Windows
 shell: powershell
@@ -151,6 +156,7 @@ jobs:
 if: matrix.os == 'windows-latest'
 
 - name: Print Go version and environment
+shell: bash
 run: |
 printf "Using go at: $(which go)\n"
 printf "Go version: $(go version)\n"
@@ -161,25 +167,38 @@ jobs:
 printf "\n\nSystem environment:\n\n"
 env
 
+- name: Go module cache
+uses: actions/cache@v3
+with:
+path: ~/go/pkg/mod
+key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+restore-keys: |
+${{ runner.os }}-go-
+
 - name: Build rclone
+shell: bash
 run: |
 make
 
 - name: Rclone version
+shell: bash
 run: |
 rclone version
 
 - name: Run tests
+shell: bash
 run: |
 make quicktest
 if: matrix.quicktest
 
 - name: Race test
+shell: bash
 run: |
 make racequicktest
 if: matrix.racequicktest
 
 - name: Run librclone tests
+shell: bash
 run: |
 make -C librclone/ctest test
 make -C librclone/ctest clean
@@ -187,14 +206,17 @@ jobs:
 if: matrix.librclonetest
 
 - name: Compile all architectures test
+shell: bash
 run: |
 make
 make compile_all
 if: matrix.compile_all
 
 - name: Deploy built binaries
+shell: bash
 run: |
 if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
+if [[ "${{ matrix.os }}" == "windows-latest" ]]; then make release_dep_windows ; fi
 make ci_beta
 env:
 RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
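Several of the hunks above add an explicit Go module cache step built on actions/cache. A minimal self-contained sketch of that pattern, reusing the same cache path and key scheme as the added step; the surrounding workflow and job names are assumptions for illustration, not taken from the repository:

```yaml
name: go-cache-example
on: [push]
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v4
        with:
          go-version: '1.21'
      # Cache downloaded modules between runs, keyed on go.sum so the
      # cache is refreshed whenever dependencies change.
      - name: Go module cache
        uses: actions/cache@v3
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-
      - run: go build ./...
```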
@@ -203,78 +225,27 @@ jobs:
 if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
 
 lint:
-if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
+if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
 timeout-minutes: 30
 name: "lint"
 runs-on: ubuntu-latest
 
 steps:
-- name: Get runner parameters
-id: get-runner-parameters
-run: |
-echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
-echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
-
 - name: Checkout
 uses: actions/checkout@v4
-with:
-fetch-depth: 0
 
+- name: Code quality test
+uses: golangci/golangci-lint-action@v3
+with:
+# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
+version: latest
+
+# Run govulncheck on the latest go version, the one we build binaries with
 - name: Install Go
-id: setup-go
-uses: actions/setup-go@v5
+uses: actions/setup-go@v4
 with:
-go-version: '>=1.23.0-rc.1'
+go-version: '1.21'
 check-latest: true
-cache: false
-
-- name: Cache
-uses: actions/cache@v4
-with:
-path: |
-~/go/pkg/mod
-~/.cache/go-build
-~/.cache/golangci-lint
-key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
-restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-
-
-- name: Code quality test (Linux)
-uses: golangci/golangci-lint-action@v6
-with:
-version: latest
-skip-cache: true
-
-- name: Code quality test (Windows)
-uses: golangci/golangci-lint-action@v6
-env:
-GOOS: "windows"
-with:
-version: latest
-skip-cache: true
-
-- name: Code quality test (macOS)
-uses: golangci/golangci-lint-action@v6
-env:
-GOOS: "darwin"
-with:
-version: latest
-skip-cache: true
-
-- name: Code quality test (FreeBSD)
-uses: golangci/golangci-lint-action@v6
-env:
-GOOS: "freebsd"
-with:
-version: latest
-skip-cache: true
-
-- name: Code quality test (OpenBSD)
-uses: golangci/golangci-lint-action@v6
-env:
-GOOS: "openbsd"
-with:
-version: latest
-skip-cache: true
-
 - name: Install govulncheck
 run: go install golang.org/x/vuln/cmd/govulncheck@latest
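The removed lint steps above run golangci-lint once per target OS by exporting GOOS to the action, which lints cross-compiled code paths without needing a runner for each platform. One way to express the same five checks as a matrix is sketched below; this is an illustration under assumed names, not the configuration used by either branch:

```yaml
name: lint-example
on: [push]
jobs:
  lint:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # GOOS steers build-tag and file-suffix selection, so each value
        # lints the code paths compiled for that platform.
        goos: [linux, windows, darwin, freebsd, openbsd]
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: '1.23'
      - name: Code quality test (${{ matrix.goos }})
        uses: golangci/golangci-lint-action@v6
        env:
          GOOS: ${{ matrix.goos }}
        with:
          version: latest
          skip-cache: true
```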
@@ -282,12 +253,8 @@ jobs:
 - name: Scan for vulnerabilities
 run: govulncheck ./...
 
-- name: Scan edits of autogenerated files
-run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
-if: github.event_name == 'pull_request'
-
 android:
-if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
+if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
 timeout-minutes: 30
 name: "android-all"
 runs-on: ubuntu-latest
@@ -300,11 +267,20 @@ jobs:
 
 # Upgrade together with NDK version
 - name: Set up Go
-uses: actions/setup-go@v5
+uses: actions/setup-go@v4
 with:
-go-version: '>=1.24.0-rc.1'
+go-version: '1.21'
 
+- name: Go module cache
+uses: actions/cache@v3
+with:
+path: ~/go/pkg/mod
+key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+restore-keys: |
+${{ runner.os }}-go-
+
 - name: Set global environment variables
+shell: bash
 run: |
 echo "VERSION=$(make version)" >> $GITHUB_ENV
 
@@ -323,6 +299,7 @@ jobs:
 run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
 
 - name: arm-v7a Set environment variables
+shell: bash
 run: |
 echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
 echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -336,6 +313,7 @@ jobs:
 run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
 
 - name: arm64-v8a Set environment variables
+shell: bash
 run: |
 echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
 echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -348,6 +326,7 @@ jobs:
 run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
 
 - name: x86 Set environment variables
+shell: bash
 run: |
 echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
 echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -360,6 +339,7 @@ jobs:
 run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
 
 - name: x64 Set environment variables
+shell: bash
 run: |
 echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
 echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
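Each Android target above is produced by writing CC, GOOS, GOARCH and CGO_ENABLED into $GITHUB_ENV and then invoking go build. The same wiring can be stated on a single step with a declarative env block; a sketch for the arm64 target only, assuming ANDROID_NDK and RCLONE_NDK_VERSION are already present in the runner environment as in the workflow above:

```yaml
jobs:
  android-arm64:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: '1.21'
      - name: arm64-v8a Build
        env:
          # Step-level env replaces the echo ... >> $GITHUB_ENV pattern;
          # values mirror the arm64 step above.
          GOOS: android
          GOARCH: arm64
          CGO_ENABLED: '1'
          CGO_LDFLAGS: -fuse-ld=lld -s -w
        run: |
          # ANDROID_NDK and RCLONE_NDK_VERSION are assumed to be set, as above.
          export CC=$ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang
          go build -v -tags android -trimpath -o build/rclone-android-arm64 .
```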
.github/workflows/build_android.yml (vendored): deleted (212 lines). The removed workflow was:

---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable build_android.yml" -*-

name: Build & Push Android Builds

concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
cancel-in-progress: true

# Trigger the workflow on push or pull request
on:
push:
branches:
- '**'
tags:
- '**'
pull_request:
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
default: true

jobs:
android:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
include:
- job_name: android-all
platform: linux/amd64/android/go1.24
os: ubuntu-latest
go: '>=1.24.0-rc.1'

name: ${{ matrix.job_name }}
runs-on: ${{ matrix.os }}

steps:
- name: Checkout Repository
uses: actions/checkout@v4
with:
fetch-depth: 0

# Upgrade together with NDK version
- name: Install Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
check-latest: true
cache: false

- name: Set Environment Variables
shell: bash
run: |
echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_ENV
echo "GOCACHE=$(go env GOCACHE)" >> $GITHUB_ENV
echo "VERSION=$(make version)" >> $GITHUB_ENV

- name: Set PLATFORM Variable
shell: bash
run: |
platform=${{ matrix.platform }}
echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV

- name: Get ImageOS
# There's no way around this, because "ImageOS" is only available to
# processes, but the setup-go action uses it in its key.
id: imageos
uses: actions/github-script@v7
with:
result-encoding: string
script: |
return process.env.ImageOS

- name: Set CACHE_PREFIX Variable
shell: bash
run: |
cache_prefix=${{ runner.os }}-${{ steps.imageos.outputs.result }}-${{ env.PLATFORM }}
echo "CACHE_PREFIX=${cache_prefix}" >> $GITHUB_ENV

- name: Load Go Module Cache
uses: actions/cache@v4
with:
path: |
${{ env.GOMODCACHE }}
key: ${{ env.CACHE_PREFIX }}-modcache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ env.CACHE_PREFIX }}-modcache

# Both load & update the cache when on default branch
- name: Load Go Build & Test Cache
id: go-cache
uses: actions/cache@v4
if: github.ref_name == github.event.repository.default_branch && github.event_name != 'pull_request'
with:
path: |
${{ env.GOCACHE }}
key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: |
${{ env.CACHE_PREFIX }}-cache

# Only load the cache when not on default branch
- name: Load Go Build & Test Cache
id: go-cache-restore
uses: actions/cache/restore@v4
if: github.ref_name != github.event.repository.default_branch || github.event_name == 'pull_request'
with:
path: |
${{ env.GOCACHE }}
key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: |
${{ env.CACHE_PREFIX }}-cache

- name: Build Native rclone
shell: bash
run: |
make

- name: Install gomobile
shell: bash
run: |
go install golang.org/x/mobile/cmd/gobind@latest
go install golang.org/x/mobile/cmd/gomobile@latest
env PATH=$PATH:~/go/bin gomobile init
echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV

- name: arm-v7a - gomobile build
shell: bash
run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile

- name: arm-v7a - Set Environment Variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
echo 'GOARM=7' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

- name: arm-v7a - Build
shell: bash
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .

- name: arm64-v8a - Set Environment Variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

- name: arm64-v8a - Build
shell: bash
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .

- name: x86 - Set Environment Variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

- name: x86 - Build
shell: bash
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .

- name: x64 - Set Environment Variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

- name: x64 - Build
shell: bash
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .

- name: Delete Existing Cache
continue-on-error: true
shell: bash
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
cache_ids=($(gh cache list --key "${{ env.CACHE_PREFIX }}-cache" --json id | jq '.[].id'))
for cache_id in "${cache_ids[@]}"; do
echo "Deleting Cache: $cache_id"
gh cache delete "$cache_id"
done
if: github.ref_name == github.event.repository.default_branch && github.event_name != 'pull_request' && steps.go-cache.outputs.cache-hit != 'true'

- name: Deploy Built Binaries
shell: bash
run: |
make ci_upload
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
.github/workflows/build_publish_beta_docker_image.yml (vendored): new file (77 lines):

name: Docker beta build

on:
push:
branches:
- master
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/${{ github.repository }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user whom created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
# `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
# GitHub Actions at the start of a workflow run to identify the job.
# This is used to authenticate against GitHub Container Registry.
# See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
# for more detailed information.
password: ${{ secrets.GITHUB_TOKEN }}
- name: Show disk usage
shell: bash
run: |
df -h .
- name: Build and publish image
uses: docker/build-push-action@v5
with:
file: Dockerfile
context: .
push: true # push the image to ghcr
tags: |
ghcr.io/rclone/rclone:beta
rclone/rclone:beta
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
cache-from: type=gha, scope=${{ github.workflow }}
cache-to: type=gha, mode=max, scope=${{ github.workflow }}
provenance: false
# Eventually cache will need to be cleared if builds more frequent than once a week
# https://github.com/docker/build-push-action/issues/252
- name: Show disk usage
shell: bash
run: |
df -h .
.github/workflows/build_publish_docker_image.yml (vendored): deleted (329 lines). The removed workflow was:

---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_image.yml" -*-

name: Build & Push Docker Images

concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
cancel-in-progress: true

# Trigger the workflow on push or pull request
on:
push:
branches:
- '**'
tags:
- '**'
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
default: true

jobs:
build-image:
if: inputs.manual || (github.repository == 'rclone/rclone' && github.event_name != 'pull_request')
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
include:
- platform: linux/amd64
runs-on: ubuntu-24.04
- platform: linux/386
runs-on: ubuntu-24.04
- platform: linux/arm64
runs-on: ubuntu-24.04-arm
- platform: linux/arm/v7
runs-on: ubuntu-24.04-arm
- platform: linux/arm/v6
runs-on: ubuntu-24.04-arm

name: Build Docker Image for ${{ matrix.platform }}
runs-on: ${{ matrix.runs-on }}

steps:
- name: Checkout Repository
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Set REPO_NAME Variable
shell: bash
run: |
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}

- name: Set PLATFORM Variable
shell: bash
run: |
platform=${{ matrix.platform }}
echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV

- name: Set CACHE_NAME Variable
shell: python
env:
GITHUB_EVENT_REPOSITORY_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
run: |
import os, re

def slugify(input_string, max_length=63):
slug = input_string.lower()
slug = re.sub(r'[^a-z0-9 -]', ' ', slug)
slug = slug.strip()
slug = re.sub(r'\s+', '-', slug)
slug = re.sub(r'-+', '-', slug)
slug = slug[:max_length]
slug = re.sub(r'[-]+$', '', slug)
return slug

ref_name_slug = "cache"

if os.environ.get("GITHUB_REF_NAME"):
if os.environ['GITHUB_EVENT_NAME'] == "pull_request":
ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])
elif os.environ['GITHUB_REF_NAME'] != os.environ['GITHUB_EVENT_REPOSITORY_DEFAULT_BRANCH']:
ref_name_slug += "-ref-" + slugify(os.environ['GITHUB_REF_NAME'])

with open(os.environ['GITHUB_ENV'], 'a') as env:
env.write(f"CACHE_NAME={ref_name_slug}\n")

- name: Get ImageOS
# There's no way around this, because "ImageOS" is only available to
# processes, but the setup-go action uses it in its key.
id: imageos
uses: actions/github-script@v7
with:
result-encoding: string
script: |
return process.env.ImageOS

- name: Set CACHE_PREFIX Variable
shell: bash
run: |
cache_prefix=${{ runner.os }}-${{ steps.imageos.outputs.result }}-${{ env.PLATFORM }}-docker-go
echo "CACHE_PREFIX=${cache_prefix}" >> $GITHUB_ENV

- name: Extract Metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,manifest-descriptor # Important for digest annotation (used by Github packages)
with:
images: |
ghcr.io/${{ env.REPO_NAME }}
labels: |
org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
org.opencontainers.image.vendor=${{ github.repository_owner }}
org.opencontainers.image.authors=rclone <https://github.com/rclone>
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
org.opencontainers.image.revision=${{ github.sha }}
tags: |
type=sha
type=ref,event=pr
type=ref,event=branch
type=semver,pattern={{version}}
type=semver,pattern={{major}}
type=semver,pattern={{major}}.{{minor}}
type=raw,value=beta,enable={{is_default_branch}}

- name: Setup QEMU
uses: docker/setup-qemu-action@v3

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

- name: Load Go Build Cache for Docker
id: go-cache
uses: actions/cache@v4
if: github.ref_name == github.event.repository.default_branch
with:
# Cache only the go builds, the module download is cached via the docker layer caching
path: |
/tmp/go-build-cache
key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: |
${{ env.CACHE_PREFIX }}-cache

- name: Load Go Build Cache for Docker
id: go-cache-restore
uses: actions/cache/restore@v4
if: github.ref_name != github.event.repository.default_branch
with:
# Cache only the go builds, the module download is cached via the docker layer caching
path: |
/tmp/go-build-cache
key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: |
${{ env.CACHE_PREFIX }}-cache

- name: Inject Go Build Cache into Docker
uses: reproducible-containers/buildkit-cache-dance@v3
with:
cache-map: |
{
"/tmp/go-build-cache": "/root/.cache/go-build"
}
skip-extraction: ${{ steps.go-cache.outputs.cache-hit || steps.go-cache-restore.outputs.cache-hit }}

- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user whom created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Build and Publish Image Digest
id: build
uses: docker/build-push-action@v6
with:
file: Dockerfile
context: .
provenance: false
# don't specify 'tags' here (error "get can't push tagged ref by digest")
# tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
annotations: ${{ steps.meta.outputs.annotations }}
platforms: ${{ matrix.platform }}
outputs: |
type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
cache-from: |
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-${{ env.CACHE_NAME }}
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-cache
cache-to: |
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-${{ env.CACHE_NAME }},image-manifest=true,mode=max,compression=zstd

- name: Export Image Digest
run: |
mkdir -p /tmp/digests
digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}"

- name: Upload Image Digest
uses: actions/upload-artifact@v4
with:
name: digests-${{ env.PLATFORM }}
path: /tmp/digests/*
retention-days: 1
if-no-files-found: error

- name: Delete Existing Cache
if: github.ref_name == github.event.repository.default_branch && steps.go-cache.outputs.cache-hit != 'true'
continue-on-error: true
shell: bash
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
cache_ids=($(gh cache list --key "${{ env.CACHE_PREFIX }}-cache" --json id | jq '.[].id'))
for cache_id in "${cache_ids[@]}"; do
echo "Deleting Cache: $cache_id"
gh cache delete "$cache_id"
done

merge-image:
name: Merge & Push Final Docker Image
runs-on: ubuntu-24.04
needs:
- build-image

steps:
- name: Download Image Digests
uses: actions/download-artifact@v4
with:
path: /tmp/digests
pattern: digests-*
merge-multiple: true

- name: Set REPO_NAME Variable
shell: bash
run: |
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}

- name: Extract Metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: index
with:
images: |
${{ env.REPO_NAME }}
ghcr.io/${{ env.REPO_NAME }}
labels: |
org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
org.opencontainers.image.vendor=${{ github.repository_owner }}
org.opencontainers.image.authors=rclone <https://github.com/rclone>
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
org.opencontainers.image.revision=${{ github.sha }}
tags: |
type=sha
type=ref,event=pr
type=ref,event=branch
type=semver,pattern={{version}}
type=semver,pattern={{major}}
type=semver,pattern={{major}}.{{minor}}
type=raw,value=beta,enable={{is_default_branch}}

- name: Extract Tags
shell: python
run: |
import json, os

metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
metadata = json.loads(metadata_json)

tags = [f"--tag '{tag}'" for tag in metadata["tags"]]
tags_string = " ".join(tags)

with open(os.environ['GITHUB_ENV'], 'a') as env:
env.write(f"TAGS={tags_string}\n")

- name: Extract Annotations
shell: python
run: |
import json, os

metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
metadata = json.loads(metadata_json)

annotations = [f"--annotation '{annotation}'" for annotation in metadata["annotations"]]
annotations_string = " ".join(annotations)

with open(os.environ['GITHUB_ENV'], 'a') as env:
env.write(f"ANNOTATIONS={annotations_string}\n")

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user whom created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Create & Push Manifest List
working-directory: /tmp/digests
run: |
docker buildx imagetools create \
${{ env.TAGS }} \
${{ env.ANNOTATIONS }} \
$(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *)

- name: Inspect and Run Multi-Platform Image
run: |
docker buildx imagetools inspect --raw ${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
docker buildx imagetools inspect --raw ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
docker run --rm ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} version
Deleted file (49 lines):

---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*-

name: Release Build for Docker Plugin

on:
release:
types: [published]
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
default: true

jobs:
build_docker_volume_plugin:
if: inputs.manual || github.repository == 'rclone/rclone'
name: Build docker plugin job
runs-on: ubuntu-latest
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build and publish docker plugin
shell: bash
run: |
VER=${GITHUB_REF#refs/tags/}
PLUGIN_USER=rclone
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
export PLUGIN_USER PLUGIN_ARCH
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
done
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
.github/workflows/build_publish_release_docker_image.yml (vendored): new file (77 lines):

name: Docker release build

on:
release:
types: [published]

jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Get actual patch version
id: actual_patch_version
run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
- name: Get actual minor version
id: actual_minor_version
run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
- name: Get actual major version
id: actual_major_version
run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
- name: Build and publish image
uses: ilteoood/docker_buildx@1.1.0
with:
tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

build_docker_volume_plugin:
if: github.repository == 'rclone/rclone'
needs: build
runs-on: ubuntu-latest
name: Build docker plugin job
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build and publish docker plugin
shell: bash
run: |
VER=${GITHUB_REF#refs/tags/}
PLUGIN_USER=rclone
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
export PLUGIN_USER PLUGIN_ARCH
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
done
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
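The three version-extraction steps in the workflow above rely on the `::set-output` workflow command, which GitHub Actions has since deprecated in favour of appending to the $GITHUB_OUTPUT file. A hedged sketch of an equivalent step using the newer mechanism; only the patch-version step is shown, and this rewrite is not part of the diff itself:

```yaml
- name: Get actual patch version
  id: actual_patch_version
  # Appending key=value to $GITHUB_OUTPUT sets the same step output as the
  # deprecated `echo ::set-output name=...` form used above.
  run: echo "ACTUAL_PATCH_VERSION=$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')" >> "$GITHUB_OUTPUT"
```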
.github/workflows/lint.yml (vendored): deleted (104 lines). The removed workflow was:

---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable lint.yml" -*-

name: Lint & Vulnerability Check

concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
cancel-in-progress: true

# Trigger the workflow on push or pull request
on:
push:
branches:
- '**'
tags:
- '**'
pull_request:
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
default: true

jobs:
lint:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
timeout-minutes: 30
name: "lint"
runs-on: ubuntu-latest

steps:
- name: Get runner parameters
id: get-runner-parameters
shell: bash
run: |
echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT

- name: Checkout
uses: actions/checkout@v4

- name: Install Go
id: setup-go
uses: actions/setup-go@v5
with:
go-version: '>=1.23.0-rc.1'
check-latest: true
cache: false

- name: Cache
uses: actions/cache@v4
with:
path: |
~/go/pkg/mod
~/.cache/go-build
~/.cache/golangci-lint
key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-

- name: Code quality test (Linux)
uses: golangci/golangci-lint-action@v6
with:
version: latest
skip-cache: true

- name: Code quality test (Windows)
uses: golangci/golangci-lint-action@v6
env:
GOOS: "windows"
with:
version: latest
skip-cache: true

- name: Code quality test (macOS)
uses: golangci/golangci-lint-action@v6
env:
GOOS: "darwin"
with:
version: latest
skip-cache: true

- name: Code quality test (FreeBSD)
uses: golangci/golangci-lint-action@v6
env:
GOOS: "freebsd"
with:
version: latest
skip-cache: true

- name: Code quality test (OpenBSD)
uses: golangci/golangci-lint-action@v6
env:
GOOS: "openbsd"
with:
version: latest
skip-cache: true

- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest

- name: Scan for vulnerabilities
run: govulncheck ./...
.github/workflows/notify.yml (vendored): deleted (15 lines). The removed workflow was:

name: Notify users based on issue labels

on:
issues:
types: [labeled]

jobs:
notify:
runs-on: ubuntu-latest
steps:
- uses: jenschelkopf/issue-label-notification-action@1.3
with:
token: ${{ secrets.NOTIFY_ACTION_TOKEN }}
recipients: |
Support Contract=@rclone/support
.github/workflows/winget.yml (vendored): 28 changes

@@ -1,14 +1,14 @@
 name: Publish to Winget
 on:
 release:
 types: [released]
 
 jobs:
 publish:
 runs-on: ubuntu-latest
 steps:
 - uses: vedantmgoyal2009/winget-releaser@v2
 with:
 identifier: Rclone.Rclone
 installers-regex: '-windows-\w+\.zip$'
 token: ${{ secrets.WINGET_TOKEN }}
.gitignore (vendored): 9 changes

@@ -3,13 +3,10 @@ _junk/
 rclone
 rclone.exe
 build
-/docs/public/
-/docs/.hugo_build.lock
-/docs/static/img/logos/
+docs/public
 rclone.iml
 .idea
 .history
-.vscode
 *.test
 *.iml
 fuzz-build.zip
@@ -17,6 +14,4 @@ fuzz-build.zip
 *.rej
 Thumbs.db
 __pycache__
 .DS_Store
-resource_windows_*.syso
-.devcontainer
@@ -13,7 +13,6 @@ linters:
 - stylecheck
 - unused
 - misspell
-- gocritic
 #- prealloc
 #- maligned
 disable-all: true
@@ -99,46 +98,3 @@ linters-settings:
 # Only enable the checks performed by the staticcheck stand-alone tool,
 # as documented here: https://staticcheck.io/docs/configuration/options/#checks
 checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
-gocritic:
-# Enable all default checks with some exceptions and some additions (commented).
-# Cannot use both enabled-checks and disabled-checks, so must specify all to be used.
-disable-all: true
-enabled-checks:
-#- appendAssign # Enabled by default
-- argOrder
-- assignOp
-- badCall
-- badCond
-#- captLocal # Enabled by default
-- caseOrder
-- codegenComment
-#- commentFormatting # Enabled by default
-- defaultCaseOrder
-- deprecatedComment
-- dupArg
-- dupBranchBody
-- dupCase
-- dupSubExpr
-- elseif
-#- exitAfterDefer # Enabled by default
-- flagDeref
-- flagName
-#- ifElseChain # Enabled by default
-- mapKey
-- newDeref
-- offBy1
-- regexpMust
-- ruleguard # Not enabled by default
-#- singleCaseSwitch # Enabled by default
-- sloppyLen
-- sloppyTypeAssert
-- switchTrue
-- typeSwitchVar
-- underef
-- unlambda
-- unslice
-- valSwap
-- wrapperFunc
-settings:
-ruleguard:
-rules: "${configDir}/bin/rules.go"
333  CONTRIBUTING.md
@@ -1,8 +1,8 @@
# Contributing to rclone

This is a short guide on how to contribute things to rclone.

## Reporting a bug

If you've just got a question or aren't sure if you've found a bug
then please use the [rclone forum](https://forum.rclone.org/) instead
@@ -12,13 +12,13 @@ When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](https://beta.rclone.org/):

- Rclone version (e.g. output from `rclone version`)
- Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
- The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
- A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
  - if the log contains secrets then edit the file with a text editor first to obscure them

## Submitting a new feature or bug fix

If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub.
@@ -73,9 +73,9 @@ This is typically enough if you made a simple bug fix, otherwise please read the

Make sure you

- Add [unit tests](#testing) for a new feature.
- Add [documentation](#writing-documentation) for a new feature.
- [Commit your changes](#committing-your-changes) using the [commit message guidelines](#commit-messages).

When you are done with that push your changes to GitHub:

@@ -88,9 +88,9 @@ Your changes will then get reviewed and you might get asked to fix some stuff. I

You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).

## Using Git and GitHub

### Committing your changes

Follow the guideline for [commit messages](#commit-messages) and then:

@@ -107,7 +107,7 @@ You can modify the message or changes in the latest commit using:

If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).

### Replacing your previously pushed commits

Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.

@@ -115,7 +115,7 @@ Your previously pushed commits are replaced by:

    git push --force origin my-new-feature

### Basing your changes on the latest master

To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):

@@ -149,21 +149,13 @@ If you squash commits that have been pushed to GitHub, then you will have to [re

Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.

### GitHub Continuous Integration

rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.

## Testing

### Code quality tests

If you install [golangci-lint](https://github.com/golangci/golangci-lint) then you can run the same tests as get run in the CI which can be very helpful.

You can run them with `make check` or with `golangci-lint run ./...`.

Using these tests ensures that the rclone codebase all uses the same coding standards. These tests also check for easy mistakes to make (like forgetting to check an error return).

### Quick testing

rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.
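As a quick local reference, typical invocations might look like the following (a sketch only; the `quicktest` and `check` targets are assumed to be defined in the Makefile):

```
go test ./...     # run the quick tests from the repository root
make quicktest    # the same thing via the Makefile
make check        # the code quality tests, if golangci-lint is installed
```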
@@ -176,7 +168,7 @@ You can also use `make`, if supported by your platform

The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.

### Backend testing

rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud
@@ -209,9 +201,9 @@ altogether with an HTML report and test retries then from the
project root:

    go install github.com/rclone/rclone/fstest/test_all
    test_all -backends drive

### Full integration testing

If you want to run all the integration tests against all the remotes,
then change into the project root and run
@@ -226,56 +218,55 @@ The commands may require some extra go packages which you can install with
The full integration tests are run daily on the integration test server. You can
find the results at https://pub.rclone.org/integration-tests/

## Code Organisation

Rclone code is organised into a small number of top level directories
with modules beneath.

- backend - the rclone backends for interfacing to cloud providers -
  - all - import this to load all the cloud providers
  - ...providers
- bin - scripts for use while building or maintaining rclone
- cmd - the rclone commands
  - all - import this to load all the commands
  - ...commands
- cmdtest - end-to-end tests of commands, flags, environment variables,...
- docs - the documentation and website
  - content - adjust these docs only - everything else is autogenerated
    - command - these are auto-generated - edit the corresponding .go file
- fs - main rclone definitions - minimal amount of code
  - accounting - bandwidth limiting and statistics
  - asyncreader - an io.Reader which reads ahead
  - config - manage the config file and flags
  - driveletter - detect if a name is a drive letter
  - filter - implements include/exclude filtering
  - fserrors - rclone specific error handling
  - fshttp - http handling for rclone
  - fspath - path handling for rclone
  - hash - defines rclone's hash types and functions
  - list - list a remote
  - log - logging facilities
  - march - iterates directories in lock step
  - object - in memory Fs objects
  - operations - primitives for sync, e.g. Copy, Move
  - sync - sync directories
  - walk - walk a directory
- fstest - provides integration test framework
  - fstests - integration tests for the backends
  - mockdir - mocks an fs.Directory
  - mockobject - mocks an fs.Object
  - test_all - Runs integration tests for everything
- graphics - the images used in the website, etc.
- lib - libraries used by the backend
  - atexit - register functions to run when rclone exits
  - dircache - directory ID to name caching
  - oauthutil - helpers for using oauth
  - pacer - retries with backoff and paces operations
  - readers - a selection of useful io.Readers
  - rest - a thin abstraction over net/http for REST
- librclone - in memory interface to rclone's API for embedding rclone
- vfs - Virtual FileSystem layer for implementing rclone mount and similar

## Writing Documentation

If you are adding a new feature then please update the documentation.

@@ -286,22 +277,22 @@ alphabetical order.
If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field.

- Start with the most important information about the option,
  as a single sentence on a single line.
  - This text will be used for the command-line flag help.
  - It will be combined with other information, such as any default value,
    and the result will look odd if not written as a single sentence.
  - It should end with a period/full stop character, which will be shown
    in docs but automatically removed when producing the flag help.
  - Try to keep it below 80 characters, to reduce text wrapping in the terminal.
- More details can be added in a new paragraph, after an empty line (`"\n\n"`).
  - Like with docs generated from Markdown, a single line break is ignored
    and two line breaks creates a new paragraph.
  - This text will be shown to the user in `rclone config`
    and in the docs (where it will be added by `make backenddocs`,
    normally run some time before next release).
- To create options of enumeration type use the `Examples:` field.
  - Each example value has its own `Help:` field, but they are treated
    a bit different than the main option help text. They will be shown
    as an unordered list, therefore a single line break is enough to
    create a new list item. Also, for enumeration texts like name of
@@ -321,12 +312,12 @@ combined unmodified with other information (such as any default value).
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy.

## Making a release

There are separate instructions for making a release in the RELEASE.md
file.

## Commit messages

Please make the first line of your commit message a summary of the
change that a user (not a developer) of rclone would like to read, and
@@ -367,7 +358,7 @@ error fixing the hang.
Fixes #1498
```

## Adding a dependency

rclone uses the [go
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
@@ -379,7 +370,7 @@ To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency and add it to
`go.mod` and `go.sum`.

    go get github.com/ncw/new_dependency

You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.
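If you do need a constraint, a hedged example of pinning a specific version (the module path and version below are placeholders, reusing the example name above) is:

```
go get github.com/ncw/new_dependency@v1.2.3
go mod tidy
```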
@@ -387,15 +378,15 @@ go docs linked above), but don't unless you really need to.
Please check in the changes generated by `go mod` including `go.mod`
and `go.sum` in the same commit as your other changes.

## Updating a dependency

If you need to update a dependency then run

    go get golang.org/x/crypto

Check in a single commit as above.

## Updating all the dependencies

In order to update all the dependencies then run `make update`. This
just uses the go modules to update all the modules to their latest
@@ -404,7 +395,7 @@ stable release. Check in the changes in a single commit as above.
This should be done early in the release cycle to pick up new versions
of packages in time for them to get some testing.

## Updating a backend

If you update a backend then please run the unit tests and the
integration tests for that backend.
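As an illustrative sketch only, using the `remote`/`TestRemote` placeholder names from the backend sections that follow, that might look like:

```
cd backend/remote
go test -v                       # unit tests for the backend
go test -v -remote TestRemote:   # integration tests for that backend
```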
@@ -419,133 +410,87 @@ integration tests.

The next section goes into more detail about the tests.

## Writing a new backend

Choose a name. The docs here will use `remote` as an example.

Note that in rclone terminology a file system backend is called a
remote or an fs.

### Research

- Look at the interfaces defined in `fs/types.go`
- Study one or more of the existing remotes

### Getting going

- Create `backend/remote/remote.go` (copy this from a similar remote)
  - box is a good one to start from if you have a directory-based remote (and shows how to use the directory cache)
  - b2 is a good one to start from if you have a bucket-based remote
- Add your remote to the imports in `backend/all/all.go`
- HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good Go SDK from the provider then use that instead.
- Try to implement as many optional methods as possible as it makes the remote more usable.
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
  - `rclone purge -v TestRemote:rclone-info`
  - `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
  - `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
  - open `remote.csv` in a spreadsheet and examine

### Guidelines for a speedy merge

- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend.
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) if your backend is HTTP based - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
- **Do** follow your example backend exactly - use the same code order, function names, layout, structure. **Don't** move stuff around and **Don't** delete the comments.
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few backends like that - don't follow them!)
- **Do** put your API type definitions in a separate file - by preference `api/types.go`
- **Remember** we have >50 backends to maintain so keeping them as similar as possible to each other is a high priority!

### Unit tests

- Create a config entry called `TestRemote` for the unit tests to use
- Create a `backend/remote/remote_test.go` - copy and adjust your example remote
- Make sure all tests pass with `go test -v`

### Integration tests

- Add your backend to `fstest/test_all/config.yaml`
  - Once you've done that then you can use the integration test framework from the project root:
  - go install ./...
  - test_all -backends remote

Or if you want to run the integration tests manually:

- Make sure integration tests pass with
  - `cd fs/operations`
  - `go test -v -remote TestRemote:`
  - `cd fs/sync`
  - `go test -v -remote TestRemote:`
- If your remote defines `ListR` check with this also
  - `go test -v -remote TestRemote: -fast-list`

See the [testing](#testing) section for more information on integration tests.

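Run end to end, the manual steps above boil down to something like this (illustrative only; `TestRemote` must exist in your config):

```
cd fs/operations && go test -v -remote TestRemote:
cd ../sync && go test -v -remote TestRemote:
go test -v -remote TestRemote: -fast-list   # only if the remote defines ListR
```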
### Backend documentation

Add your backend to the docs - you'll need to pick an icon for it from
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
alphabetical order of full name of remote (e.g. `drive` is ordered as
`Google Drive`) but with the local file system last.

- `README.md` - main GitHub page
- `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
  - make sure this has the `autogenerated options` comments in (see your reference backend docs)
  - update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table.
- `docs/content/docs.md` - list of remotes in config section
- `docs/content/_index.md` - front page of rclone.org
- `docs/layouts/chrome/navbar.html` - add it to the website navigation
- `bin/make_manual.py` - add the page to the `docs` constant

Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.

## Adding a new s3 provider

It is quite easy to add a new S3 provider to rclone.

You'll need to modify the following files

- `backend/s3/s3.go`
  - Add the provider to `providerOption` at the top of the file
  - Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
  - Exclude your provider from generic config questions (eg `region` and `endpoint`).
  - Add the provider to the `setQuirks` function - see the documentation there.
- `docs/content/s3.md`
  - Add the provider at the top of the page.
  - Add a section about the provider linked from there.
  - Add a transcript of a trial `rclone config` session
    - Edit the transcript to remove things which might change in subsequent versions
  - **Do not** alter or add to the autogenerated parts of `s3.md`
  - **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
- `README.md` - this is the home page in github
  - Add the provider and a link to the section you wrote in `docs/contents/s3.md`
- `docs/content/_index.md` - this is the home page of rclone.org
  - Add the provider and a link to the section you wrote in `docs/contents/s3.md`

When adding the provider, endpoints, quirks, docs etc keep them in
alphabetical order by `Provider` name, but with `AWS` first and
`Other` last.

Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.

Once you've written the code, test `rclone config` works to your
satisfaction, and check the integration tests work `go test -v -remote
NewS3Provider:`. You may need to adjust the quirks to get them to
pass. Some providers just can't pass the tests with control characters
in the names so if these fail and the provider doesn't support
`urlEncodeListings` in the quirks then ignore them. Note that the
`SetTier` test may also fail on non AWS providers.

For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone/rclone/commit/eb3082a1ebdb76d5625f14cedec3f5154a5e7b10).

## Writing a plugin

New features (backends, commands) can also be added "out-of-tree", through Go plugins.
Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.

### Usage

- Naming
  - Plugin names must have the pattern `librcloneplugin_KIND_NAME.so`.
@@ -560,7 +505,7 @@ This is useful if you can't merge your changes upstream or don't want to maintai
  - Plugins must be compiled against the exact version of rclone to work.
    (The rclone used during building the plugin must be the same as the source of rclone)

### Building

To turn your existing additions into a Go plugin, move them to an external repository
and change the top-level package name to `main`.
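For instance, a hedged sketch of the naming and build steps, where the backend name `mybackend` is only an example:

```
cd /path/to/your/plugin            # top-level package is now "main"
go build -buildmode=plugin -o librcloneplugin_backend_mybackend.so .
```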
@@ -571,18 +516,4 @@ Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.

[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)

[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)

## Keeping a backend or command out of tree

Rclone was designed to be modular so it is very easy to keep a backend
or a command out of the main rclone source tree.

So for example if you had a backend which accessed your proprietary
systems or a command which was specialised for your needs you could
add them out of tree.

This may be easier than using a plugin and is supported on all
platforms not just macOS and Linux.

This is explained further in https://github.com/rclone/rclone_out_of_tree_example
which has an example of an out of tree backend `ram` (which is a
renamed version of the `memory` backend).

45  Dockerfile
@@ -1,47 +1,18 @@
FROM golang:alpine AS builder

ARG CGO_ENABLED=0

WORKDIR /go/src/github.com/rclone/rclone/

RUN echo "**** Set Go Environment Variables ****" && \
    go env -w GOCACHE=/root/.cache/go-build

RUN echo "**** Install Dependencies ****" && \
    apk add --no-cache \
        make \
        bash \
        gawk \
        git

COPY go.mod .
COPY go.sum .

RUN echo "**** Download Go Dependencies ****" && \
    go mod download -x

RUN echo "**** Verify Go Dependencies ****" && \
    go mod verify

COPY . .

RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \
    echo "**** Build Binary ****" && \
    make

RUN echo "**** Print Version Binary ****" && \
    ./rclone version

# Begin final image
FROM alpine:latest

RUN echo "**** Install Dependencies ****" && \
    apk add --no-cache \
        ca-certificates \
        fuse3 \
        tzdata && \
    echo "Enable user_allow_other in fuse" && \
    echo "user_allow_other" >> /etc/fuse.conf

COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
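A usage sketch for this Dockerfile (the image tag `rclone:dev` is arbitrary):

```
docker build -t rclone:dev .
docker run --rm rclone:dev version
```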
@@ -21,8 +21,6 @@ Current active maintainers of rclone are:
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
| Hideo Aoyama | @boukendesho | snap packaging |
| nielash | @nielash | bisync |
| Dan McArdle | @dmcardle | gitannex |
| Sam Harrison | @childish-sambino | filescom |

**This is a work in progress Draft**
41035  MANUAL.html (generated): file diff suppressed because it is too large
44243  MANUAL.txt (generated): file diff suppressed because it is too large
50  Makefile
@@ -30,37 +30,29 @@ ifdef RELEASE_TAG
	TAG := $(RELEASE_TAG)
endif
GO_VERSION := $(shell go version)
GO_OS := $(shell go env GOOS)
ifdef BETA_SUBDIR
	BETA_SUBDIR := /$(BETA_SUBDIR)
endif
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := beta.rclone.org:
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
# Pass in GOTAGS=xyz on the make command line to set build tags
ifdef GOTAGS
BUILDTAGS=-tags "$(GOTAGS)"
LINTTAGS=--build-tags "$(GOTAGS)"
endif
LDFLAGS=--ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)"

.PHONY: rclone test_all vars version

rclone:
ifeq ($(GO_OS),windows)
	go run bin/resource_windows.go -version $(TAG) -syso resource_windows_`go env GOARCH`.syso
endif
	go build -v $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS)
ifeq ($(GO_OS),windows)
	rm resource_windows_`go env GOARCH`.syso
endif
	mkdir -p `go env GOPATH`/bin/
	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
	mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`

test_all:
	go install $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all

vars:
	@echo SHELL="'$(SHELL)'"
@@ -74,10 +66,6 @@ btest:
	@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
	@echo "Copied markdown of beta release to clip board"

btesth:
	@echo "<a href="$(BETA_URL)">$(TAG)</a> on branch <a href="https://github.com/rclone/rclone/tree/$(BRANCH)">$(BRANCH)</a> (uploaded in 15-30 mins)" | xclip -r -sel clip -t text/html
	@echo "Copied beta release in HTML to clip board"

version:
	@echo '$(TAG)'
@@ -104,12 +92,6 @@ check: rclone

# Get the build dependencies
build_dep:
	go run bin/get-github-release.go -use-api -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'

# Get the release dependencies we only install on linux
release_dep_linux:
	go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest

# Get the release dependencies we only install on Windows
release_dep_windows:
	GOOS="" GOARCH="" go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest

# Update dependencies
showupdates:
	@echo "*** Direct dependencies that could be updated ***"
@@ -144,21 +136,17 @@ MANUAL.txt: MANUAL.md
	pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt

commanddocs: rclone
	-@rmdir -p '$$HOME/.config/rclone'
	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)

backenddocs: rclone bin/make_backend_docs.py
	-@rmdir -p '$$HOME/.config/rclone'
	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)

rcdocs: rclone
	bin/make_rc_docs.sh

install: rclone
	install -d ${DESTDIR}/usr/bin
	install ${GOPATH}/bin/rclone ${DESTDIR}/usr/bin

clean:
	go clean ./...
@@ -172,7 +160,7 @@ website:
	@if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi

upload_website: website
	rclone -v sync docs/public www.rclone.org:

upload_test_website: website
	rclone -P sync docs/public test-rclone-org:
@@ -199,8 +187,8 @@ check_sign:
	cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c

upload:
	rclone -P copy build/ downloads.rclone.org:/$(TAG)
	rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "downloads.rclone.org:/$(TAG)/$$i" "downloads.rclone.org:/$$j"'

upload_github:
	./bin/upload-github $(TAG)
@@ -210,7 +198,7 @@ cross: doc

beta:
	go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
	rclone -v copy build/ pub.rclone.org:/$(TAG)
	@echo Beta release ready at https://pub.rclone.org/$(TAG)/

log_since_last_release:
@@ -223,18 +211,18 @@ ci_upload:
	sudo chown -R $$USER build
	find build -type l -delete
	gzip -r9v build
	./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
	./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
	@echo Beta release ready at $(BETA_URL)/testbuilds

ci_beta:
	git log $(LAST_TAG).. > /tmp/git-log.txt
	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
	rclone --no-check-dest --config bin/ci.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
	rclone --no-check-dest --config bin/ci.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
endif
	@echo Beta release ready at $(BETA_URL)
@@ -243,7 +231,7 @@ fetch_binaries:
	rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/

serve: website
	cd docs && hugo server --logLevel info -w --disableFastRender

tag: retag doc
	bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
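A few illustrative invocations of these targets, assuming Go and the build dependencies are installed (`cmount` is just an example build tag):

```
make rclone                 # build the binary and copy it to $GOPATH/bin
make rclone GOTAGS=cmount   # build with extra build tags
make check                  # run the code quality tests
make serve                  # build the website and serve it locally
```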
25  README.md
@@ -1,5 +1,3 @@
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
@@ -25,6 +23,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
@@ -39,21 +38,14 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
* FileLu [:page_facing_up:](https://rclone.org/filelu/)
* Files.com [:page_facing_up:](https://rclone.org/filescom/)
* FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GoFile [:page_facing_up:](https://rclone.org/gofile/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
* Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
* iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
* ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
@@ -61,16 +53,11 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
* Linkbox [:page_facing_up:](https://rclone.org/linkbox)
* Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
* Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* MEGA [:page_facing_up:](https://rclone.org/mega/)
* MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega)
* Memory [:page_facing_up:](https://rclone.org/memory/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
@@ -80,12 +67,10 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
@@ -94,12 +79,9 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
@@ -107,12 +89,10 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
|||||||
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
||||||
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
|
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
|
||||||
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
|
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
|
||||||
* Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
|
|
||||||
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
|
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
|
||||||
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
|
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
|
||||||
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
|
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
|
||||||
* Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
|
* Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
|
||||||
* Zata.ai [:page_facing_up:](https://rclone.org/s3/#Zata)
|
|
||||||
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
|
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
|
||||||
|
|
||||||
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
|
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
|
||||||
@@ -137,7 +117,6 @@ These backends adapt or modify other storage providers
|
|||||||
* Partial syncs supported on a whole file basis
|
* Partial syncs supported on a whole file basis
|
||||||
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
|
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
|
||||||
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
|
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
|
||||||
* [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync bidirectionally
|
|
||||||
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
|
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
|
||||||
* Can sync to and from network, e.g. two different cloud accounts
|
* Can sync to and from network, e.g. two different cloud accounts
|
||||||
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
|
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
|
||||||
|
|||||||
RELEASE.md (107 changed lines)
@@ -37,52 +37,16 @@ This file describes how to make the various kinds of releases

## Update dependencies

- Early in the next release cycle update the dependencies.
+ Early in the next release cycle update the dependencies

* Review any pinned packages in go.mod and remove if possible
- * `make updatedirect`
- * `make GOTAGS=cmount`
- * `make compiletest`
- * Fix anything which doesn't compile at this point and commit changes here
- * `git commit -a -v -m "build: update all dependencies"`
+ * make updatedirect
+ * make
+ * git commit -a -v
+ * make update
+ * make
-
- If the `make updatedirect` upgrades the version of go in the `go.mod`
-
-     go 1.22.0
-
- then go to manual mode. `go1.22` here is the lowest supported version
- in the `go.mod`.
-
- If `make updatedirect` added a `toolchain` directive then remove it.
- We don't want to force a toolchain on our users. Linux packagers are
- often using a version of Go that is a few versions out of date.
-
- ```
- go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
- go get -d $(cat /tmp/potential-upgrades)
- go mod tidy -go=1.22 -compat=1.22
- ```
-
- If the `go mod tidy` fails use the output from it to remove the
- package which can't be upgraded from `/tmp/potential-upgrades` when
- done
-
- ```
- git co go.mod go.sum
- ```
-
- And try again.
-
- Optionally upgrade the direct and indirect dependencies. This is very
- likely to fail if the manual method was used above - in that case
- ignore it as it is too time consuming to fix.
-
- * `make update`
- * `make GOTAGS=cmount`
- * `make compiletest`
* roll back any updates which didn't compile
- * `git commit -a -v --amend`
+ * git commit -a -v --amend
- * **NB** watch out for this changing the default go version in `go.mod`

Note that `make update` updates all direct and indirect dependencies
and there can occasionally be forwards compatibility problems with
@@ -90,19 +54,6 @@ doing that so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to
build.
-
- Once it compiles locally, push it on a test branch and commit fixes
- until the tests pass.
-
- ### Major versions
-
- The above procedure will not upgrade major versions, so v2 to v3.
- However this tool can show which major versions might need to be
- upgraded:
-
-     go run github.com/icholy/gomajor@latest list -major
-
- Expect API breakage when updating major versions.

## Tidy beta

At some point after the release run
@@ -131,21 +82,14 @@ Now

* git co ${BASE_TAG}-stable
* git cherry-pick any fixes
- * make startstable
* Do the steps as above
+ * make startstable
* git co master
* `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
* git checkout ${BASE_TAG}-stable docs/content/changelog.md
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* git push
-
- ## Sponsor logos
-
- If updating the website note that the sponsor logos have been moved out of the main repository.
-
- You will need to checkout `/docs/static/img/logos` from https://github.com/rclone/third-party-logos
- which is a private repo containing artwork from sponsors.

## Update the website between releases

Create an update website branch based off the last release
@@ -170,23 +114,32 @@ Cherry pick any changes back to master and the stable branch if it is active.

## Making a manual build of docker

- To do a basic build of rclone's docker image to debug builds locally:
+ The rclone docker image should autobuild via GitHub actions. If it doesn't
+ or needs to be updated then rebuild like this.
+
+ See: https://github.com/ilteoood/docker_buildx/issues/19
+ See: https://github.com/ilteoood/docker_buildx/blob/master/scripts/install_buildx.sh

```
- docker buildx build --load -t rclone/rclone:testing --progress=plain .
- docker run --rm rclone/rclone:testing version
+ git co v1.54.1
+ docker pull golang
+ export DOCKER_CLI_EXPERIMENTAL=enabled
+ docker buildx create --name actions_builder --use
+ docker run --rm --privileged docker/binfmt:820fdd95a9972a5308930a2bdfb8573dd4447ad3
+ docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
+ SUPPORTED_PLATFORMS=$(docker buildx inspect --bootstrap | grep 'Platforms:*.*' | cut -d : -f2,3)
+ echo "Supported platforms: $SUPPORTED_PLATFORMS"
+ docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
+ docker buildx stop actions_builder
```

- To test the multiplatform build
+ ### Old build for linux/amd64 only

```
- docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
- ```
-
- To make a full build then set the tags correctly and add `--push`
-
- Note that you can't only build one architecture - you need to build them all.
-
- ```
- docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
+ docker pull golang
+ docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .
+ docker push rclone/rclone:1.52.0
+ docker push rclone/rclone:1.52
+ docker push rclone/rclone:1
+ docker push rclone/rclone:latest
```
@@ -23,8 +23,8 @@ func prepare(t *testing.T, root string) {
configfile.Install()

// Configure the remote
- config.FileSetValue(remoteName, "type", "alias")
- config.FileSetValue(remoteName, "remote", root)
+ config.FileSet(remoteName, "type", "alias")
+ config.FileSet(remoteName, "remote", root)
}

func TestNewFS(t *testing.T) {
@@ -81,12 +81,10 @@ func TestNewFS(t *testing.T) {
for i, gotEntry := range gotEntries {
what := fmt.Sprintf("%s, entry=%d", what, i)
wantEntry := test.entries[i]
- _, isDir := gotEntry.(fs.Directory)

require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
- if !isDir {
- require.Equal(t, wantEntry.size, gotEntry.Size(), what)
- }
+ require.Equal(t, wantEntry.size, gotEntry.Size(), what)
+ _, isDir := gotEntry.(fs.Directory)

require.Equal(t, wantEntry.isDir, isDir, what)
}
}
@@ -4,37 +4,29 @@ package all
import (
// Active file systems
_ "github.com/rclone/rclone/backend/alias"
+ _ "github.com/rclone/rclone/backend/amazonclouddrive"
_ "github.com/rclone/rclone/backend/azureblob"
- _ "github.com/rclone/rclone/backend/azurefiles"
_ "github.com/rclone/rclone/backend/b2"
_ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/chunker"
- _ "github.com/rclone/rclone/backend/cloudinary"
_ "github.com/rclone/rclone/backend/combine"
_ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt"
- _ "github.com/rclone/rclone/backend/doi"
_ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/dropbox"
_ "github.com/rclone/rclone/backend/fichier"
_ "github.com/rclone/rclone/backend/filefabric"
- _ "github.com/rclone/rclone/backend/filelu"
- _ "github.com/rclone/rclone/backend/filescom"
_ "github.com/rclone/rclone/backend/ftp"
- _ "github.com/rclone/rclone/backend/gofile"
_ "github.com/rclone/rclone/backend/googlecloudstorage"
_ "github.com/rclone/rclone/backend/googlephotos"
_ "github.com/rclone/rclone/backend/hasher"
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/hidrive"
_ "github.com/rclone/rclone/backend/http"
- _ "github.com/rclone/rclone/backend/iclouddrive"
- _ "github.com/rclone/rclone/backend/imagekit"
_ "github.com/rclone/rclone/backend/internetarchive"
_ "github.com/rclone/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/koofr"
- _ "github.com/rclone/rclone/backend/linkbox"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/mailru"
_ "github.com/rclone/rclone/backend/mega"
@@ -45,7 +37,6 @@ import (
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak"
- _ "github.com/rclone/rclone/backend/pixeldrain"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/protondrive"
_ "github.com/rclone/rclone/backend/putio"
@@ -60,7 +51,6 @@ import (
_ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
- _ "github.com/rclone/rclone/backend/ulozto"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/uptobox"
_ "github.com/rclone/rclone/backend/webdav"
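
The hunk above only adds and removes blank imports: `backend/all` exists so that each backend package's `init()` runs and registers itself with rclone. As a rough illustration of that side-effect-import pattern (this is not rclone's actual `fs.Register` API; the registry, names and constructors below are made up for the sketch):

```go
package main

import "fmt"

// registry maps backend names to constructors. In rclone the equivalent
// registration happens in fs.Register, triggered purely by the blank
// imports listed in backend/all above.
var registry = map[string]func() string{}

// register would normally be called from each backend package's init().
func register(name string, ctor func() string) {
	registry[name] = ctor
}

func init() {
	// Each of these calls would live in its own backend package and run
	// only because something blank-imports that package.
	register("alias", func() string { return "alias backend" })
	register("local", func() string { return "local backend" })
}

func main() {
	for name, ctor := range registry {
		fmt.Println(name, "->", ctor())
	}
}
```

Adding or dropping a backend in this scheme is exactly one import line, which is why the diff for removing backends from the stable branch is nothing but `_ "github.com/rclone/rclone/backend/..."` lines.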
backend/amazonclouddrive/amazonclouddrive.go — 1369 lines (file diff suppressed because it is too large)
backend/amazonclouddrive/amazonclouddrive_test.go — 21 lines
@@ -0,0 +1,21 @@
+ // Test AmazonCloudDrive filesystem interface
+
+ //go:build acd
+ // +build acd
+
+ package amazonclouddrive_test
+
+ import (
+ "testing"
+
+ "github.com/rclone/rclone/backend/amazonclouddrive"
+ "github.com/rclone/rclone/fs"
+ "github.com/rclone/rclone/fstest/fstests"
+ )
+
+ // TestIntegration runs integration tests against the remote
+ func TestIntegration(t *testing.T) {
+ fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
+ fstests.RemoteName = "TestAmazonCloudDrive:"
+ fstests.Run(t)
+ }
(File diff suppressed because it is too large.)
@@ -1,151 +1,37 @@
//go:build !plan9 && !solaris && !js
+ // +build !plan9,!solaris,!js

package azureblob

import (
- "context"
- "encoding/base64"
- "strings"
"testing"

- "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
- "github.com/rclone/rclone/fs"
- "github.com/rclone/rclone/fstest"
- "github.com/rclone/rclone/fstest/fstests"
- "github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
)

- func TestBlockIDCreator(t *testing.T) {
- // Check creation and random number
- bic, err := newBlockIDCreator()
- require.NoError(t, err)
- bic2, err := newBlockIDCreator()
- require.NoError(t, err)
- assert.NotEqual(t, bic.random, bic2.random)
- assert.NotEqual(t, bic.random, [8]byte{})
-
- // Set random to known value for tests
- bic.random = [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
- chunkNumber := uint64(0xFEDCBA9876543210)
-
- // Check creation of ID
- want := base64.StdEncoding.EncodeToString([]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 1, 2, 3, 4, 5, 6, 7, 8})
- assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", want)
- got := bic.newBlockID(chunkNumber)
- assert.Equal(t, want, got)
- assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", got)
-
- // Test checkID is working
- assert.NoError(t, bic.checkID(chunkNumber, got))
- assert.ErrorContains(t, bic.checkID(chunkNumber, "$"+got), "illegal base64")
- assert.ErrorContains(t, bic.checkID(chunkNumber, "AAAA"+got), "bad block ID length")
- assert.ErrorContains(t, bic.checkID(chunkNumber+1, got), "expecting decoded")
- assert.ErrorContains(t, bic2.checkID(chunkNumber, got), "random bytes")
- }
-
- func (f *Fs) testFeatures(t *testing.T) {
- // Check first feature flags are set on this remote
+ func (f *Fs) InternalTest(t *testing.T) {
+ // Check first feature flags are set on this
+ // remote
enabled := f.Features().SetTier
assert.True(t, enabled)
enabled = f.Features().GetTier
assert.True(t, enabled)
}

- type ReadSeekCloser struct {
- *strings.Reader
- }
-
- func (r *ReadSeekCloser) Close() error {
- return nil
- }
-
- // Stage a block at remote but don't commit it
- func (f *Fs) stageBlockWithoutCommit(ctx context.Context, t *testing.T, remote string) {
- var (
- containerName, blobPath = f.split(remote)
- containerClient = f.cntSVC(containerName)
- blobClient = containerClient.NewBlockBlobClient(blobPath)
- data = "uncommitted data"
- blockID = "1"
- blockIDBase64 = base64.StdEncoding.EncodeToString([]byte(blockID))
- )
- r := &ReadSeekCloser{strings.NewReader(data)}
- _, err := blobClient.StageBlock(ctx, blockIDBase64, r, nil)
- require.NoError(t, err)
-
- // Verify the block is staged but not committed
- blockList, err := blobClient.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
- require.NoError(t, err)
- found := false
- for _, block := range blockList.UncommittedBlocks {
- if *block.Name == blockIDBase64 {
- found = true
- break
- }
+ func TestIncrement(t *testing.T) {
+ for _, test := range []struct {
+ in [8]byte
+ want [8]byte
+ }{
+ {[8]byte{0, 0, 0, 0}, [8]byte{1, 0, 0, 0}},
+ {[8]byte{0xFE, 0, 0, 0}, [8]byte{0xFF, 0, 0, 0}},
+ {[8]byte{0xFF, 0, 0, 0}, [8]byte{0, 1, 0, 0}},
+ {[8]byte{0, 1, 0, 0}, [8]byte{1, 1, 0, 0}},
+ {[8]byte{0xFF, 0xFF, 0xFF, 0xFE}, [8]byte{0, 0, 0, 0xFF}},
+ {[8]byte{0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 1}},
+ {[8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 0, 0, 0}},
+ } {
+ increment(&test.in)
+ assert.Equal(t, test.want, test.in)
}
- require.True(t, found, "Block ID not found in uncommitted blocks")
- }
-
- // This tests uploading a blob where it has uncommitted blocks with a different ID size.
- //
- // https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/
- //
- // TestIntegration/FsMkdir/FsPutFiles/Internal/WriteUncommittedBlocks
- func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
- var (
- ctx = context.Background()
- remote = "testBlob"
- )
-
- // Multipart copy the blob please
- oldUseCopyBlob, oldCopyCutoff := f.opt.UseCopyBlob, f.opt.CopyCutoff
- f.opt.UseCopyBlob = false
- f.opt.CopyCutoff = f.opt.ChunkSize
- defer func() {
- f.opt.UseCopyBlob, f.opt.CopyCutoff = oldUseCopyBlob, oldCopyCutoff
- }()
-
- // Create a blob with uncommitted blocks
- f.stageBlockWithoutCommit(ctx, t, remote)
-
- // Now attempt to overwrite the block with a different sized block ID to provoke this error
-
- // Check the object does not exist
- _, err := f.NewObject(ctx, remote)
- require.Equal(t, fs.ErrorObjectNotFound, err)
-
- // Upload a multipart file over the block with uncommitted chunks of a different ID size
- size := 4*int(f.opt.ChunkSize) - 1
- contents := random.String(size)
- item := fstest.NewItem(remote, contents, fstest.Time("2001-05-06T04:05:06.499Z"))
- o := fstests.PutTestContents(ctx, t, f, &item, contents, true)
-
- // Check size
- assert.Equal(t, int64(size), o.Size())
-
- // Create a new blob with uncommitted blocks
- newRemote := "testBlob2"
- f.stageBlockWithoutCommit(ctx, t, newRemote)
-
- // Copy over that block
- dst, err := f.Copy(ctx, o, newRemote)
- require.NoError(t, err)
-
- // Check basics
- assert.Equal(t, int64(size), dst.Size())
- assert.Equal(t, newRemote, dst.Remote())
-
- // Check contents
- gotContents := fstests.ReadObject(ctx, t, dst, -1)
- assert.Equal(t, contents, gotContents)
-
- // Remove the object
- require.NoError(t, dst.Remove(ctx))
- }
-
- func (f *Fs) InternalTest(t *testing.T) {
- t.Run("Features", f.testFeatures)
- t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
}
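
The removed `TestBlockIDCreator` above pins down the block ID layout used on the newer side of this diff: eight big-endian bytes of the chunk number followed by eight random bytes, base64 encoded. A minimal sketch that reproduces the expected value from the test; the real creator draws its eight-byte suffix from a random source rather than the fixed bytes used here:

```go
package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

func main() {
	// Values taken from the removed test: fixed chunk number and a
	// fixed stand-in for the random suffix.
	chunkNumber := uint64(0xFEDCBA9876543210)
	random := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}

	// 8 big-endian bytes of chunk number + 8 "random" bytes = 16 bytes.
	id := make([]byte, 0, 16)
	id = binary.BigEndian.AppendUint64(id, chunkNumber)
	id = append(id, random[:]...)

	fmt.Println(base64.StdEncoding.EncodeToString(id))
	// Output: /ty6mHZUMhABAgMEBQYHCA==
}
```

Because the chunk number is embedded in the ID, a later check can decode an ID and verify both the chunk number and the random suffix, which is what the removed `checkID` assertions exercise.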
@@ -1,6 +1,7 @@
// Test AzureBlob filesystem interface

//go:build !plan9 && !solaris && !js
+ // +build !plan9,!solaris,!js

package azureblob

@@ -15,17 +16,13 @@ import (

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
- name := "TestAzureBlob"
fstests.Run(t, &fstests.Opt{
- RemoteName: name + ":",
+ RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
- TiersToTest: []string{"Hot", "Cool", "Cold"},
+ TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
- ExtraConfig: []fstests.ExtraConfigItem{
- {Name: name, Key: "use_copy_blob", Value: "false"},
- },
})
}

@@ -38,13 +35,12 @@ func TestIntegration2(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
- TiersToTest: []string{"Hot", "Cool", "Cold"},
+ TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
- {Name: name, Key: "use_copy_blob", Value: "false"},
},
})
}
@@ -53,13 +49,8 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}

- func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
- return f.setCopyCutoff(cs)
- }

var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
- _ fstests.SetCopyCutoffer = (*Fs)(nil)
)

func TestValidateAccessTier(t *testing.T) {
@@ -71,7 +62,6 @@ func TestValidateAccessTier(t *testing.T) {
"HOT": {"HOT", true},
"Hot": {"Hot", true},
"cool": {"cool", true},
- "cold": {"cold", true},
"archive": {"archive", true},
"empty": {"", false},
"unknown": {"unknown", false},
@@ -2,6 +2,6 @@
// about "no buildable Go source files "

//go:build plan9 || solaris || js
+ // +build plan9 solaris js

- // Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob
(File diff suppressed because it is too large.)
@@ -1,69 +0,0 @@
- //go:build !plan9 && !js
-
- package azurefiles
-
- import (
- "context"
- "math/rand"
- "strings"
- "testing"
-
- "github.com/rclone/rclone/fstest/fstests"
- "github.com/stretchr/testify/assert"
- )
-
- func (f *Fs) InternalTest(t *testing.T) {
- t.Run("Authentication", f.InternalTestAuth)
- }
-
- var _ fstests.InternalTester = (*Fs)(nil)
-
- func (f *Fs) InternalTestAuth(t *testing.T) {
- t.Skip("skipping since this requires authentication credentials which are not part of repo")
- shareName := "test-rclone-oct-2023"
- testCases := []struct {
- name string
- options *Options
- }{
- {
- name: "ConnectionString",
- options: &Options{
- ShareName: shareName,
- ConnectionString: "",
- },
- },
- {
- name: "AccountAndKey",
- options: &Options{
- ShareName: shareName,
- Account: "",
- Key: "",
- }},
- {
- name: "SASUrl",
- options: &Options{
- ShareName: shareName,
- SASURL: "",
- }},
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- fs, err := newFsFromOptions(context.TODO(), "TestAzureFiles", "", tc.options)
- assert.NoError(t, err)
- dirName := randomString(10)
- assert.NoError(t, fs.Mkdir(context.TODO(), dirName))
- })
- }
- }
-
- const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX"
-
- func randomString(charCount int) string {
- strBldr := strings.Builder{}
- for range charCount {
- randPos := rand.Int63n(52)
- strBldr.WriteByte(chars[randPos])
- }
- return strBldr.String()
- }
@@ -1,17 +0,0 @@
- //go:build !plan9 && !js
-
- package azurefiles
-
- import (
- "testing"
-
- "github.com/rclone/rclone/fstest/fstests"
- )
-
- func TestIntegration(t *testing.T) {
- var objPtr *Object
- fstests.Run(t, &fstests.Opt{
- RemoteName: "TestAzureFiles:",
- NilObject: objPtr,
- })
- }
@@ -1,7 +0,0 @@
- // Build for azurefiles for unsupported platforms to stop go complaining
- // about "no buildable Go source files "
-
- //go:build plan9 || js
-
- // Package azurefiles provides an interface to Microsoft Azure Files
- package azurefiles
@@ -33,19 +33,10 @@ var _ fserrors.Fataler = (*Error)(nil)

// Bucket describes a B2 bucket
type Bucket struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
- LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
- }
-
- // LifecycleRule is a single lifecycle rule
- type LifecycleRule struct {
- DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
- DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"`
- DaysFromStartingToCancelingUnfinishedLargeFiles *int `json:"daysFromStartingToCancelingUnfinishedLargeFiles"`
- FileNamePrefix string `json:"fileNamePrefix"`
}

// Timestamp is a UTC time when this file was uploaded. It is a base
@@ -130,10 +121,10 @@ type AuthorizeAccountResponse struct {
AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
AccountID string `json:"accountId"` // The identifier for the account.
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
- NamePrefix any `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
+ NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
} `json:"allowed"`
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
@@ -215,10 +206,9 @@ type FileInfo struct {

// CreateBucketRequest is used to create a bucket
type CreateBucketRequest struct {
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
- LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
}

// DeleteBucketRequest is used to create a bucket
@@ -341,11 +331,3 @@ type CopyPartRequest struct {
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
}
-
- // UpdateBucketRequest describes a request to modify a B2 bucket
- type UpdateBucketRequest struct {
- ID string `json:"bucketId"`
- AccountID string `json:"accountId"`
- Type string `json:"bucketType,omitempty"`
- LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
- }
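
The `LifecycleRules` fields removed above are plain JSON arrays marked `omitempty`, so bucket requests with no rules marshal exactly as the older code's requests did. A hedged sketch using local, partial copies of the structs (rclone's own definitions live in the b2 api package; the field subset, account and bucket names are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// LifecycleRule is a trimmed-down local copy of the rule shown in the hunk.
type LifecycleRule struct {
	DaysFromHidingToDeleting  *int   `json:"daysFromHidingToDeleting"`
	DaysFromUploadingToHiding *int   `json:"daysFromUploadingToHiding"`
	FileNamePrefix            string `json:"fileNamePrefix"`
}

// CreateBucketRequest mirrors the request shape from the hunk above.
type CreateBucketRequest struct {
	AccountID      string          `json:"accountId"`
	Name           string          `json:"bucketName"`
	Type           string          `json:"bucketType"`
	LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
}

func main() {
	days := 30
	req := CreateBucketRequest{
		AccountID: "account123", // hypothetical values
		Name:      "my-bucket",
		Type:      "allPrivate",
		// With a nil slice the omitempty tag drops the field entirely,
		// keeping requests without lifecycle rules unchanged.
		LifecycleRules: []LifecycleRule{{DaysFromHidingToDeleting: &days}},
	}
	out, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(out))
}
```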
@@ -42,11 +42,11 @@ func TestTimestampIsZero(t *testing.T) {
}

func TestTimestampEqual(t *testing.T) {
- assert.False(t, emptyT.Equal(emptyT)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
+ assert.False(t, emptyT.Equal(emptyT))
assert.False(t, t0.Equal(emptyT))
assert.False(t, emptyT.Equal(t0))
assert.False(t, t0.Equal(t1))
assert.False(t, t1.Equal(t0))
- assert.True(t, t0.Equal(t0)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
- assert.True(t, t1.Equal(t1)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
+ assert.True(t, t0.Equal(t0))
+ assert.True(t, t1.Equal(t1))
}
backend/b2/b2.go (440 changed lines)
@@ -9,14 +9,12 @@ import (
"bytes"
"context"
"crypto/sha1"
- "encoding/json"
"errors"
"fmt"
gohash "hash"
"io"
"net/http"
"path"
- "slices"
"strconv"
"strings"
"sync"
@@ -31,8 +29,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
- "github.com/rclone/rclone/fs/list"
+ "github.com/rclone/rclone/fs/walk"
- "github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/multipart"
@@ -62,7 +59,6 @@ const (
defaultChunkSize = 96 * fs.Mebi
defaultUploadCutoff = 200 * fs.Mebi
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
- defaultMaxAge = 24 * time.Hour
)

// Globals
@@ -77,7 +73,6 @@ func init() {
Name: "b2",
Description: "Backblaze B2",
NewFs: NewFs,
- CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "account",
Help: "Account ID or Application Key ID.",
@@ -104,7 +99,7 @@ below will cause b2 to return specific errors:
* "force_cap_exceeded"

These will be set in the "X-Bz-Test-Mode" header which is documented
- in the [b2 integrations checklist](https://www.backblaze.com/docs/cloud-storage-integration-checklist).`,
+ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`,
Default: "",
Hide: fs.OptionHideConfigurator,
Advanced: true,
@@ -196,12 +191,9 @@ Example:
Advanced: true,
}, {
Name: "download_auth_duration",
- Help: `Time before the public link authorization token will expire in s or suffix ms|s|m|h|d.
+ Help: `Time before the authorization token will expire in s or suffix ms|s|m|h|d.

- This is used in combination with "rclone link" for making files
- accessible to the public and sets the duration before the download
- authorization token will expire.
+ The duration before the download authorization token will expire.
The minimum value is 1 second. The maximum value is one week.`,
Default: fs.Duration(7 * 24 * time.Hour),
Advanced: true,
@@ -217,36 +209,11 @@ The minimum value is 1 second. The maximum value is one week.`,
Advanced: true,
Hide: fs.OptionHideBoth,
Help: `Whether to use mmap buffers in internal memory pool. (no longer used)`,
- }, {
- Name: "lifecycle",
- Help: `Set the number of days deleted files should be kept when creating a bucket.
-
- On bucket creation, this parameter is used to create a lifecycle rule
- for the entire bucket.
-
- If lifecycle is 0 (the default) it does not create a lifecycle rule so
- the default B2 behaviour applies. This is to create versions of files
- on delete and overwrite and to keep them indefinitely.
-
- If lifecycle is >0 then it creates a single rule setting the number of
- days before a file that is deleted or overwritten is deleted
- permanently. This is known as daysFromHidingToDeleting in the b2 docs.
-
- The minimum value for this parameter is 1 day.
-
- You can also enable hard_delete in the config also which will mean
- deletions won't cause versions but overwrites will still cause
- versions to be made.
-
- See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket creation.
- `,
- Default: 0,
- Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
- // See: https://www.backblaze.com/docs/cloud-storage-files
+ // See: https://www.backblaze.com/b2/docs/files.html
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
// FIXME: allow /, but not leading, trailing or double
Default: (encoder.Display |
@@ -272,7 +239,6 @@ type Options struct {
DisableCheckSum bool `config:"disable_checksum"`
DownloadURL string `config:"download_url"`
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
- Lifecycle int `config:"lifecycle"`
Enc encoder.MultiEncoder `config:"encoding"`
}

@@ -365,7 +331,7 @@ var retryErrorCodes = []int{
504, // Gateway Time-out
}

- // shouldRetryNoReauth returns a boolean as to whether this resp and err
+ // shouldRetryNoAuth returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetryNoReauth(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
@@ -406,18 +372,11 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b

// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
- body, err := rest.ReadBody(resp)
- if err != nil {
- fs.Errorf(nil, "Couldn't read error out of body: %v", err)
- body = nil
- }
- // Decode error response if there was one - they can be blank
+ // Decode error response
errResponse := new(api.Error)
- if len(body) > 0 {
- err = json.Unmarshal(body, errResponse)
- if err != nil {
- fs.Errorf(nil, "Couldn't decode error response: %v", err)
- }
+ err := rest.DecodeJSON(resp, &errResponse)
+ if err != nil {
+ fs.Debugf(nil, "Couldn't decode error response: %v", err)
}
if errResponse.Code == "" {
errResponse.Code = "unknown"
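
The `errorHandler` change above moves from decoding the response stream directly to reading the whole body first and only decoding when it is non-empty, so blank error bodies no longer produce decode errors. A standard-library sketch of that defensive pattern; `apiError` is a stand-in for the backend's `api.Error` and the helper name is made up:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// apiError mirrors the general shape of a B2 error payload for illustration.
type apiError struct {
	Status  int    `json:"status"`
	Code    string `json:"code"`
	Message string `json:"message"`
}

// decodeError reads the body first and only decodes it if there was one,
// falling back to an "unknown" code when the body is blank or unparseable.
func decodeError(resp *http.Response) error {
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		body = nil // keep going - the status code is still useful
	}
	errResponse := new(apiError)
	if len(body) > 0 {
		if jsonErr := json.Unmarshal(body, errResponse); jsonErr != nil {
			fmt.Println("couldn't decode error response:", jsonErr)
		}
	}
	if errResponse.Code == "" {
		errResponse.Code = "unknown"
	}
	if errResponse.Status == 0 {
		errResponse.Status = resp.StatusCode
	}
	return fmt.Errorf("%d: %s: %s", errResponse.Status, errResponse.Code, errResponse.Message)
}

func main() {
	resp := &http.Response{
		StatusCode: 503,
		Body:       io.NopCloser(strings.NewReader("")), // blank error body
	}
	fmt.Println(decodeError(resp))
}
```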
@@ -461,14 +420,6 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
return
}

- func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
- err = checkUploadChunkSize(cs)
- if err == nil {
- old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs
- }
- return
- }
-
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
@@ -519,11 +470,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
- ChunkWriterDoesntSeek: true,
}).Fill(ctx, f)
// Set the test flag if required
if opt.TestMode != "" {
@@ -590,7 +540,12 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {

// hasPermission returns if the current AuthorizationToken has the selected permission
func (f *Fs) hasPermission(permission string) bool {
- return slices.Contains(f.info.Allowed.Capabilities, permission)
+ for _, capability := range f.info.Allowed.Capabilities {
+ if capability == permission {
+ return true
+ }
+ }
+ return false
}

// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
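
The `hasPermission` hunk is a mechanical swap between a hand-written loop and `slices.Contains` from the standard library (Go 1.21+). For reference, both forms side by side; the capability names are just examples:

```go
package main

import (
	"fmt"
	"slices"
)

// hasPermissionLoop is the long-hand version kept on the stable branch.
func hasPermissionLoop(capabilities []string, permission string) bool {
	for _, capability := range capabilities {
		if capability == permission {
			return true
		}
	}
	return false
}

func main() {
	capabilities := []string{"listBuckets", "readFiles", "writeFiles"}

	// Both calls are equivalent; slices.Contains just needs Go 1.21+.
	fmt.Println(hasPermissionLoop(capabilities, "writeFiles")) // true
	fmt.Println(slices.Contains(capabilities, "deleteFiles"))  // false
}
```

Keeping the loop on the stable branch avoids raising the minimum Go version there, which is consistent with the `"slices"` import also being dropped earlier in this diff.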
@@ -869,7 +824,7 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
|
|||||||
|
|
||||||
// listBuckets returns all the buckets to out
|
// listBuckets returns all the buckets to out
|
||||||
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
|
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
|
||||||
err = f.listBucketsToFn(ctx, "", func(bucket *api.Bucket) error {
|
err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
|
||||||
d := fs.NewDir(bucket.Name, time.Time{})
|
d := fs.NewDir(bucket.Name, time.Time{})
|
||||||
entries = append(entries, d)
|
entries = append(entries, d)
|
||||||
return nil
|
return nil
|
||||||
@@ -918,7 +873,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
// of listing recursively that doing a directory traversal.
|
// of listing recursively that doing a directory traversal.
|
||||||
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
||||||
bucket, directory := f.split(dir)
|
bucket, directory := f.split(dir)
|
||||||
list := list.NewHelper(callback)
|
list := walk.NewListRHelper(callback)
|
||||||
listR := func(bucket, directory, prefix string, addBucket bool) error {
|
listR := func(bucket, directory, prefix string, addBucket bool) error {
|
||||||
last := ""
|
last := ""
|
||||||
return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
|
return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
|
||||||
@@ -962,14 +917,11 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 type listBucketFn func(*api.Bucket) error
 
 // listBucketsToFn lists the buckets to the function supplied
-func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error {
+func (f *Fs) listBucketsToFn(ctx context.Context, fn listBucketFn) error {
    var account = api.ListBucketsRequest{
        AccountID: f.info.AccountID,
        BucketID:  f.info.Allowed.BucketID,
    }
-   if bucketName != "" && account.BucketID == "" {
-       account.BucketName = f.opt.Enc.FromStandardName(bucketName)
-   }
 
    var response api.ListBucketsResponse
    opts := rest.Opts{
@@ -1015,7 +967,7 @@ func (f *Fs) getbucketType(ctx context.Context, bucket string) (bucketType strin
    if bucketType != "" {
        return bucketType, nil
    }
-   err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error {
+   err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
        // listBucketsToFn reads bucket Types
        return nil
    })
@@ -1050,7 +1002,7 @@ func (f *Fs) getBucketID(ctx context.Context, bucket string) (bucketID string, e
    if bucketID != "" {
        return bucketID, nil
    }
-   err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error {
+   err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
        // listBucketsToFn sets IDs
        return nil
    })
@@ -1114,11 +1066,6 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
        Name: f.opt.Enc.FromStandardName(bucket),
        Type: "allPrivate",
    }
-   if f.opt.Lifecycle > 0 {
-       request.LifecycleRules = []api.LifecycleRule{{
-           DaysFromHidingToDeleting: &f.opt.Lifecycle,
-       }}
-   }
    var response api.Bucket
    err := f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
@@ -1246,7 +1193,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
 // if oldOnly is true then it deletes only non current files.
 //
 // Implemented here so we can make sure we delete old versions.
-func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden bool, deleteUnfinished bool, maxAge time.Duration) error {
+func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
    bucket, directory := f.split(dir)
    if bucket == "" {
        return errors.New("can't purge from root")
@@ -1264,14 +1211,14 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
        }
    }
    var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
-       return time.Since(time.Time(timestamp)) > maxAge
+       return time.Since(time.Time(timestamp)).Hours() > 24
    }
 
    // Delete Config.Transfers in parallel
    toBeDeleted := make(chan *api.File, f.ci.Transfers)
    var wg sync.WaitGroup
    wg.Add(f.ci.Transfers)
-   for range f.ci.Transfers {
+   for i := 0; i < f.ci.Transfers; i++ {
        go func() {
            defer wg.Done()
            for object := range toBeDeleted {
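The two changed lines above only swap loop syntax (a Go 1.22 integer range versus a counted loop); both versions drive the same bounded worker pool that drains the toBeDeleted channel. A minimal, self-contained sketch of that pattern follows, with a plain string channel and a print standing in for the real *api.File values and the deleteByID call:

package main

import (
	"fmt"
	"sync"
)

// deleteAll fans work out to `workers` goroutines reading from one channel,
// mirroring the purge code: a buffered channel sized to the worker count,
// a WaitGroup to wait for drain, and close() to signal end of input.
func deleteAll(items []string, workers int) {
	toBeDeleted := make(chan string, workers)
	var wg sync.WaitGroup
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func() {
			defer wg.Done()
			for item := range toBeDeleted {
				fmt.Println("deleting", item) // stand-in for deleteByID
			}
		}()
	}
	for _, item := range items {
		toBeDeleted <- item
	}
	close(toBeDeleted)
	wg.Wait()
}

func main() {
	deleteAll([]string{"a", "b", "c"}, 2)
}

Closing the channel is what ends each worker's range loop, so no sentinel values are needed.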
@@ -1287,21 +1234,6 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
            }
        }()
    }
-   if oldOnly {
-       if deleteHidden && deleteUnfinished {
-           fs.Infof(f, "cleaning bucket %q of all hidden files, and pending multipart uploads older than %v", bucket, maxAge)
-       } else if deleteHidden {
-           fs.Infof(f, "cleaning bucket %q of all hidden files", bucket)
-       } else if deleteUnfinished {
-           fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than %v", bucket, maxAge)
-       } else {
-           fs.Errorf(f, "cleaning bucket %q of nothing. This should never happen!", bucket)
-           return nil
-       }
-   } else {
-       fs.Infof(f, "cleaning bucket %q of all files", bucket)
-   }
-
    last := ""
    checkErr(f.list(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
        if !isDirectory {
@@ -1312,24 +1244,18 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
            tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
            if oldOnly && last != remote {
                // Check current version of the file
-               if deleteHidden && object.Action == "hide" {
+               if object.Action == "hide" {
                    fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
-                   if !operations.SkipDestructive(ctx, object.Name, "remove hide marker") {
-                       toBeDeleted <- object
-                   }
-               } else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
+                   toBeDeleted <- object
+               } else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
                    fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
-                   if !operations.SkipDestructive(ctx, object.Name, "remove pending upload") {
-                       toBeDeleted <- object
-                   }
+                   toBeDeleted <- object
                } else {
-                   fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp)))
+                   fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
                }
            } else {
                fs.Debugf(remote, "Deleting (id %q)", object.ID)
-               if !operations.SkipDestructive(ctx, object.Name, "delete") {
-                   toBeDeleted <- object
-               }
+               toBeDeleted <- object
            }
            last = remote
            tr.Done(ctx, nil)
@@ -1347,17 +1273,12 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
 
 // Purge deletes all the files and directories including the old versions.
 func (f *Fs) Purge(ctx context.Context, dir string) error {
-   return f.purge(ctx, dir, false, false, false, defaultMaxAge)
+   return f.purge(ctx, dir, false)
 }
 
-// CleanUp deletes all hidden files and pending multipart uploads older than 24 hours.
+// CleanUp deletes all the hidden files.
 func (f *Fs) CleanUp(ctx context.Context) error {
-   return f.purge(ctx, "", true, true, true, defaultMaxAge)
-}
-
-// cleanUp deletes all hidden files and/or pending multipart uploads older than the specified age.
-func (f *Fs) cleanUp(ctx context.Context, deleteHidden bool, deleteUnfinished bool, maxAge time.Duration) (err error) {
-   return f.purge(ctx, "", true, deleteHidden, deleteUnfinished, maxAge)
+   return f.purge(ctx, "", true)
 }
 
 // copy does a server-side copy from dstObj <- srcObj
@@ -1365,7 +1286,7 @@ func (f *Fs) cleanUp(ctx context.Context, deleteHidden bool, deleteUnfinished bo
 // If newInfo is nil then the metadata will be copied otherwise it
 // will be replaced with newInfo
 func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *api.File) (err error) {
-   if srcObj.size > int64(f.opt.CopyCutoff) {
+   if srcObj.size >= int64(f.opt.CopyCutoff) {
        if newInfo == nil {
            newInfo, err = srcObj.getMetaData(ctx)
            if err != nil {
@@ -1569,7 +1490,7 @@ func (o *Object) Size() int64 {
 //
 // Make sure it is lower case.
 //
-// Remove unverified prefix - see https://www.backblaze.com/docs/cloud-storage-upload-files-with-the-native-api
+// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
 // Some tools (e.g. Cyberduck) use this
 func cleanSHA1(sha1 string) string {
    const unverified = "unverified:"
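Only the doc comment and the unverified constant are visible in this hunk, so the following is a sketch of the normalisation those comments describe rather than a copy of the real function body: lower-case the SHA1 header value and strip the unverified: prefix that tools such as Cyberduck prepend.

package main

import (
	"fmt"
	"strings"
)

// cleanSHA1 lower-cases a SHA1 header value and strips the "unverified:"
// prefix, as the comments in the hunk above describe. Illustrative only.
func cleanSHA1(sha1 string) string {
	const unverified = "unverified:"
	return strings.TrimPrefix(strings.ToLower(sha1), unverified)
}

func main() {
	fmt.Println(cleanSHA1("UNVERIFIED:3C192A1B9D"))
	// prints: 3c192a1b9d
}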
@@ -1596,11 +1517,7 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
    o.size = Size
    // Use the UploadTimestamp if can't get file info
    o.modTime = time.Time(UploadTimestamp)
-   err = o.parseTimeString(Info[timeKey])
-   if err != nil {
-       return err
-   }
-   return nil
+   return o.parseTimeString(Info[timeKey])
 }
 
 // decodeMetaData sets the metadata in the object from an api.File
@@ -1673,21 +1590,6 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
            return o.getMetaDataListing(ctx)
        }
    }
-
-   // If using versionAt we need to list the find the correct version.
-   if o.fs.opt.VersionAt.IsSet() {
-       info, err := o.getMetaDataListing(ctx)
-       if err != nil {
-           return nil, err
-       }
-
-       if info.Action == "hide" {
-           // Rerturn object not found error if the current version is deleted.
-           return nil, fs.ErrorObjectNotFound
-       }
-       return info, nil
-   }
-
    _, info, err = o.getOrHead(ctx, "HEAD", nil)
    return info, err
 }
@@ -1717,16 +1619,6 @@ func timeString(modTime time.Time) string {
    return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
 }
 
-// parseTimeStringHelper converts a decimal string number of milliseconds
-// elapsed since January 1, 1970 UTC into a time.Time
-func parseTimeStringHelper(timeString string) (time.Time, error) {
-   unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
-   if err != nil {
-       return time.Time{}, err
-   }
-   return time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC(), nil
-}
-
 // parseTimeString converts a decimal string number of milliseconds
 // elapsed since January 1, 1970 UTC into a time.Time and stores it in
 // the modTime variable.
@@ -1734,12 +1626,12 @@ func (o *Object) parseTimeString(timeString string) (err error) {
    if timeString == "" {
        return nil
    }
-   modTime, err := parseTimeStringHelper(timeString)
+   unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
    if err != nil {
        fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
        return nil
    }
-   o.modTime = modTime
+   o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
    return nil
 }
 
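Both sides of the last two hunks implement the same convention: the modification time travels as a decimal string of milliseconds since the Unix epoch in the src_last_modified_millis file info key. A small stand-alone sketch of the round trip; the helper names here are illustrative, not the backend's exported API:

package main

import (
	"fmt"
	"strconv"
	"time"
)

// toMillis renders a time as the decimal-milliseconds string b2 expects,
// the same arithmetic as timeString above.
func toMillis(t time.Time) string {
	return strconv.FormatInt(t.UnixNano()/1e6, 10)
}

// fromMillis parses the decimal-milliseconds string back into a UTC time,
// matching parseTimeString / parseTimeStringHelper.
func fromMillis(s string) (time.Time, error) {
	ms, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return time.Time{}, err
	}
	return time.Unix(ms/1e3, (ms%1e3)*1e6).UTC(), nil
}

func main() {
	t := time.Date(2001, 5, 6, 4, 5, 6, 499_000_000, time.UTC)
	s := toMillis(t)
	back, _ := fromMillis(s)
	fmt.Println(s, back) // 989121906499 2001-05-06 04:05:06.499 +0000 UTC
}

Note that precision below one millisecond is dropped on the way out, which is why the tests elsewhere in this diff pin expectations to .499 rather than .499999999.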
@@ -1816,14 +1708,14 @@ func (file *openFile) Close() (err error) {
 
    // Check to see we read the correct number of bytes
    if file.o.Size() != file.bytes {
-       return fmt.Errorf("corrupted on transfer: lengths differ want %d vs got %d", file.o.Size(), file.bytes)
+       return fmt.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
    }
 
    // Check the SHA1
    receivedSHA1 := file.o.sha1
    calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
    if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
-       return fmt.Errorf("corrupted on transfer: SHA1 hashes differ want %q vs got %q", receivedSHA1, calculatedSHA1)
+       return fmt.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
    }
 
    return nil
@@ -1893,19 +1785,13 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
        ContentType: resp.Header.Get("Content-Type"),
        Info:        Info,
    }
-
    // When reading files from B2 via cloudflare using
    // --b2-download-url cloudflare strips the Content-Length
    // headers (presumably so it can inject stuff) so use the old
    // length read from the listing.
-   // Additionally, the official examples return S3 headers
-   // instead of native, i.e. no file ID, use ones from listing.
    if info.Size < 0 {
        info.Size = o.size
    }
-   if info.ID == "" {
-       info.ID = o.id
-   }
    return resp, info, nil
 }
 
@@ -1955,7 +1841,7 @@ func init() {
 // urlEncode encodes in with % encoding
 func urlEncode(in string) string {
    var out bytes.Buffer
-   for i := range len(in) {
+   for i := 0; i < len(in); i++ {
        c := in[i]
        if noNeedToEncode[c] {
            _ = out.WriteByte(c)
@@ -1996,7 +1882,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
    if err == nil {
        fs.Debugf(o, "File is big enough for chunked streaming")
-       up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil, options...)
+       up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
        if err != nil {
            o.fs.putRW(rw)
            return err
@@ -2028,10 +1914,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
        return o.decodeMetaDataFileInfo(up.info)
    }
 
-   modTime, err := o.getModTime(ctx, src, options)
-   if err != nil {
-       return err
-   }
+   modTime := src.ModTime(ctx)
 
    calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
    if calculatedSha1 == "" {
@@ -2136,36 +2019,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    return o.decodeMetaDataFileInfo(&response)
 }
 
-// Get modTime from the source; if --metadata is set, fetch the src metadata and get it from there.
-// When metadata support is added to b2, this method will need a more generic name
-func (o *Object) getModTime(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (time.Time, error) {
-   modTime := src.ModTime(ctx)
-
-   // Fetch metadata if --metadata is in use
-   meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
-   if err != nil {
-       return time.Time{}, fmt.Errorf("failed to read metadata from source object: %w", err)
-   }
-   // merge metadata into request and user metadata
-   for k, v := range meta {
-       k = strings.ToLower(k)
-       // For now, the only metadata we're concerned with is "mtime"
-       switch k {
-       case "mtime":
-           // mtime in meta overrides source ModTime
-           metaModTime, err := time.Parse(time.RFC3339Nano, v)
-           if err != nil {
-               fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
-           } else {
-               modTime = metaModTime
-           }
-       default:
-           // Do nothing for now
-       }
-   }
-   return modTime, nil
-}
-
 // OpenChunkWriter returns the chunk size and a ChunkWriter
 //
 // Pass in the remote and the src object
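The removed getModTime helper gives an mtime entry from --metadata (RFC 3339) precedence over the source object's ModTime and falls back silently when the value does not parse. A stand-alone sketch of that precedence rule, using a plain map in place of fs.Metadata and invented names:

package main

import (
	"fmt"
	"time"
)

// resolveModTime returns meta["mtime"] when it parses as RFC 3339,
// otherwise it keeps the fallback time taken from the source object.
func resolveModTime(fallback time.Time, meta map[string]string) time.Time {
	if v, ok := meta["mtime"]; ok {
		if t, err := time.Parse(time.RFC3339Nano, v); err == nil {
			return t
		}
		// an unparsable value is logged and ignored in the real code
	}
	return fallback
}

func main() {
	src := time.Date(2020, 1, 2, 3, 4, 5, 0, time.UTC)
	meta := map[string]string{"mtime": "2009-05-06T04:05:06.499Z"}
	fmt.Println(resolveModTime(src, meta)) // 2009-05-06 04:05:06.499 +0000 UTC
}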
@@ -2183,7 +2036,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
    // Temporary Object under construction
    o := &Object{
        fs:     f,
-       remote: remote,
+       remote: src.Remote(),
    }
 
    bucket, _ := o.split()
@@ -2197,7 +2050,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
        Concurrency: o.fs.opt.UploadConcurrency,
        //LeavePartsOnError: o.fs.opt.LeavePartsOnError,
    }
-   up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
+   up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil)
    return info, up, err
 }
 
@@ -2226,200 +2079,6 @@ func (o *Object) ID() string {
|
|||||||
return o.id
|
return o.id
|
||||||
}
|
}
|
||||||
|
|
||||||
var lifecycleHelp = fs.CommandHelp{
|
|
||||||
Name: "lifecycle",
|
|
||||||
Short: "Read or set the lifecycle for a bucket",
|
|
||||||
Long: `This command can be used to read or set the lifecycle for a bucket.
|
|
||||||
|
|
||||||
Usage Examples:
|
|
||||||
|
|
||||||
To show the current lifecycle rules:
|
|
||||||
|
|
||||||
rclone backend lifecycle b2:bucket
|
|
||||||
|
|
||||||
This will dump something like this showing the lifecycle rules.
|
|
||||||
|
|
||||||
[
|
|
||||||
{
|
|
||||||
"daysFromHidingToDeleting": 1,
|
|
||||||
"daysFromUploadingToHiding": null,
|
|
||||||
"daysFromStartingToCancelingUnfinishedLargeFiles": null,
|
|
||||||
"fileNamePrefix": ""
|
|
||||||
}
|
|
||||||
]
|
|
||||||
|
|
||||||
If there are no lifecycle rules (the default) then it will just return [].
|
|
||||||
|
|
||||||
To reset the current lifecycle rules:
|
|
||||||
|
|
||||||
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
|
|
||||||
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
|
|
||||||
|
|
||||||
This will run and then print the new lifecycle rules as above.
|
|
||||||
|
|
||||||
Rclone only lets you set lifecycles for the whole bucket with the
|
|
||||||
fileNamePrefix = "".
|
|
||||||
|
|
||||||
You can't disable versioning with B2. The best you can do is to set
|
|
||||||
the daysFromHidingToDeleting to 1 day. You can enable hard_delete in
|
|
||||||
the config also which will mean deletions won't cause versions but
|
|
||||||
overwrites will still cause versions to be made.
|
|
||||||
|
|
||||||
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
|
|
||||||
|
|
||||||
See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
|
|
||||||
`,
|
|
||||||
Opts: map[string]string{
|
|
||||||
"daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
|
|
||||||
"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
|
|
||||||
"daysFromStartingToCancelingUnfinishedLargeFiles": "Cancels any unfinished large file versions after this many days",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
|
|
||||||
var newRule api.LifecycleRule
|
|
||||||
if daysStr := opt["daysFromHidingToDeleting"]; daysStr != "" {
|
|
||||||
days, err := strconv.Atoi(daysStr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("bad daysFromHidingToDeleting: %w", err)
|
|
||||||
}
|
|
||||||
newRule.DaysFromHidingToDeleting = &days
|
|
||||||
}
|
|
||||||
if daysStr := opt["daysFromUploadingToHiding"]; daysStr != "" {
|
|
||||||
days, err := strconv.Atoi(daysStr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("bad daysFromUploadingToHiding: %w", err)
|
|
||||||
}
|
|
||||||
newRule.DaysFromUploadingToHiding = &days
|
|
||||||
}
|
|
||||||
if daysStr := opt["daysFromStartingToCancelingUnfinishedLargeFiles"]; daysStr != "" {
|
|
||||||
days, err := strconv.Atoi(daysStr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("bad daysFromStartingToCancelingUnfinishedLargeFiles: %w", err)
|
|
||||||
}
|
|
||||||
newRule.DaysFromStartingToCancelingUnfinishedLargeFiles = &days
|
|
||||||
}
|
|
||||||
bucketName, _ := f.split("")
|
|
||||||
if bucketName == "" {
|
|
||||||
return nil, errors.New("bucket required")
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
skip := operations.SkipDestructive(ctx, name, "update lifecycle rules")
|
|
||||||
|
|
||||||
var bucket *api.Bucket
|
|
||||||
if !skip && (newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil) {
|
|
||||||
bucketID, err := f.getBucketID(ctx, bucketName)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "POST",
|
|
||||||
Path: "/b2_update_bucket",
|
|
||||||
}
|
|
||||||
var request = api.UpdateBucketRequest{
|
|
||||||
ID: bucketID,
|
|
||||||
AccountID: f.info.AccountID,
|
|
||||||
LifecycleRules: []api.LifecycleRule{newRule},
|
|
||||||
}
|
|
||||||
var response api.Bucket
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
|
|
||||||
return f.shouldRetry(ctx, resp, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
bucket = &response
|
|
||||||
} else {
|
|
||||||
err = f.listBucketsToFn(ctx, bucketName, func(b *api.Bucket) error {
|
|
||||||
bucket = b
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if bucket == nil {
|
|
||||||
return nil, fs.ErrorDirNotFound
|
|
||||||
}
|
|
||||||
return bucket.LifecycleRules, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var cleanupHelp = fs.CommandHelp{
|
|
||||||
Name: "cleanup",
|
|
||||||
Short: "Remove unfinished large file uploads.",
|
|
||||||
Long: `This command removes unfinished large file uploads of age greater than
|
|
||||||
max-age, which defaults to 24 hours.
|
|
||||||
|
|
||||||
Note that you can use --interactive/-i or --dry-run with this command to see what
|
|
||||||
it would do.
|
|
||||||
|
|
||||||
rclone backend cleanup b2:bucket/path/to/object
|
|
||||||
rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
|
|
||||||
|
|
||||||
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
|
|
||||||
`,
|
|
||||||
Opts: map[string]string{
|
|
||||||
"max-age": "Max age of upload to delete",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
|
|
||||||
maxAge := defaultMaxAge
|
|
||||||
if opt["max-age"] != "" {
|
|
||||||
maxAge, err = fs.ParseDuration(opt["max-age"])
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("bad max-age: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, f.cleanUp(ctx, false, true, maxAge)
|
|
||||||
}
|
|
||||||
|
|
||||||
var cleanupHiddenHelp = fs.CommandHelp{
|
|
||||||
Name: "cleanup-hidden",
|
|
||||||
Short: "Remove old versions of files.",
|
|
||||||
Long: `This command removes any old hidden versions of files.
|
|
||||||
|
|
||||||
Note that you can use --interactive/-i or --dry-run with this command to see what
|
|
||||||
it would do.
|
|
||||||
|
|
||||||
rclone backend cleanup-hidden b2:bucket/path/to/dir
|
|
||||||
`,
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
|
|
||||||
return nil, f.cleanUp(ctx, true, false, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
var commandHelp = []fs.CommandHelp{
|
|
||||||
lifecycleHelp,
|
|
||||||
cleanupHelp,
|
|
||||||
cleanupHiddenHelp,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Command the backend to run a named command
|
|
||||||
//
|
|
||||||
// The command run is name
|
|
||||||
// args may be used to read arguments from
|
|
||||||
// opts may be used to read optional arguments from
|
|
||||||
//
|
|
||||||
// The result should be capable of being JSON encoded
|
|
||||||
// If it is a string or a []string it will be shown to the user
|
|
||||||
// otherwise it will be JSON encoded and shown to the user like that
|
|
||||||
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
|
|
||||||
switch name {
|
|
||||||
case "lifecycle":
|
|
||||||
return f.lifecycleCommand(ctx, name, arg, opt)
|
|
||||||
case "cleanup":
|
|
||||||
return f.cleanupCommand(ctx, name, arg, opt)
|
|
||||||
case "cleanup-hidden":
|
|
||||||
return f.cleanupHiddenCommand(ctx, name, arg, opt)
|
|
||||||
default:
|
|
||||||
return nil, fs.ErrorCommandNotFound
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the interfaces are satisfied
|
// Check the interfaces are satisfied
|
||||||
var (
|
var (
|
||||||
_ fs.Fs = &Fs{}
|
_ fs.Fs = &Fs{}
|
||||||
@@ -2430,7 +2089,6 @@ var (
    _ fs.ListRer         = &Fs{}
    _ fs.PublicLinker    = &Fs{}
    _ fs.OpenChunkWriter = &Fs{}
-   _ fs.Commander       = &Fs{}
    _ fs.Object          = &Object{}
    _ fs.MimeTyper       = &Object{}
    _ fs.IDer            = &Object{}
|
|||||||
@@ -1,31 +1,23 @@
 package b2
 
 import (
+   "bytes"
    "context"
-   "crypto/sha1"
    "fmt"
-   "path"
-   "sort"
-   "strings"
    "testing"
    "time"
 
-   "github.com/rclone/rclone/backend/b2/api"
    "github.com/rclone/rclone/fs"
-   "github.com/rclone/rclone/fs/cache"
-   "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/object"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/fstest/fstests"
-   "github.com/rclone/rclone/lib/bucket"
    "github.com/rclone/rclone/lib/random"
-   "github.com/rclone/rclone/lib/version"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
 )
 
 // Test b2 string encoding
-// https://www.backblaze.com/docs/cloud-storage-native-api-string-encoding
+// https://www.backblaze.com/b2/docs/string_encoding.html
 
 var encodeTest = []struct {
    fullyEncoded string
@@ -186,434 +178,99 @@ func TestParseTimeString(t *testing.T) {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return a map of the headers in the options with keys stripped of the "x-bz-info-" prefix
|
// The integration tests do a reasonable job of testing the normal
|
||||||
func OpenOptionToMetaData(options []fs.OpenOption) map[string]string {
|
// copy but don't test the chunked copy.
|
||||||
var headers = make(map[string]string)
|
func (f *Fs) InternalTestChunkedCopy(t *testing.T) {
|
||||||
for _, option := range options {
|
|
||||||
k, v := option.Header()
|
|
||||||
k = strings.ToLower(k)
|
|
||||||
if strings.HasPrefix(k, headerPrefix) {
|
|
||||||
headers[k[len(headerPrefix):]] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return headers
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string, chunkSize string) {
|
|
||||||
what := fmt.Sprintf("Size%s/UploadCutoff%s/ChunkSize%s", size, uploadCutoff, chunkSize)
|
|
||||||
t.Run(what, func(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
ss := fs.SizeSuffix(0)
|
|
||||||
err := ss.Set(size)
|
|
||||||
require.NoError(t, err)
|
|
||||||
original := random.String(int(ss))
|
|
||||||
|
|
||||||
contents := fstest.Gz(t, original)
|
|
||||||
mimeType := "text/html"
|
|
||||||
|
|
||||||
if chunkSize != "" {
|
|
||||||
ss := fs.SizeSuffix(0)
|
|
||||||
err := ss.Set(chunkSize)
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = f.SetUploadChunkSize(ss)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if uploadCutoff != "" {
|
|
||||||
ss := fs.SizeSuffix(0)
|
|
||||||
err := ss.Set(uploadCutoff)
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = f.SetUploadCutoff(ss)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
|
|
||||||
btime := time.Now()
|
|
||||||
metadata := fs.Metadata{
|
|
||||||
// Just mtime for now - limit to milliseconds since x-bz-info-src_last_modified_millis can't support any
|
|
||||||
|
|
||||||
"mtime": "2009-05-06T04:05:06.499Z",
|
|
||||||
}
|
|
||||||
|
|
||||||
// Need to specify HTTP options with the header prefix since they are passed as-is
|
|
||||||
options := []fs.OpenOption{
|
|
||||||
&fs.HTTPOption{Key: "X-Bz-Info-a", Value: "1"},
|
|
||||||
&fs.HTTPOption{Key: "X-Bz-Info-b", Value: "2"},
|
|
||||||
}
|
|
||||||
|
|
||||||
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, mimeType, metadata, options...)
|
|
||||||
defer func() {
|
|
||||||
assert.NoError(t, obj.Remove(ctx))
|
|
||||||
}()
|
|
||||||
o := obj.(*Object)
|
|
||||||
gotMetadata, err := o.getMetaData(ctx)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// X-Bz-Info-a & X-Bz-Info-b
|
|
||||||
optMetadata := OpenOptionToMetaData(options)
|
|
||||||
for k, v := range optMetadata {
|
|
||||||
got := gotMetadata.Info[k]
|
|
||||||
assert.Equal(t, v, got, k)
|
|
||||||
}
|
|
||||||
|
|
||||||
assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
|
|
||||||
|
|
||||||
// Modification time from the x-bz-info-src_last_modified_millis header
|
|
||||||
var mtime api.Timestamp
|
|
||||||
err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
|
|
||||||
if err != nil {
|
|
||||||
fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
|
|
||||||
}
|
|
||||||
assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")
|
|
||||||
|
|
||||||
// Upload time
|
|
||||||
gotBtime := time.Time(gotMetadata.UploadTimestamp)
|
|
||||||
dt := gotBtime.Sub(btime)
|
|
||||||
assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))
|
|
||||||
|
|
||||||
t.Run("GzipEncoding", func(t *testing.T) {
|
|
||||||
// Test that the gzipped file we uploaded can be
|
|
||||||
// downloaded
|
|
||||||
checkDownload := func(wantContents string, wantSize int64, wantHash string) {
|
|
||||||
gotContents := fstests.ReadObject(ctx, t, o, -1)
|
|
||||||
assert.Equal(t, wantContents, gotContents)
|
|
||||||
assert.Equal(t, wantSize, o.Size())
|
|
||||||
gotHash, err := o.Hash(ctx, hash.SHA1)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, wantHash, gotHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Run("NoDecompress", func(t *testing.T) {
|
|
||||||
checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) InternalTestMetadata(t *testing.T) {
|
|
||||||
// 1 kB regular file
|
|
||||||
f.internalTestMetadata(t, "1kiB", "", "")
|
|
||||||
|
|
||||||
// 10 MiB large file
|
|
||||||
f.internalTestMetadata(t, "10MiB", "6MiB", "6MiB")
|
|
||||||
}
|
|
||||||
|
|
||||||
func sha1Sum(t *testing.T, s string) string {
|
|
||||||
hash := sha1.Sum([]byte(s))
|
|
||||||
return fmt.Sprintf("%x", hash)
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is adapted from the s3 equivalent.
|
|
||||||
func (f *Fs) InternalTestVersions(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
// Small pause to make the LastModified different since AWS
|
contents := random.String(8 * 1024 * 1024)
|
||||||
// only seems to track them to 1 second granularity
|
item := fstest.NewItem("chunked-copy", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
|
||||||
time.Sleep(2 * time.Second)
|
src := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||||
|
|
||||||
// Create an object
|
|
||||||
const dirName = "versions"
|
|
||||||
const fileName = dirName + "/" + "test-versions.txt"
|
|
||||||
contents := random.String(100)
|
|
||||||
item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
|
|
||||||
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
|
||||||
defer func() {
|
defer func() {
|
||||||
assert.NoError(t, obj.Remove(ctx))
|
assert.NoError(t, src.Remove(ctx))
|
||||||
}()
|
}()
|
||||||
objMetadata, err := obj.(*Object).getMetaData(ctx)
|
|
||||||
|
var itemCopy = item
|
||||||
|
itemCopy.Path += ".copy"
|
||||||
|
|
||||||
|
// Set copy cutoff to mininum value so we make chunks
|
||||||
|
origCutoff := f.opt.CopyCutoff
|
||||||
|
f.opt.CopyCutoff = minChunkSize
|
||||||
|
defer func() {
|
||||||
|
f.opt.CopyCutoff = origCutoff
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Do the copy
|
||||||
|
dst, err := f.Copy(ctx, src, itemCopy.Path)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer func() {
|
||||||
|
assert.NoError(t, dst.Remove(ctx))
|
||||||
|
}()
|
||||||
|
|
||||||
// Small pause
|
// Check size
|
||||||
time.Sleep(2 * time.Second)
|
assert.Equal(t, src.Size(), dst.Size())
|
||||||
|
|
||||||
// Remove it
|
// Check modtime
|
||||||
assert.NoError(t, obj.Remove(ctx))
|
srcModTime := src.ModTime(ctx)
|
||||||
|
dstModTime := dst.ModTime(ctx)
|
||||||
|
assert.True(t, srcModTime.Equal(dstModTime))
|
||||||
|
|
||||||
// Small pause to make the LastModified different since AWS only seems to track them to 1 second granularity
|
// Make sure contents are correct
|
||||||
time.Sleep(2 * time.Second)
|
gotContents := fstests.ReadObject(ctx, t, dst, -1)
|
||||||
|
assert.Equal(t, contents, gotContents)
|
||||||
|
}
|
||||||
|
|
||||||
// And create it with different size and contents
|
// The integration tests do a reasonable job of testing the normal
|
||||||
newContents := random.String(101)
|
// streaming upload but don't test the chunked streaming upload.
|
||||||
newItem := fstest.NewItem(fileName, newContents, fstest.Time("2002-05-06T04:05:06.499999999Z"))
|
func (f *Fs) InternalTestChunkedStreamingUpload(t *testing.T, size int) {
|
||||||
newObj := fstests.PutTestContents(ctx, t, f, &newItem, newContents, true)
|
ctx := context.Background()
|
||||||
newObjMetadata, err := newObj.(*Object).getMetaData(ctx)
|
contents := random.String(size)
|
||||||
|
item := fstest.NewItem(fmt.Sprintf("chunked-streaming-upload-%d", size), contents, fstest.Time("2001-05-06T04:05:06.499Z"))
|
||||||
|
|
||||||
|
// Set chunk size to mininum value so we make chunks
|
||||||
|
origOpt := f.opt
|
||||||
|
f.opt.ChunkSize = minChunkSize
|
||||||
|
f.opt.UploadCutoff = 0
|
||||||
|
defer func() {
|
||||||
|
f.opt = origOpt
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Do the streaming upload
|
||||||
|
src := object.NewStaticObjectInfo(item.Path, item.ModTime, -1, true, item.Hashes, f)
|
||||||
|
in := bytes.NewBufferString(contents)
|
||||||
|
dst, err := f.PutStream(ctx, in, src)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer func() {
|
||||||
|
assert.NoError(t, dst.Remove(ctx))
|
||||||
|
}()
|
||||||
|
|
||||||
t.Run("Versions", func(t *testing.T) {
|
// Check size
|
||||||
// Set --b2-versions for this test
|
assert.Equal(t, int64(size), dst.Size())
|
||||||
f.opt.Versions = true
|
|
||||||
defer func() {
|
|
||||||
f.opt.Versions = false
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Read the contents
|
// Check modtime
|
||||||
entries, err := f.List(ctx, dirName)
|
srcModTime := src.ModTime(ctx)
|
||||||
require.NoError(t, err)
|
dstModTime := dst.ModTime(ctx)
|
||||||
tests := 0
|
assert.Equal(t, srcModTime, dstModTime)
|
||||||
var fileNameVersion string
|
|
||||||
for _, entry := range entries {
|
|
||||||
t.Log(entry)
|
|
||||||
remote := entry.Remote()
|
|
||||||
if remote == fileName {
|
|
||||||
t.Run("ReadCurrent", func(t *testing.T) {
|
|
||||||
assert.Equal(t, newContents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
|
|
||||||
})
|
|
||||||
tests++
|
|
||||||
} else if versionTime, p := version.Remove(remote); !versionTime.IsZero() && p == fileName {
|
|
||||||
t.Run("ReadVersion", func(t *testing.T) {
|
|
||||||
assert.Equal(t, contents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
|
|
||||||
})
|
|
||||||
assert.WithinDuration(t, time.Time(objMetadata.UploadTimestamp), versionTime, time.Second, "object time must be with 1 second of version time")
|
|
||||||
fileNameVersion = remote
|
|
||||||
tests++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assert.Equal(t, 2, tests, "object missing from listing")
|
|
||||||
|
|
||||||
// Check we can read the object with a version suffix
|
// Make sure contents are correct
|
||||||
t.Run("NewObject", func(t *testing.T) {
|
gotContents := fstests.ReadObject(ctx, t, dst, -1)
|
||||||
o, err := f.NewObject(ctx, fileNameVersion)
|
assert.Equal(t, contents, gotContents, "Contents incorrect")
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, o)
|
|
||||||
assert.Equal(t, int64(100), o.Size(), o.Remote())
|
|
||||||
})
|
|
||||||
|
|
||||||
// Check we can make a NewFs from that object with a version suffix
|
|
||||||
t.Run("NewFs", func(t *testing.T) {
|
|
||||||
newPath := bucket.Join(fs.ConfigStringFull(f), fileNameVersion)
|
|
||||||
// Make sure --b2-versions is set in the config of the new remote
|
|
||||||
fs.Debugf(nil, "oldPath = %q", newPath)
|
|
||||||
lastColon := strings.LastIndex(newPath, ":")
|
|
||||||
require.True(t, lastColon >= 0)
|
|
||||||
newPath = newPath[:lastColon] + ",versions" + newPath[lastColon:]
|
|
||||||
fs.Debugf(nil, "newPath = %q", newPath)
|
|
||||||
fNew, err := cache.Get(ctx, newPath)
|
|
||||||
// This should return pointing to a file
|
|
||||||
require.Equal(t, fs.ErrorIsFile, err)
|
|
||||||
require.NotNil(t, fNew)
|
|
||||||
// With the directory above
|
|
||||||
assert.Equal(t, dirName, path.Base(fs.ConfigStringFull(fNew)))
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("VersionAt", func(t *testing.T) {
|
|
||||||
// We set --b2-version-at for this test so make sure we reset it at the end
|
|
||||||
defer func() {
|
|
||||||
f.opt.VersionAt = fs.Time{}
|
|
||||||
}()
|
|
||||||
|
|
||||||
var (
|
|
||||||
firstObjectTime = time.Time(objMetadata.UploadTimestamp)
|
|
||||||
secondObjectTime = time.Time(newObjMetadata.UploadTimestamp)
|
|
||||||
)
|
|
||||||
|
|
||||||
for _, test := range []struct {
|
|
||||||
what string
|
|
||||||
at time.Time
|
|
||||||
want []fstest.Item
|
|
||||||
wantErr error
|
|
||||||
wantSize int64
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
what: "Before",
|
|
||||||
at: firstObjectTime.Add(-time.Second),
|
|
||||||
want: fstests.InternalTestFiles,
|
|
||||||
wantErr: fs.ErrorObjectNotFound,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
what: "AfterOne",
|
|
||||||
at: firstObjectTime.Add(time.Second),
|
|
||||||
want: append([]fstest.Item{item}, fstests.InternalTestFiles...),
|
|
||||||
wantSize: 100,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
what: "AfterDelete",
|
|
||||||
at: secondObjectTime.Add(-time.Second),
|
|
||||||
want: fstests.InternalTestFiles,
|
|
||||||
wantErr: fs.ErrorObjectNotFound,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
what: "AfterTwo",
|
|
||||||
at: secondObjectTime.Add(time.Second),
|
|
||||||
want: append([]fstest.Item{newItem}, fstests.InternalTestFiles...),
|
|
||||||
wantSize: 101,
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
t.Run(test.what, func(t *testing.T) {
|
|
||||||
f.opt.VersionAt = fs.Time(test.at)
|
|
||||||
t.Run("List", func(t *testing.T) {
|
|
||||||
fstest.CheckListing(t, f, test.want)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("NewObject", func(t *testing.T) {
|
|
||||||
gotObj, gotErr := f.NewObject(ctx, fileName)
|
|
||||||
assert.Equal(t, test.wantErr, gotErr)
|
|
||||||
if gotErr == nil {
|
|
||||||
assert.Equal(t, test.wantSize, gotObj.Size())
|
|
||||||
}
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("Cleanup", func(t *testing.T) {
|
|
||||||
t.Run("DryRun", func(t *testing.T) {
|
|
||||||
f.opt.Versions = true
|
|
||||||
defer func() {
|
|
||||||
f.opt.Versions = false
|
|
||||||
}()
|
|
||||||
// Listing should be unchanged after dry run
|
|
||||||
before := listAllFiles(ctx, t, f, dirName)
|
|
||||||
ctx, ci := fs.AddConfig(ctx)
|
|
||||||
ci.DryRun = true
|
|
||||||
require.NoError(t, f.cleanUp(ctx, true, false, 0))
|
|
||||||
after := listAllFiles(ctx, t, f, dirName)
|
|
||||||
assert.Equal(t, before, after)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("RealThing", func(t *testing.T) {
|
|
||||||
f.opt.Versions = true
|
|
||||||
defer func() {
|
|
||||||
f.opt.Versions = false
|
|
||||||
}()
|
|
||||||
// Listing should reflect current state after cleanup
|
|
||||||
require.NoError(t, f.cleanUp(ctx, true, false, 0))
|
|
||||||
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
|
|
||||||
fstest.CheckListing(t, f, items)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
// Purge gets tested later
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) InternalTestCleanupUnfinished(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
// B2CleanupHidden tests cleaning up hidden files
|
|
||||||
t.Run("CleanupUnfinished", func(t *testing.T) {
|
|
||||||
dirName := "unfinished"
|
|
||||||
fileCount := 5
|
|
||||||
expectedFiles := []string{}
|
|
||||||
for i := 1; i < fileCount; i++ {
|
|
||||||
fileName := fmt.Sprintf("%s/unfinished-%d", dirName, i)
|
|
||||||
expectedFiles = append(expectedFiles, fileName)
|
|
||||||
obj := &Object{
|
|
||||||
fs: f,
|
|
||||||
remote: fileName,
|
|
||||||
}
|
|
||||||
objInfo := object.NewStaticObjectInfo(fileName, fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
|
|
||||||
_, err := f.newLargeUpload(ctx, obj, nil, objInfo, f.opt.ChunkSize, false, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
checkListing(ctx, t, f, dirName, expectedFiles)
|
|
||||||
|
|
||||||
t.Run("DryRun", func(t *testing.T) {
|
|
||||||
// Listing should not change after dry run
|
|
||||||
ctx, ci := fs.AddConfig(ctx)
|
|
||||||
ci.DryRun = true
|
|
||||||
require.NoError(t, f.cleanUp(ctx, false, true, 0))
|
|
||||||
checkListing(ctx, t, f, dirName, expectedFiles)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("RealThing", func(t *testing.T) {
|
|
||||||
// Listing should be empty after real cleanup
|
|
||||||
require.NoError(t, f.cleanUp(ctx, false, true, 0))
|
|
||||||
checkListing(ctx, t, f, dirName, []string{})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func listAllFiles(ctx context.Context, t *testing.T, f *Fs, dirName string) []string {
|
|
||||||
bucket, directory := f.split(dirName)
|
|
||||||
foundFiles := []string{}
|
|
||||||
require.NoError(t, f.list(ctx, bucket, directory, "", false, true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
|
|
||||||
if !isDirectory {
|
|
||||||
foundFiles = append(foundFiles, object.Name)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}))
|
|
||||||
sort.Strings(foundFiles)
|
|
||||||
return foundFiles
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkListing(ctx context.Context, t *testing.T, f *Fs, dirName string, expectedFiles []string) {
|
|
||||||
foundFiles := listAllFiles(ctx, t, f, dirName)
|
|
||||||
sort.Strings(expectedFiles)
|
|
||||||
assert.Equal(t, expectedFiles, foundFiles)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) InternalTestLifecycleRules(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
opt := map[string]string{}
|
|
||||||
|
|
||||||
t.Run("InitState", func(t *testing.T) {
|
|
||||||
// There should be no lifecycle rules at the outset
|
|
||||||
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
|
|
||||||
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, 0, len(lifecycleRules))
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("DryRun", func(t *testing.T) {
|
|
||||||
// There should still be no lifecycle rules after each dry run operation
|
|
||||||
ctx, ci := fs.AddConfig(ctx)
|
|
||||||
ci.DryRun = true
|
|
||||||
|
|
||||||
opt["daysFromHidingToDeleting"] = "30"
|
|
||||||
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
|
|
||||||
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, 0, len(lifecycleRules))
|
|
||||||
|
|
||||||
delete(opt, "daysFromHidingToDeleting")
|
|
||||||
opt["daysFromUploadingToHiding"] = "40"
|
|
||||||
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
|
|
||||||
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, 0, len(lifecycleRules))
|
|
||||||
|
|
||||||
opt["daysFromHidingToDeleting"] = "30"
|
|
||||||
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
|
|
||||||
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, 0, len(lifecycleRules))
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("RealThing", func(t *testing.T) {
|
|
||||||
opt["daysFromHidingToDeleting"] = "30"
|
|
||||||
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
|
|
||||||
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, 1, len(lifecycleRules))
|
|
||||||
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
|
|
||||||
|
|
||||||
delete(opt, "daysFromHidingToDeleting")
|
|
||||||
opt["daysFromUploadingToHiding"] = "40"
|
|
||||||
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
|
|
||||||
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, 1, len(lifecycleRules))
|
|
||||||
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
|
|
||||||
|
|
||||||
opt["daysFromHidingToDeleting"] = "30"
|
|
||||||
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
|
|
||||||
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, 1, len(lifecycleRules))
|
|
||||||
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
|
|
||||||
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// -run TestIntegration/FsMkdir/FsPutFiles/Internal
|
// -run TestIntegration/FsMkdir/FsPutFiles/Internal
|
||||||
func (f *Fs) InternalTest(t *testing.T) {
|
func (f *Fs) InternalTest(t *testing.T) {
|
||||||
t.Run("Metadata", f.InternalTestMetadata)
|
t.Run("ChunkedCopy", f.InternalTestChunkedCopy)
|
||||||
t.Run("Versions", f.InternalTestVersions)
|
for _, size := range []fs.SizeSuffix{
|
||||||
t.Run("CleanupUnfinished", f.InternalTestCleanupUnfinished)
|
minChunkSize - 1,
|
||||||
t.Run("LifecycleRules", f.InternalTestLifecycleRules)
|
minChunkSize,
|
||||||
|
minChunkSize + 1,
|
||||||
|
(3 * minChunkSize) / 2,
|
||||||
|
(5 * minChunkSize) / 2,
|
||||||
|
} {
|
||||||
|
t.Run(fmt.Sprintf("ChunkedStreamingUpload/%d", size), func(t *testing.T) {
|
||||||
|
f.InternalTestChunkedStreamingUpload(t, int(size))
|
||||||
|
})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ fstests.InternalTester = (*Fs)(nil)
|
var _ fstests.InternalTester = (*Fs)(nil)
|
||||||
|
|||||||
@@ -28,12 +28,7 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    return f.setUploadCutoff(cs)
 }
 
-func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-   return f.setCopyCutoff(cs)
-}
-
 var (
    _ fstests.SetUploadChunkSizer = (*Fs)(nil)
    _ fstests.SetUploadCutoffer   = (*Fs)(nil)
-   _ fstests.SetCopyCutoffer     = (*Fs)(nil)
 )
|
|||||||
@@ -1,6 +1,6 @@
 // Upload large files for b2
 //
-// Docs - https://www.backblaze.com/docs/cloud-storage-large-files
+// Docs - https://www.backblaze.com/b2/docs/large_files.html
 
 package b2
 
@@ -91,7 +91,7 @@ type largeUpload struct {
 // newLargeUpload starts an upload of object o from in with metadata in src
 //
 // If newInfo is set then metadata from that will be used instead of reading it from src
-func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File, options ...fs.OpenOption) (up *largeUpload, err error) {
+func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
    size := src.Size()
    parts := 0
    chunkSize := defaultChunkSize
@@ -104,6 +104,11 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
            parts++
        }
    }
+
+   opts := rest.Opts{
+       Method: "POST",
+       Path:   "/b2_start_large_file",
+   }
    bucket, bucketPath := o.split()
    bucketID, err := f.getBucketID(ctx, bucket)
    if err != nil {
@@ -113,27 +118,12 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
        BucketID: bucketID,
        Name:     f.opt.Enc.FromStandardPath(bucketPath),
    }
-   optionsToSend := make([]fs.OpenOption, 0, len(options))
    if newInfo == nil {
-       modTime, err := o.getModTime(ctx, src, options)
-       if err != nil {
-           return nil, err
-       }
-
+       modTime := src.ModTime(ctx)
        request.ContentType = fs.MimeType(ctx, src)
        request.Info = map[string]string{
            timeKey: timeString(modTime),
        }
-       // Custom upload headers - remove header prefix since they are sent in the body
-       for _, option := range options {
-           k, v := option.Header()
-           k = strings.ToLower(k)
-           if strings.HasPrefix(k, headerPrefix) {
-               request.Info[k[len(headerPrefix):]] = v
-           } else {
-               optionsToSend = append(optionsToSend, option)
-           }
-       }
        // Set the SHA1 if known
        if !o.fs.opt.DisableCheckSum || doCopy {
            if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
@@ -144,11 +134,6 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
        request.ContentType = newInfo.ContentType
        request.Info = newInfo.Info
    }
-   opts := rest.Opts{
-       Method:  "POST",
-       Path:    "/b2_start_large_file",
-       Options: optionsToSend,
-   }
    var response api.StartLargeFileResponse
    err = f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
@@ -432,13 +417,7 @@ func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW)
        } else {
            n, err = io.CopyN(rw, up.in, up.chunkSize)
            if err == io.EOF {
-               if n == 0 {
-                   fs.Debugf(up.o, "Not sending empty chunk after EOF - ending.")
-                   up.f.putRW(rw)
-                   break
-               } else {
-                   fs.Debugf(up.o, "Read less than a full chunk %d, making this the last one.", n)
-               }
+               fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
                hasMoreParts = false
            } else if err != nil {
                // other kinds of errors indicate failure
@@ -478,14 +457,17 @@ func (up *largeUpload) Copy(ctx context.Context) (err error) {
        remaining = up.size
    )
    g.SetLimit(up.f.opt.UploadConcurrency)
-   for part := range up.parts {
+   for part := 0; part < up.parts; part++ {
        // Fail fast, in case an errgroup managed function returns an error
        // gCtx is cancelled. There is no point in copying all the other parts.
        if gCtx.Err() != nil {
            break
        }
 
-       reqSize := min(remaining, up.chunkSize)
+       reqSize := remaining
+       if reqSize >= up.chunkSize {
+           reqSize = up.chunkSize
+       }
 
        part := part // for the closure
        g.Go(func() (err error) {
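Apart from the min() builtin and the integer range loop, the hunk above is the standard errgroup shape: SetLimit caps how many part copies run at once, and checking gCtx.Err() stops queueing new parts once one has failed. A hedged sketch of that pattern; copyParts and copyOne are invented names for illustration, not rclone API:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// copyParts launches one goroutine per part but lets errgroup cap how many
// run concurrently; gCtx.Err() short-circuits the loop after the first
// failure, mirroring the Copy loop in the hunk above.
func copyParts(ctx context.Context, parts, concurrency int, copyOne func(ctx context.Context, part int) error) error {
	g, gCtx := errgroup.WithContext(ctx)
	g.SetLimit(concurrency)
	for part := 0; part < parts; part++ {
		if gCtx.Err() != nil {
			break
		}
		part := part // for the closure (needed before Go 1.22 loop semantics)
		g.Go(func() error {
			return copyOne(gCtx, part)
		})
	}
	return g.Wait()
}

func main() {
	err := copyParts(context.Background(), 5, 2, func(_ context.Context, part int) error {
		fmt.Println("copying part", part)
		return nil
	})
	fmt.Println("done:", err)
}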
|
|||||||
@@ -167,7 +167,19 @@ type PreUploadCheckResponse struct {
 // PreUploadCheckConflict is returned in the ContextInfo error field
 // from PreUploadCheck when the error code is "item_name_in_use"
 type PreUploadCheckConflict struct {
-   Conflicts ItemMini `json:"conflicts"`
+   Conflicts struct {
+       Type        string `json:"type"`
+       ID          string `json:"id"`
+       FileVersion struct {
+           Type string `json:"type"`
+           ID   string `json:"id"`
+           Sha1 string `json:"sha1"`
+       } `json:"file_version"`
+       SequenceID string `json:"sequence_id"`
+       Etag       string `json:"etag"`
+       Sha1       string `json:"sha1"`
+       Name       string `json:"name"`
+   } `json:"conflicts"`
 }
 
 // UpdateFileModTime is used in Update File Info
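The right-hand side inlines the conflict shape as an anonymous struct; either form decodes the same item_name_in_use context info. A small sketch of unmarshalling such a payload, trimmed to a few of the fields shown above; the sample JSON values are made up:

package main

import (
	"encoding/json"
	"fmt"
)

// conflict mirrors (a trimmed version of) the inlined PreUploadCheckConflict
// shape from the hunk above.
type conflict struct {
	Conflicts struct {
		Type string `json:"type"`
		ID   string `json:"id"`
		Sha1 string `json:"sha1"`
		Name string `json:"name"`
	} `json:"conflicts"`
}

func main() {
	raw := []byte(`{"conflicts":{"type":"file","id":"12345","sha1":"da39a3ee5e6b4b0d3255bfef95601890afd80709","name":"hello.txt"}}`)
	var c conflict
	if err := json.Unmarshal(raw, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Conflicts.ID, c.Conflicts.Name) // 12345 hello.txt
}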
|
|||||||
@@ -43,9 +43,9 @@ import (
    "github.com/rclone/rclone/lib/jwtutil"
    "github.com/rclone/rclone/lib/oauthutil"
    "github.com/rclone/rclone/lib/pacer"
-   "github.com/rclone/rclone/lib/random"
    "github.com/rclone/rclone/lib/rest"
    "github.com/youmark/pkcs8"
+   "golang.org/x/oauth2"
 )
 
 const (
@@ -64,10 +64,12 @@ const (
 // Globals
 var (
    // Description of how to auth for this app
-   oauthConfig = &oauthutil.Config{
+   oauthConfig = &oauth2.Config{
        Scopes: nil,
-       AuthURL:      "https://app.box.com/api/oauth2/authorize",
-       TokenURL:     "https://app.box.com/api/oauth2/token",
+       Endpoint: oauth2.Endpoint{
+           AuthURL:  "https://app.box.com/api/oauth2/authorize",
+           TokenURL: "https://app.box.com/api/oauth2/token",
+       },
        ClientID:     rcloneClientID,
        ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
        RedirectURL:  oauthutil.RedirectURL,
@@ -237,8 +239,8 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomC
    return claims, nil
 }
 
-func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]any {
-   signingHeaders := map[string]any{
+func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
+   signingHeaders := map[string]interface{}{
        "kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
    }
    return signingHeaders
@@ -254,10 +256,8 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
 }
 
 func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
+
    block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
-   if block == nil {
-       return nil, errors.New("box: failed to PEM decode private key")
-   }
    if len(rest) > 0 {
        return nil, fmt.Errorf("box: extra data included in private key: %w", err)
    }
@@ -380,7 +380,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
|
|||||||
|
|
||||||
// readMetaDataForPath reads the metadata from the path
|
// readMetaDataForPath reads the metadata from the path
|
||||||
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
|
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
|
||||||
// defer log.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
|
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
|
||||||
leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
|
leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == fs.ErrorDirNotFound {
|
if err == fs.ErrorDirNotFound {
|
||||||
@@ -389,30 +389,20 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use preupload to find the ID
|
found, err := f.listAll(ctx, directoryID, false, true, true, func(item *api.Item) bool {
|
||||||
itemMini, err := f.preUploadCheck(ctx, leaf, directoryID, -1)
|
if strings.EqualFold(item.Name, leaf) {
|
||||||
if err != nil {
|
info = item
|
||||||
return nil, err
|
return true
|
||||||
}
|
}
|
||||||
if itemMini == nil {
|
return false
|
||||||
return nil, fs.ErrorObjectNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
// Now we have the ID we can look up the object proper
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "GET",
|
|
||||||
Path: "/files/" + itemMini.ID,
|
|
||||||
Parameters: fieldsValue(),
|
|
||||||
}
|
|
||||||
var item api.Item
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
resp, err := f.srv.CallJSON(ctx, &opts, nil, &item)
|
|
||||||
return shouldRetry(ctx, resp, err)
|
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return &item, nil
|
if !found {
|
||||||
|
return nil, fs.ErrorObjectNotFound
|
||||||
|
}
|
||||||
|
return info, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// errorHandler parses a non 2xx error response into an error
|
// errorHandler parses a non 2xx error response into an error
|
||||||
@@ -619,7 +609,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
|
|||||||
return shouldRetry(ctx, resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// fmt.Printf("...Error %v\n", err)
|
//fmt.Printf("...Error %v\n", err)
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
// fmt.Printf("...Id %q\n", *info.Id)
|
// fmt.Printf("...Id %q\n", *info.Id)
|
||||||
@@ -772,7 +762,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
|
|||||||
//
|
//
|
||||||
// It returns "", nil if the file is good to go
|
// It returns "", nil if the file is good to go
|
||||||
// It returns "ID", nil if the file must be updated
|
// It returns "ID", nil if the file must be updated
|
||||||
func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (item *api.ItemMini, err error) {
|
func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (ID string, err error) {
|
||||||
check := api.PreUploadCheck{
|
check := api.PreUploadCheck{
|
||||||
Name: f.opt.Enc.FromStandardName(leaf),
|
Name: f.opt.Enc.FromStandardName(leaf),
|
||||||
Parent: api.Parent{
|
Parent: api.Parent{
|
||||||
@@ -797,16 +787,16 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size
|
|||||||
var conflict api.PreUploadCheckConflict
|
var conflict api.PreUploadCheckConflict
|
||||||
err = json.Unmarshal(apiErr.ContextInfo, &conflict)
|
err = json.Unmarshal(apiErr.ContextInfo, &conflict)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
|
return "", fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
|
||||||
}
|
}
|
||||||
if conflict.Conflicts.Type != api.ItemTypeFile {
|
if conflict.Conflicts.Type != api.ItemTypeFile {
|
||||||
return nil, fs.ErrorIsDir
|
return "", fmt.Errorf("pre-upload check: can't overwrite non file with file: %w", err)
|
||||||
}
|
}
|
||||||
return &conflict.Conflicts, nil
|
return conflict.Conflicts.ID, nil
|
||||||
}
|
}
|
||||||
return nil, fmt.Errorf("pre-upload check: %w", err)
|
return "", fmt.Errorf("pre-upload check: %w", err)
|
||||||
}
|
}
|
||||||
return nil, nil
|
return "", nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Put the object
|
// Put the object
|
||||||
@@ -827,11 +817,11 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
|||||||
|
|
||||||
// Preflight check the upload, which returns the ID if the
|
// Preflight check the upload, which returns the ID if the
|
||||||
// object already exists
|
// object already exists
|
||||||
item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
|
ID, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if item == nil {
|
if ID == "" {
|
||||||
return f.PutUnchecked(ctx, in, src, options...)
|
return f.PutUnchecked(ctx, in, src, options...)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -839,7 +829,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
|||||||
o := &Object{
|
o := &Object{
|
||||||
fs: f,
|
fs: f,
|
||||||
remote: remote,
|
remote: remote,
|
||||||
id: item.ID,
|
id: ID,
|
||||||
}
|
}
|
||||||
return o, o.Update(ctx, in, src, options...)
|
return o, o.Update(ctx, in, src, options...)
|
||||||
}
|
}
|
||||||
@@ -966,26 +956,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// check if dest already exists
|
|
||||||
item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if item != nil { // dest already exists, need to copy to temp name and then move
|
|
||||||
tempSuffix := "-rclone-copy-" + random.String(8)
|
|
||||||
fs.Debugf(remote, "dst already exists, copying to temp name %v", remote+tempSuffix)
|
|
||||||
tempObj, err := f.Copy(ctx, src, remote+tempSuffix)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
fs.Debugf(remote+tempSuffix, "moving to real name %v", remote)
|
|
||||||
err = f.deleteObject(ctx, item.ID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return f.Move(ctx, tempObj, remote)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Copy the object
|
// Copy the object
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "POST",
|
Method: "POST",
|
||||||
@@ -1227,12 +1197,6 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown shutdown the fs
|
|
||||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
|
||||||
f.tokenRenewer.Shutdown()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ChangeNotify calls the passed function with a path that has had changes.
|
// ChangeNotify calls the passed function with a path that has had changes.
|
||||||
// If the implementation uses polling, it should adhere to the given interval.
|
// If the implementation uses polling, it should adhere to the given interval.
|
||||||
//
|
//
|
||||||
@@ -1247,10 +1211,6 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
|
|||||||
fs.Infof(f, "Failed to get StreamPosition: %s", err)
|
fs.Infof(f, "Failed to get StreamPosition: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// box can send duplicate Event IDs. Use this map to track and filter
|
|
||||||
// the ones we've already processed.
|
|
||||||
processedEventIDs := make(map[string]time.Time)
|
|
||||||
|
|
||||||
var ticker *time.Ticker
|
var ticker *time.Ticker
|
||||||
var tickerC <-chan time.Time
|
var tickerC <-chan time.Time
|
||||||
for {
|
for {
|
||||||
@@ -1278,15 +1238,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
streamPosition, err = f.changeNotifyRunner(ctx, notifyFunc, streamPosition)
|
||||||
// Garbage collect EventIDs older than 1 minute
|
|
||||||
for eventID, timestamp := range processedEventIDs {
|
|
||||||
if time.Since(timestamp) > time.Minute {
|
|
||||||
delete(processedEventIDs, eventID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
streamPosition, err = f.changeNotifyRunner(ctx, notifyFunc, streamPosition, processedEventIDs)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Infof(f, "Change notify listener failure: %s", err)
|
fs.Infof(f, "Change notify listener failure: %s", err)
|
||||||
}
|
}
|
||||||
@@ -1339,12 +1291,19 @@ func (f *Fs) getFullPath(parentID string, childName string) (fullPath string) {
|
|||||||
return fullPath
|
return fullPath
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), streamPosition string, processedEventIDs map[string]time.Time) (nextStreamPosition string, err error) {
|
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), streamPosition string) (nextStreamPosition string, err error) {
|
||||||
nextStreamPosition = streamPosition
|
nextStreamPosition = streamPosition
|
||||||
|
|
||||||
|
// box can send duplicate Event IDs; filter any in a single notify run
|
||||||
|
processedEventIDs := make(map[string]bool)
|
||||||
|
|
||||||
for {
|
for {
|
||||||
|
limit := f.opt.ListChunk
|
||||||
|
|
||||||
// box only allows a max of 500 events
|
// box only allows a max of 500 events
|
||||||
limit := min(f.opt.ListChunk, 500)
|
if limit > 500 {
|
||||||
|
limit = 500
|
||||||
|
}
|
||||||
|
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "GET",
|
Method: "GET",
|
||||||
@@ -1382,32 +1341,21 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
|
|||||||
var pathsToClear []pathToClear
|
var pathsToClear []pathToClear
|
||||||
newEventIDs := 0
|
newEventIDs := 0
|
||||||
for _, entry := range result.Entries {
|
for _, entry := range result.Entries {
|
||||||
eventDetails := fmt.Sprintf("[%q(%d)|%s|%s|%s|%s]", entry.Source.Name, entry.Source.SequenceID,
|
if entry.EventID == "" || processedEventIDs[entry.EventID] { // missing Event ID, or already saw this one
|
||||||
entry.Source.Type, entry.EventType, entry.Source.ID, entry.EventID)
|
|
||||||
|
|
||||||
if entry.EventID == "" {
|
|
||||||
fs.Debugf(f, "%s ignored due to missing EventID", eventDetails)
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if _, ok := processedEventIDs[entry.EventID]; ok {
|
processedEventIDs[entry.EventID] = true
|
||||||
fs.Debugf(f, "%s ignored due to duplicate EventID", eventDetails)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
processedEventIDs[entry.EventID] = time.Now()
|
|
||||||
newEventIDs++
|
newEventIDs++
|
||||||
|
|
||||||
if entry.Source.ID == "" { // missing File or Folder ID
|
if entry.Source.ID == "" { // missing File or Folder ID
|
||||||
fs.Debugf(f, "%s ignored due to missing SourceID", eventDetails)
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if entry.Source.Type != api.ItemTypeFile && entry.Source.Type != api.ItemTypeFolder { // event is not for a file or folder
|
if entry.Source.Type != api.ItemTypeFile && entry.Source.Type != api.ItemTypeFolder { // event is not for a file or folder
|
||||||
fs.Debugf(f, "%s ignored due to unsupported SourceType", eventDetails)
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Only interested in event types that result in a file tree change
|
// Only interested in event types that result in a file tree change
|
||||||
if _, found := api.FileTreeChangeEventTypes[entry.EventType]; !found {
|
if _, found := api.FileTreeChangeEventTypes[entry.EventType]; !found {
|
||||||
fs.Debugf(f, "%s ignored due to unsupported EventType", eventDetails)
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1418,7 +1366,6 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
|
|||||||
// Item in the cache has the same or newer SequenceID than
|
// Item in the cache has the same or newer SequenceID than
|
||||||
// this event. Ignore this event, it must be old.
|
// this event. Ignore this event, it must be old.
|
||||||
f.itemMetaCacheMu.Unlock()
|
f.itemMetaCacheMu.Unlock()
|
||||||
fs.Debugf(f, "%s ignored due to old SequenceID (%q)", eventDetails, itemMeta.SequenceID)
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1440,10 +1387,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
|
|||||||
if cachedItemMetaFound {
|
if cachedItemMetaFound {
|
||||||
path := f.getFullPath(itemMeta.ParentID, itemMeta.Name)
|
path := f.getFullPath(itemMeta.ParentID, itemMeta.Name)
|
||||||
if path != "" {
|
if path != "" {
|
||||||
fs.Debugf(f, "%s added old path (%q) for notify", eventDetails, path)
|
|
||||||
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
|
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
|
||||||
} else {
|
|
||||||
fs.Debugf(f, "%s old parent not cached", eventDetails)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// If this is a directory, also delete it from the dir cache.
|
// If this is a directory, also delete it from the dir cache.
|
||||||
@@ -1467,10 +1411,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
|
|||||||
if entry.Source.ItemStatus == api.ItemStatusActive {
|
if entry.Source.ItemStatus == api.ItemStatusActive {
|
||||||
path := f.getFullPath(entry.Source.Parent.ID, entry.Source.Name)
|
path := f.getFullPath(entry.Source.Parent.ID, entry.Source.Name)
|
||||||
if path != "" {
|
if path != "" {
|
||||||
fs.Debugf(f, "%s added new path (%q) for notify", eventDetails, path)
|
|
||||||
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
|
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
|
||||||
} else {
|
|
||||||
fs.Debugf(f, "%s new parent not found", eventDetails)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1741,7 +1682,6 @@ var (
|
|||||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||||
_ fs.PublicLinker = (*Fs)(nil)
|
_ fs.PublicLinker = (*Fs)(nil)
|
||||||
_ fs.CleanUpper = (*Fs)(nil)
|
_ fs.CleanUpper = (*Fs)(nil)
|
||||||
_ fs.Shutdowner = (*Fs)(nil)
|
|
||||||
_ fs.Object = (*Object)(nil)
|
_ fs.Object = (*Object)(nil)
|
||||||
_ fs.IDer = (*Object)(nil)
|
_ fs.IDer = (*Object)(nil)
|
||||||
)
|
)
|
||||||
|

@@ -105,7 +105,7 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
 	const defaultDelay = 10
 	var tries int
 outer:
-	for tries = range maxTries {
+	for tries = 0; tries < maxTries; tries++ {
 		err = o.fs.pacer.Call(func() (bool, error) {
 			resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
 			if err != nil {
@@ -203,7 +203,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
 	errs := make(chan error, 1)
 	var wg sync.WaitGroup
 outer:
-	for part := range session.TotalParts {
+	for part := 0; part < session.TotalParts; part++ {
 		// Check any errors
 		select {
 		case err = <-errs:
@@ -211,7 +211,10 @@ outer:
 		default:
 		}

-		reqSize := min(remaining, chunkSize)
+		reqSize := remaining
+		if reqSize >= chunkSize {
+			reqSize = chunkSize
+		}

 		// Make a block of memory
 		buf := make([]byte, reqSize)

36 backend/cache/cache.go vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 // Package cache implements a virtual provider to cache existing remotes.
 package cache
@@ -29,7 +30,6 @@ import (
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/fspath"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/list"
 	"github.com/rclone/rclone/fs/rc"
 	"github.com/rclone/rclone/fs/walk"
 	"github.com/rclone/rclone/lib/atexit"
@@ -410,16 +410,18 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 			if err != nil {
 				return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
 			}
-		} else if opt.PlexPassword != "" && opt.PlexUsername != "" {
-			decPass, err := obscure.Reveal(opt.PlexPassword)
-			if err != nil {
-				decPass = opt.PlexPassword
-			}
-			f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
-				m.Set("plex_token", token)
-			})
-			if err != nil {
-				return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
+		} else {
+			if opt.PlexPassword != "" && opt.PlexUsername != "" {
+				decPass, err := obscure.Reveal(opt.PlexPassword)
+				if err != nil {
+					decPass = opt.PlexPassword
+				}
+				f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
+					m.Set("plex_token", token)
+				})
+				if err != nil {
+					return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
+				}
 			}
 		}
 	}
@@ -1087,13 +1089,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	return cachedEntries, nil
 }

-func (f *Fs) recurse(ctx context.Context, dir string, list *list.Helper) error {
+func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
 	entries, err := f.List(ctx, dir)
 	if err != nil {
 		return err
 	}

-	for i := range entries {
+	for i := 0; i < len(entries); i++ {
 		innerDir, ok := entries[i].(fs.Directory)
 		if ok {
 			err := f.recurse(ctx, innerDir.Remote(), list)
@@ -1139,7 +1141,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 	}

 	// if we're here, we're gonna do a standard recursive traversal and cache everything
-	list := list.NewHelper(callback)
+	list := walk.NewListRHelper(callback)
 	err = f.recurse(ctx, dir, list)
 	if err != nil {
 		return err
@@ -1429,7 +1431,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
 	}()

 	// wait until both are done
-	for range 2 {
+	for c := 0; c < 2; c++ {
 		<-done
 	}
 }
@@ -1754,7 +1756,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 }

 // Stats returns stats about the cache storage
-func (f *Fs) Stats() (map[string]map[string]any, error) {
+func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
 	return f.cache.Stats()
 }

@@ -1934,7 +1936,7 @@ var commandHelp = []fs.CommandHelp{
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
 	switch name {
 	case "stats":
 		return f.Stats()

110 backend/cache/cache_internal_test.go vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js && !race
+// +build !plan9,!js,!race

 package cache_test

@@ -10,6 +11,7 @@ import (
 	goflag "flag"
 	"fmt"
 	"io"
+	"log"
 	"math/rand"
 	"os"
 	"path"
@@ -28,11 +30,10 @@ import (
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/object"
-	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/fstest/testy"
 	"github.com/rclone/rclone/lib/random"
-	"github.com/rclone/rclone/vfs/vfscommon"
+	"github.com/rclone/rclone/vfs/vfsflags"
 	"github.com/stretchr/testify/require"
 )

@@ -92,7 +93,7 @@ func TestMain(m *testing.M) {
 	goflag.Parse()
 	var rc int

-	fs.Logf(nil, "Running with the following params: \n remote: %v", remoteName)
+	log.Printf("Running with the following params: \n remote: %v", remoteName)
 	runInstance = newRun()
 	rc = m.Run()
 	os.Exit(rc)
@@ -122,10 +123,10 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {

 /* TODO: is this testing something?
 func TestInternalVfsCache(t *testing.T) {
-	vfscommon.Opt.DirCacheTime = time.Second * 30
+	vfsflags.Opt.DirCacheTime = time.Second * 30
 	testSize := int64(524288000)

-	vfscommon.Opt.CacheMode = vfs.CacheModeWrites
+	vfsflags.Opt.CacheMode = vfs.CacheModeWrites
 	id := "tiuufo"
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -337,7 +338,7 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {

 func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 	id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
-	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
+	vfsflags.Opt.DirCacheTime = time.Second
 	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
 	if runInstance.rootIsCrypt {
 		t.Skip("test skipped with crypt remote")
@@ -360,14 +361,14 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, int64(len(checkSample)), o.Size())

-	for i := range checkSample {
+	for i := 0; i < len(checkSample); i++ {
 		require.Equal(t, testData[i], checkSample[i])
 	}
 }

 func TestInternalLargeWrittenContentMatches(t *testing.T) {
 	id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
-	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
+	vfsflags.Opt.DirCacheTime = time.Second
 	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
 	if runInstance.rootIsCrypt {
 		t.Skip("test skipped with crypt remote")
@@ -387,7 +388,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {

 	readData, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
 	require.NoError(t, err)
-	for i := range readData {
+	for i := 0; i < len(readData); i++ {
 		require.Equalf(t, testData[i], readData[i], "at byte %v", i)
 	}
 }
@@ -407,7 +408,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 	// update in the wrapped fs
 	originalSize, err := runInstance.size(t, rootFs, "data.bin")
 	require.NoError(t, err)
-	fs.Logf(nil, "original size: %v", originalSize)
+	log.Printf("original size: %v", originalSize)

 	o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
 	require.NoError(t, err)
@@ -416,7 +417,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 	if runInstance.rootIsCrypt {
 		data2, err = base64.StdEncoding.DecodeString(cryptedText3Base64)
 		require.NoError(t, err)
-		expectedSize++ // FIXME newline gets in, likely test data issue
+		expectedSize = expectedSize + 1 // FIXME newline gets in, likely test data issue
 	} else {
 		data2 = []byte("test content")
 	}
@@ -424,7 +425,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 	err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
 	require.NoError(t, err)
 	require.Equal(t, int64(len(data2)), o.Size())
-	fs.Logf(nil, "updated size: %v", len(data2))
+	log.Printf("updated size: %v", len(data2))

 	// get a new instance from the cache
 	if runInstance.wrappedIsExternal {
@@ -484,49 +485,49 @@ func TestInternalMoveWithNotify(t *testing.T) {
 	err = runInstance.retryBlock(func() error {
 		li, err := runInstance.list(t, rootFs, "test")
 		if err != nil {
-			fs.Logf(nil, "err: %v", err)
+			log.Printf("err: %v", err)
 			return err
 		}
 		if len(li) != 2 {
-			fs.Logf(nil, "not expected listing /test: %v", li)
+			log.Printf("not expected listing /test: %v", li)
 			return fmt.Errorf("not expected listing /test: %v", li)
 		}

 		li, err = runInstance.list(t, rootFs, "test/one")
 		if err != nil {
-			fs.Logf(nil, "err: %v", err)
+			log.Printf("err: %v", err)
 			return err
 		}
 		if len(li) != 0 {
-			fs.Logf(nil, "not expected listing /test/one: %v", li)
+			log.Printf("not expected listing /test/one: %v", li)
 			return fmt.Errorf("not expected listing /test/one: %v", li)
 		}

 		li, err = runInstance.list(t, rootFs, "test/second")
 		if err != nil {
-			fs.Logf(nil, "err: %v", err)
+			log.Printf("err: %v", err)
 			return err
 		}
 		if len(li) != 1 {
-			fs.Logf(nil, "not expected listing /test/second: %v", li)
+			log.Printf("not expected listing /test/second: %v", li)
 			return fmt.Errorf("not expected listing /test/second: %v", li)
 		}
 		if fi, ok := li[0].(os.FileInfo); ok {
 			if fi.Name() != "data.bin" {
-				fs.Logf(nil, "not expected name: %v", fi.Name())
+				log.Printf("not expected name: %v", fi.Name())
 				return fmt.Errorf("not expected name: %v", fi.Name())
 			}
 		} else if di, ok := li[0].(fs.DirEntry); ok {
 			if di.Remote() != "test/second/data.bin" {
-				fs.Logf(nil, "not expected remote: %v", di.Remote())
+				log.Printf("not expected remote: %v", di.Remote())
 				return fmt.Errorf("not expected remote: %v", di.Remote())
 			}
 		} else {
-			fs.Logf(nil, "unexpected listing: %v", li)
+			log.Printf("unexpected listing: %v", li)
 			return fmt.Errorf("unexpected listing: %v", li)
 		}

-		fs.Logf(nil, "complete listing: %v", li)
+		log.Printf("complete listing: %v", li)
 		return nil
 	}, 12, time.Second*10)
 	require.NoError(t, err)
@@ -576,43 +577,43 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 	err = runInstance.retryBlock(func() error {
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
 		if !found {
-			fs.Logf(nil, "not found /test")
+			log.Printf("not found /test")
 			return fmt.Errorf("not found /test")
 		}
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
 		if !found {
-			fs.Logf(nil, "not found /test/one")
+			log.Printf("not found /test/one")
 			return fmt.Errorf("not found /test/one")
 		}
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
 		if !found {
-			fs.Logf(nil, "not found /test/one/test2")
+			log.Printf("not found /test/one/test2")
 			return fmt.Errorf("not found /test/one/test2")
 		}
 		li, err := runInstance.list(t, rootFs, "test/one")
 		if err != nil {
-			fs.Logf(nil, "err: %v", err)
+			log.Printf("err: %v", err)
 			return err
 		}
 		if len(li) != 1 {
-			fs.Logf(nil, "not expected listing /test/one: %v", li)
+			log.Printf("not expected listing /test/one: %v", li)
 			return fmt.Errorf("not expected listing /test/one: %v", li)
 		}
 		if fi, ok := li[0].(os.FileInfo); ok {
 			if fi.Name() != "test2" {
-				fs.Logf(nil, "not expected name: %v", fi.Name())
+				log.Printf("not expected name: %v", fi.Name())
 				return fmt.Errorf("not expected name: %v", fi.Name())
 			}
 		} else if di, ok := li[0].(fs.DirEntry); ok {
 			if di.Remote() != "test/one/test2" {
-				fs.Logf(nil, "not expected remote: %v", di.Remote())
+				log.Printf("not expected remote: %v", di.Remote())
 				return fmt.Errorf("not expected remote: %v", di.Remote())
 			}
 		} else {
-			fs.Logf(nil, "unexpected listing: %v", li)
+			log.Printf("unexpected listing: %v", li)
 			return fmt.Errorf("unexpected listing: %v", li)
 		}
-		fs.Logf(nil, "complete listing /test/one/test2")
+		log.Printf("complete listing /test/one/test2")
 		return nil
 	}, 12, time.Second*10)
 	require.NoError(t, err)
@@ -688,7 +689,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
 	co, ok := o.(*cache.Object)
 	require.True(t, ok)

-	for i := range 4 { // read first 4
+	for i := 0; i < 4; i++ { // read first 4
 		_ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
 	}
 	cfs.CleanUpCache(true)
@@ -707,7 +708,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {

 func TestInternalExpiredEntriesRemoved(t *testing.T) {
 	id := fmt.Sprintf("tieer%v", time.Now().Unix())
-	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 4) // needs to be lower than the defined
+	vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
 	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
@@ -742,7 +743,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
 }

 func TestInternalBug2117(t *testing.T) {
-	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 10)
+	vfsflags.Opt.DirCacheTime = time.Second * 10

 	id := fmt.Sprintf("tib2117%v", time.Now().Unix())
 	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
@@ -770,24 +771,24 @@ func TestInternalBug2117(t *testing.T) {

 	di, err := runInstance.list(t, rootFs, "test/dir1/dir2")
 	require.NoError(t, err)
-	fs.Logf(nil, "len: %v", len(di))
+	log.Printf("len: %v", len(di))
 	require.Len(t, di, 1)

 	time.Sleep(time.Second * 30)

 	di, err = runInstance.list(t, rootFs, "test/dir1/dir2")
 	require.NoError(t, err)
-	fs.Logf(nil, "len: %v", len(di))
+	log.Printf("len: %v", len(di))
 	require.Len(t, di, 1)

 	di, err = runInstance.list(t, rootFs, "test/dir1")
 	require.NoError(t, err)
-	fs.Logf(nil, "len: %v", len(di))
+	log.Printf("len: %v", len(di))
 	require.Len(t, di, 4)

 	di, err = runInstance.list(t, rootFs, "test")
 	require.NoError(t, err)
-	fs.Logf(nil, "len: %v", len(di))
+	log.Printf("len: %v", len(di))
 	require.Len(t, di, 4)
 }

@@ -828,7 +829,7 @@ func newRun() *run {
 	} else {
 		r.tmpUploadDir = uploadDir
 	}
-	fs.Logf(nil, "Temp Upload Dir: %v", r.tmpUploadDir)
+	log.Printf("Temp Upload Dir: %v", r.tmpUploadDir)

 	return r
 }
@@ -849,8 +850,8 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
 func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
 	fstest.Initialise()
 	remoteExists := false
-	for _, s := range config.GetRemotes() {
-		if s.Name == remote {
+	for _, s := range config.FileSections() {
+		if s == remote {
 			remoteExists = true
 		}
 	}
@@ -874,12 +875,12 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 	cacheRemote := remote
 	if !remoteExists {
 		localRemote := remote + "-local"
-		config.FileSetValue(localRemote, "type", "local")
-		config.FileSetValue(localRemote, "nounc", "true")
+		config.FileSet(localRemote, "type", "local")
+		config.FileSet(localRemote, "nounc", "true")
 		m.Set("type", "cache")
 		m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
 	} else {
-		remoteType := config.GetValue(remote, "type")
+		remoteType := config.FileGet(remote, "type")
 		if remoteType == "" {
 			t.Skipf("skipped due to invalid remote type for %v", remote)
 			return nil, nil
@@ -890,14 +891,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 			m.Set("password", cryptPassword1)
 			m.Set("password2", cryptPassword2)
 		}
-		remoteRemote := config.GetValue(remote, "remote")
+		remoteRemote := config.FileGet(remote, "remote")
 		if remoteRemote == "" {
 			t.Skipf("skipped due to invalid remote wrapper for %v", remote)
 			return nil, nil
 		}
 		remoteRemoteParts := strings.Split(remoteRemote, ":")
 		remoteWrapping := remoteRemoteParts[0]
-		remoteType := config.GetValue(remoteWrapping, "type")
+		remoteType := config.FileGet(remoteWrapping, "type")
 		if remoteType != "cache" {
 			t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
 			return nil, nil
@@ -934,7 +935,8 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 	}

 	if purge {
-		_ = operations.Purge(context.Background(), f, "")
+		_ = f.Features().Purge(context.Background(), "")
+		require.NoError(t, err)
 	}
 	err = f.Mkdir(context.Background(), "")
 	require.NoError(t, err)
@@ -947,7 +949,7 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 }

 func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
-	err := operations.Purge(context.Background(), f, "")
+	err := f.Features().Purge(context.Background(), "")
 	require.NoError(t, err)
 	cfs, err := r.getCacheFs(f)
 	require.NoError(t, err)
@@ -971,7 +973,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
 	f, err := os.CreateTemp("", "rclonecache-tempfile")
 	require.NoError(t, err)

-	for range int(cnt) {
+	for i := 0; i < int(cnt); i++ {
 		data := randStringBytes(int(chunk))
 		_, _ = f.Write(data)
 	}
@@ -1085,9 +1087,9 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
 	return err
 }

-func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]any, error) {
+func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
 	var err error
-	var l []any
+	var l []interface{}
 	var list fs.DirEntries
 	list, err = f.List(context.Background(), remote)
 	for _, ll := range list {
@@ -1191,7 +1193,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
 func (r *run) cleanSize(t *testing.T, size int64) int64 {
 	if r.rootIsCrypt {
 		denominator := int64(65536 + 16)
-		size -= 32
+		size = size - 32
 		quotient := size / denominator
 		remainder := size % denominator
 		return (quotient*65536 + remainder - 16)
@@ -1215,7 +1217,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
 	var err error
 	var state cache.BackgroundUploadState

-	for range 2 {
+	for i := 0; i < 2; i++ {
 		select {
 		case state = <-buCh:
 			// continue
@@ -1293,7 +1295,7 @@ func (r *run) completeAllBackgroundUploads(t *testing.T, f fs.Fs, lastRemote str

 func (r *run) retryBlock(block func() error, maxRetries int, rate time.Duration) error {
 	var err error
-	for range maxRetries {
+	for i := 0; i < maxRetries; i++ {
 		err = block()
 		if err == nil {
 			return nil

12 backend/cache/cache_test.go vendored
@@ -1,6 +1,7 @@
 // Test Cache filesystem interface

 //go:build !plan9 && !js && !race
+// +build !plan9,!js,!race

 package cache_test

@@ -15,11 +16,10 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestCache:",
 		NilObject:  (*cache.Object)(nil),
-		UnimplementableFsMethods:        []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata", "ListP"},
-		UnimplementableObjectMethods:    []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
-		UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
-		SkipInvalidUTF8:                 true, // invalid UTF-8 confuses the cache
+		UnimplementableFsMethods:     []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
+		SkipInvalidUTF8:              true, // invalid UTF-8 confuses the cache
 	})
 }

2 backend/cache/cache_unsupported.go vendored
@@ -2,6 +2,6 @@
 // about "no buildable Go source files "

 //go:build plan9 || js
+// +build plan9 js

-// Package cache implements a virtual provider to cache existing remotes.
 package cache

3 backend/cache/cache_upload_test.go vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js && !race
+// +build !plan9,!js,!race

 package cache_test

@@ -162,7 +163,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
 	randInstance := rand.New(rand.NewSource(time.Now().Unix()))

 	lastFile := ""
-	for i := range totalFiles {
+	for i := 0; i < totalFiles; i++ {
 		size := int64(randInstance.Intn(maxSize-minSize) + minSize)
 		testReader := runInstance.randomReader(t, size)
 		remote := "test/" + strconv.Itoa(i) + ".bin"

1 backend/cache/directory.go vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 package cache


17 backend/cache/handle.go vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 package cache

@@ -118,7 +119,7 @@ func (r *Handle) startReadWorkers() {
 	r.scaleWorkers(totalWorkers)
 }

-// scaleWorkers will increase the worker pool count by the provided amount
+// scaleOutWorkers will increase the worker pool count by the provided amount
 func (r *Handle) scaleWorkers(desired int) {
 	current := r.workers
 	if current == desired {
@@ -182,7 +183,7 @@ func (r *Handle) queueOffset(offset int64) {
 		}
 	}

-	for i := range r.workers {
+	for i := 0; i < r.workers; i++ {
 		o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
 		if o < 0 || o >= r.cachedObject.Size() {
 			continue
@@ -208,7 +209,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
 	offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)

 	// we align the start offset of the first chunk to a likely chunk in the storage
-	chunkStart -= offset
+	chunkStart = chunkStart - offset
 	r.queueOffset(chunkStart)
 	found := false

@@ -222,7 +223,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
 	if !found {
 		// we're gonna give the workers a chance to pickup the chunk
 		// and retry a couple of times
-		for i := range r.cacheFs().opt.ReadRetries * 8 {
+		for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
 			data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
 			if err == nil {
 				found = true
@@ -327,7 +328,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {

 	chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
 	if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
-		chunkStart -= int64(r.cacheFs().opt.ChunkSize)
+		chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
 	}
 	r.queueOffset(chunkStart)

@@ -415,8 +416,10 @@ func (w *worker) run() {
 					continue
 				}
 			}
-		} else if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
-			continue
+		} else {
+			if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
+				continue
+			}
 		}

 		chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)

1 backend/cache/object.go vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 package cache


9 backend/cache/plex.go vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 package cache

@@ -209,7 +210,7 @@ func (p *plexConnector) authenticate() error {
 	if err != nil {
 		return err
 	}
-	var data map[string]any
+	var data map[string]interface{}
 	err = json.NewDecoder(resp.Body).Decode(&data)
 	if err != nil {
 		return fmt.Errorf("failed to obtain token: %w", err)
@@ -273,11 +274,11 @@ func (p *plexConnector) isPlaying(co *Object) bool {
 }

 // adapted from: https://stackoverflow.com/a/28878037 (credit)
-func get(m any, path ...any) (any, bool) {
+func get(m interface{}, path ...interface{}) (interface{}, bool) {
 	for _, p := range path {
 		switch idx := p.(type) {
 		case string:
-			if mm, ok := m.(map[string]any); ok {
+			if mm, ok := m.(map[string]interface{}); ok {
 				if val, found := mm[idx]; found {
 					m = val
 					continue
@@ -285,7 +286,7 @@ func get(m any, path ...any) (any, bool) {
 			}
 			return nil, false
 		case int:
-			if mm, ok := m.([]any); ok {
+			if mm, ok := m.([]interface{}); ok {
 				if len(mm) > idx {
 					m = mm[idx]
 					continue
1  backend/cache/storage_memory.go  vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js
 
 package cache
 
12  backend/cache/storage_persistent.go  vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js
 
 package cache
 
@@ -18,7 +19,6 @@ import (
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/walk"
     bolt "go.etcd.io/bbolt"
-    "go.etcd.io/bbolt/errors"
 )
 
 // Constants
@@ -598,7 +598,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
     })
 
     if err != nil {
-        if err == errors.ErrDatabaseNotOpen {
+        if err == bolt.ErrDatabaseNotOpen {
             // we're likely a late janitor and we need to end quietly as there's no guarantee of what exists anymore
             return
         }
@@ -607,16 +607,16 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
 }
 
 // Stats returns a go map with the stats key values
-func (b *Persistent) Stats() (map[string]map[string]any, error) {
-    r := make(map[string]map[string]any)
-    r["data"] = make(map[string]any)
+func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
+    r := make(map[string]map[string]interface{})
+    r["data"] = make(map[string]interface{})
     r["data"]["oldest-ts"] = time.Now()
     r["data"]["oldest-file"] = ""
    r["data"]["newest-ts"] = time.Now()
     r["data"]["newest-file"] = ""
     r["data"]["total-chunks"] = 0
     r["data"]["total-size"] = int64(0)
-    r["files"] = make(map[string]any)
+    r["files"] = make(map[string]interface{})
     r["files"]["oldest-ts"] = time.Now()
     r["files"]["oldest-name"] = ""
     r["files"]["newest-ts"] = time.Now()
3  backend/cache/utils_test.go  vendored
@@ -1,6 +1,3 @@
-//go:build !plan9 && !js
-// +build !plan9,!js
-
 package cache
 
 import bolt "go.etcd.io/bbolt"
@@ -29,7 +29,6 @@ import (
     "github.com/rclone/rclone/fs/fspath"
     "github.com/rclone/rclone/fs/hash"
     "github.com/rclone/rclone/fs/operations"
-    "github.com/rclone/rclone/lib/encoder"
 )
 
 // Chunker's composite files have one or more chunks
@@ -102,10 +101,8 @@ var (
 //
 // And still chunker's primary function is to chunk large files
 // rather than serve as a generic metadata container.
-const (
-    maxMetadataSize        = 1023
-    maxMetadataSizeWritten = 255
-)
+const maxMetadataSize = 1023
+const maxMetadataSizeWritten = 255
 
 // Current/highest supported metadata format.
 const metadataVersion = 2
@@ -308,6 +305,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
         root: rpath,
         opt:  *opt,
     }
+    cache.PinUntilFinalized(f.base, f)
     f.dirSort = true // processEntries requires that meta Objects prerun data chunks atm.
 
     if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType, opt.Transactions); err != nil {
@@ -319,45 +317,29 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
     // i.e. `rpath` does not exist in the wrapped remote, but chunker
     // detects a composite file because it finds the first chunk!
     // (yet can't satisfy fstest.CheckListing, will ignore)
-    if err == nil && !f.useMeta {
+    if err == nil && !f.useMeta && strings.Contains(rpath, "/") {
         firstChunkPath := f.makeChunkName(remotePath, 0, "", "")
-        newBase, testErr := cache.Get(ctx, baseName+firstChunkPath)
+        _, testErr := cache.Get(ctx, baseName+firstChunkPath)
         if testErr == fs.ErrorIsFile {
-            f.base = newBase
             err = testErr
         }
     }
-    cache.PinUntilFinalized(f.base, f)
-
-    // Correct root if definitely pointing to a file
-    if err == fs.ErrorIsFile {
-        f.root = path.Dir(f.root)
-        if f.root == "." || f.root == "/" {
-            f.root = ""
-        }
-    }
 
     // Note 1: the features here are ones we could support, and they are
     // ANDed with the ones from wrappedFs.
     // Note 2: features.Fill() points features.PutStream to our PutStream,
     // but features.Mask() will nullify it if wrappedFs does not have it.
     f.features = (&fs.Features{
         CaseInsensitive:         true,
         DuplicateFiles:          true,
         ReadMimeType:            false, // Object.MimeType not supported
         WriteMimeType:           true,
         BucketBased:             true,
         CanHaveEmptyDirectories: true,
         ServerSideAcrossConfigs: true,
-        ReadDirMetadata:          true,
-        WriteDirMetadata:         true,
-        WriteDirSetModTime:       true,
-        UserDirMetadata:          true,
-        DirModTimeUpdatesOnWrite: true,
     }).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)
 
-    f.features.ListR = nil // Recursive listing may cause chunker skip files
-    f.features.ListP = nil // ListP not supported yet
+    f.features.Disable("ListR") // Recursive listing may cause chunker skip files
 
     return f, err
 }
@@ -633,7 +615,7 @@ func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ct
 
 // forbidChunk prints error message or raises error if file is chunk.
 // First argument sets log prefix, use `false` to suppress message.
-func (f *Fs) forbidChunk(o any, filePath string) error {
+func (f *Fs) forbidChunk(o interface{}, filePath string) error {
     if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" {
         if f.opt.FailHard {
             return fmt.Errorf("chunk overlap with %q", parentPath)
@@ -681,7 +663,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
     circleSec := unixSec % closestPrimeZzzzSeconds
     first4chars := strconv.FormatInt(circleSec, 36)
 
-    for range maxTransactionProbes {
+    for tries := 0; tries < maxTransactionProbes; tries++ {
         f.xactIDMutex.Lock()
         randomness := f.xactIDRand.Int63n(maxTwoBase36Digits + 1)
         f.xactIDMutex.Unlock()
@@ -831,7 +813,8 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
             }
         case fs.Directory:
             isSubdir[entry.Remote()] = true
-            wrapDir := fs.NewDirWrapper(entry.Remote(), entry)
+            wrapDir := fs.NewDirCopy(ctx, entry)
+            wrapDir.SetRemote(entry.Remote())
             tempEntries = append(tempEntries, wrapDir)
         default:
             if f.opt.FailHard {
@@ -964,11 +947,6 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
         }
         if caseInsensitive {
             sameMain = strings.EqualFold(mainRemote, remote)
-            if sameMain && f.base.Features().IsLocal {
-                // on local, make sure the EqualFold still holds true when accounting for encoding.
-                // sometimes paths with special characters will only normalize the same way in Standard Encoding.
-                sameMain = strings.EqualFold(encoder.OS.FromStandardPath(mainRemote), encoder.OS.FromStandardPath(remote))
-            }
         } else {
             sameMain = mainRemote == remote
         }
@@ -982,13 +960,13 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
             }
             continue
         }
-        // fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
+        //fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
         if err := o.addChunk(entry, chunkNo); err != nil {
             return nil, err
         }
     }
 
-    if o.main == nil && len(o.chunks) == 0 {
+    if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
         // Scanning hasn't found data chunks with conforming names.
         if f.useMeta || quickScan {
             // Metadata is required but absent and there are no chunks.
@@ -1144,8 +1122,8 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
 // put implements Put, PutStream, PutUnchecked, Update
 func (f *Fs) put(
     ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
-    basePut putFn, action string, target fs.Object,
-) (obj fs.Object, err error) {
+    basePut putFn, action string, target fs.Object) (obj fs.Object, err error) {
     // Perform consistency checks
     if err := f.forbidChunk(src, remote); err != nil {
         return nil, fmt.Errorf("%s refused: %w", action, err)
@@ -1190,7 +1168,10 @@ func (f *Fs) put(
         }
 
         tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactID)
-        size := min(c.sizeLeft, c.chunkSize)
+        size := c.sizeLeft
+        if size > c.chunkSize {
+            size = c.chunkSize
+        }
         savedReadCount := c.readCount
 
         // If a single chunk is expected, avoid the extra rename operation
@@ -1475,7 +1456,10 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
     const bufLen = 1048576 // 1 MiB
     buf := make([]byte, bufLen)
     for size > 0 {
-        n := min(size, bufLen)
+        n := size
+        if n > bufLen {
+            n = bufLen
+        }
         if _, err := io.ReadFull(in, buf[0:n]); err != nil {
             return err
         }
@@ -1579,14 +1563,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
     return f.base.Mkdir(ctx, dir)
 }
 
-// MkdirMetadata makes the root directory of the Fs object
-func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
-    if do := f.base.Features().MkdirMetadata; do != nil {
-        return do(ctx, dir, metadata)
-    }
-    return nil, fs.ErrorNotImplemented
-}
-
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
@@ -1861,8 +1837,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 
 // baseMove chains to the wrapped Move or simulates it by Copy+Delete
 func (f *Fs) baseMove(ctx context.Context, src fs.Object, remote string, delMode int) (fs.Object, error) {
-    ctx, ci := fs.AddConfig(ctx)
-    ci.NameTransform = nil // ensure operations.Move does not double-transform here
     var (
         dest fs.Object
         err  error
@@ -1906,14 +1880,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
     return do(ctx, srcFs.base, srcRemote, dstRemote)
 }
 
-// DirSetModTime sets the directory modtime for dir
-func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
-    if do := f.base.Features().DirSetModTime; do != nil {
-        return do(ctx, dir, modTime)
-    }
-    return fs.ErrorNotImplemented
-}
-
 // CleanUp the trash in the Fs
 //
 // Implement this if you have a way of emptying the trash or
@@ -1962,7 +1928,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
         return
     }
     wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
-        // fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
+        //fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
         if entryType == fs.EntryObject {
             mainPath, _, _, xactID := f.parseChunkName(path)
             metaXactID := ""
@@ -2477,7 +2443,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
     if len(data) > maxMetadataSizeWritten {
         return nil, false, ErrMetaTooBig
     }
-    if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
+    if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
         return nil, false, errors.New("invalid json")
     }
     var metadata metaSimpleJSON
@@ -2574,8 +2540,6 @@ var (
     _ fs.Copier          = (*Fs)(nil)
     _ fs.Mover           = (*Fs)(nil)
     _ fs.DirMover        = (*Fs)(nil)
-    _ fs.DirSetModTimer  = (*Fs)(nil)
-    _ fs.MkdirMetadataer = (*Fs)(nil)
     _ fs.PutUncheckeder  = (*Fs)(nil)
     _ fs.PutStreamer     = (*Fs)(nil)
     _ fs.CleanUpper      = (*Fs)(nil)
@@ -40,7 +40,7 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
     })
 }
 
-type settings map[string]any
+type settings map[string]interface{}
 
 func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
     fsName := strings.Split(f.Name(), "{")[0] // strip off hash
@@ -36,7 +36,6 @@ func TestIntegration(t *testing.T) {
             "GetTier",
             "SetTier",
             "Metadata",
-            "SetMetadata",
         },
         UnimplementableFsMethods: []string{
             "PublicLink",
@@ -46,7 +45,6 @@ func TestIntegration(t *testing.T) {
             "DirCacheFlush",
             "UserInfo",
             "Disconnect",
-            "ListP",
         },
     }
     if *fstest.RemoteName == "" {
@@ -1,48 +0,0 @@
-// Package api has type definitions for cloudinary
-package api
-
-import (
-    "fmt"
-)
-
-// CloudinaryEncoder extends the built-in encoder
-type CloudinaryEncoder interface {
-    // FromStandardPath takes a / separated path in Standard encoding
-    // and converts it to a / separated path in this encoding.
-    FromStandardPath(string) string
-    // FromStandardName takes name in Standard encoding and converts
-    // it in this encoding.
-    FromStandardName(string) string
-    // ToStandardPath takes a / separated path in this encoding
-    // and converts it to a / separated path in Standard encoding.
-    ToStandardPath(string) string
-    // ToStandardName takes name in this encoding and converts
-    // it in Standard encoding.
-    ToStandardName(string, string) string
-    // Encoded root of the remote (as passed into NewFs)
-    FromStandardFullPath(string) string
-}
-
-// UpdateOptions was created to pass options from Update to Put
-type UpdateOptions struct {
-    PublicID     string
-    ResourceType string
-    DeliveryType string
-    AssetFolder  string
-    DisplayName  string
-}
-
-// Header formats the option as a string
-func (o *UpdateOptions) Header() (string, string) {
-    return "UpdateOption", fmt.Sprintf("%s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
-}
-
-// Mandatory returns whether the option must be parsed or can be ignored
-func (o *UpdateOptions) Mandatory() bool {
-    return false
-}
-
-// String formats the option into human-readable form
-func (o *UpdateOptions) String() string {
-    return fmt.Sprintf("Fully qualified Public ID: %s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
-}
@@ -1,754 +0,0 @@
-// Package cloudinary provides an interface to the Cloudinary DAM
-package cloudinary
-
-import (
-    "context"
-    "encoding/hex"
-    "errors"
-    "fmt"
-    "io"
-    "net/http"
-    "net/url"
-    "path"
-    "slices"
-    "strconv"
-    "strings"
-    "time"
-
-    "github.com/cloudinary/cloudinary-go/v2"
-    SDKApi "github.com/cloudinary/cloudinary-go/v2/api"
-    "github.com/cloudinary/cloudinary-go/v2/api/admin"
-    "github.com/cloudinary/cloudinary-go/v2/api/admin/search"
-    "github.com/cloudinary/cloudinary-go/v2/api/uploader"
-    "github.com/rclone/rclone/backend/cloudinary/api"
-    "github.com/rclone/rclone/fs"
-    "github.com/rclone/rclone/fs/config"
-    "github.com/rclone/rclone/fs/config/configmap"
-    "github.com/rclone/rclone/fs/config/configstruct"
-    "github.com/rclone/rclone/fs/fserrors"
-    "github.com/rclone/rclone/fs/fshttp"
-    "github.com/rclone/rclone/fs/hash"
-    "github.com/rclone/rclone/lib/encoder"
-    "github.com/rclone/rclone/lib/pacer"
-    "github.com/rclone/rclone/lib/rest"
-    "github.com/zeebo/blake3"
-)
-
-// Cloudinary shouldn't have a trailing dot if there is no path
-func cldPathDir(somePath string) string {
-    if somePath == "" || somePath == "." {
-        return somePath
-    }
-    dir := path.Dir(somePath)
-    if dir == "." {
-        return ""
-    }
-    return dir
-}
-
-// Register with Fs
-func init() {
-    fs.Register(&fs.RegInfo{
-        Name:        "cloudinary",
-        Description: "Cloudinary",
-        NewFs:       NewFs,
-        Options: []fs.Option{
-            {
-                Name:      "cloud_name",
-                Help:      "Cloudinary Environment Name",
-                Required:  true,
-                Sensitive: true,
-            },
-            {
-                Name:      "api_key",
-                Help:      "Cloudinary API Key",
-                Required:  true,
-                Sensitive: true,
-            },
-            {
-                Name:      "api_secret",
-                Help:      "Cloudinary API Secret",
-                Required:  true,
-                Sensitive: true,
-            },
-            {
-                Name: "upload_prefix",
-                Help: "Specify the API endpoint for environments out of the US",
-            },
-            {
-                Name: "upload_preset",
-                Help: "Upload Preset to select asset manipulation on upload",
-            },
-            {
-                Name:     config.ConfigEncoding,
-                Help:     config.ConfigEncodingHelp,
-                Advanced: true,
-                Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
-                    encoder.EncodeSlash |
-                    encoder.EncodeLtGt |
-                    encoder.EncodeDoubleQuote |
-                    encoder.EncodeQuestion |
-                    encoder.EncodeAsterisk |
-                    encoder.EncodePipe |
-                    encoder.EncodeHash |
-                    encoder.EncodePercent |
-                    encoder.EncodeBackSlash |
-                    encoder.EncodeDel |
-                    encoder.EncodeCtl |
-                    encoder.EncodeRightSpace |
-                    encoder.EncodeInvalidUtf8 |
-                    encoder.EncodeDot),
-            },
-            {
-                Name:     "eventually_consistent_delay",
-                Default:  fs.Duration(0),
-                Advanced: true,
-                Help:     "Wait N seconds for eventual consistency of the databases that support the backend operation",
-            },
-            {
-                Name:     "adjust_media_files_extensions",
-                Default:  true,
-                Advanced: true,
-                Help:     "Cloudinary handles media formats as a file attribute and strips it from the name, which is unlike most other file systems",
-            },
-            {
-                Name: "media_extensions",
-                Default: []string{
-                    "3ds", "3g2", "3gp", "ai", "arw", "avi", "avif", "bmp", "bw",
-                    "cr2", "cr3", "djvu", "dng", "eps3", "fbx", "flif", "flv", "gif",
-                    "glb", "gltf", "hdp", "heic", "heif", "ico", "indd", "jp2", "jpe",
-                    "jpeg", "jpg", "jxl", "jxr", "m2ts", "mov", "mp4", "mpeg", "mts",
-                    "mxf", "obj", "ogv", "pdf", "ply", "png", "psd", "svg", "tga",
-                    "tif", "tiff", "ts", "u3ma", "usdz", "wdp", "webm", "webp", "wmv"},
-                Advanced: true,
-                Help:     "Cloudinary supported media extensions",
-            },
-        },
-    })
-}
-
-// Options defines the configuration for this backend
-type Options struct {
-    CloudName                  string               `config:"cloud_name"`
-    APIKey                     string               `config:"api_key"`
-    APISecret                  string               `config:"api_secret"`
-    UploadPrefix               string               `config:"upload_prefix"`
-    UploadPreset               string               `config:"upload_preset"`
-    Enc                        encoder.MultiEncoder `config:"encoding"`
-    EventuallyConsistentDelay  fs.Duration          `config:"eventually_consistent_delay"`
-    MediaExtensions            []string             `config:"media_extensions"`
-    AdjustMediaFilesExtensions bool                 `config:"adjust_media_files_extensions"`
-}
-
-// Fs represents a remote cloudinary server
-type Fs struct {
-    name     string
-    root     string
-    opt      Options
-    features *fs.Features
-    pacer    *fs.Pacer
-    srv      *rest.Client            // For downloading assets via the Cloudinary CDN
-    cld      *cloudinary.Cloudinary  // API calls are going through the Cloudinary SDK
-    lastCRUD time.Time
-}
-
-// Object describes a cloudinary object
-type Object struct {
-    fs           *Fs
-    remote       string
-    size         int64
-    modTime      time.Time
-    url          string
-    md5sum       string
-    publicID     string
-    resourceType string
-    deliveryType string
-}
-
-// NewFs constructs an Fs from the path, bucket:path
-func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
-    opt := new(Options)
-    err := configstruct.Set(m, opt)
-    if err != nil {
-        return nil, err
-    }
-
-    // Initialize the Cloudinary client
-    cld, err := cloudinary.NewFromParams(opt.CloudName, opt.APIKey, opt.APISecret)
-    if err != nil {
-        return nil, fmt.Errorf("failed to create Cloudinary client: %w", err)
-    }
-    cld.Admin.Client = *fshttp.NewClient(ctx)
-    cld.Upload.Client = *fshttp.NewClient(ctx)
-    if opt.UploadPrefix != "" {
-        cld.Config.API.UploadPrefix = opt.UploadPrefix
-    }
-    client := fshttp.NewClient(ctx)
-    f := &Fs{
-        name:  name,
-        root:  root,
-        opt:   *opt,
-        cld:   cld,
-        pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1000), pacer.MaxSleep(10000), pacer.DecayConstant(2))),
-        srv:   rest.NewClient(client),
-    }
-
-    f.features = (&fs.Features{
-        CanHaveEmptyDirectories: true,
-    }).Fill(ctx, f)
-
-    if root != "" {
-        // Check to see if the root actually an existing file
-        remote := path.Base(root)
-        f.root = cldPathDir(root)
-        _, err := f.NewObject(ctx, remote)
-        if err != nil {
-            if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
-                // File doesn't exist so return the previous root
-                f.root = root
-                return f, nil
-            }
-            return nil, err
-        }
-        // return an error with an fs which points to the parent
-        return f, fs.ErrorIsFile
-    }
-    return f, nil
-}
-
-// ------------------------------------------------------------
-
-// FromStandardPath implementation of the api.CloudinaryEncoder
-func (f *Fs) FromStandardPath(s string) string {
-    return strings.ReplaceAll(f.opt.Enc.FromStandardPath(s), "&", "\uFF06")
-}
-
-// FromStandardName implementation of the api.CloudinaryEncoder
-func (f *Fs) FromStandardName(s string) string {
-    if f.opt.AdjustMediaFilesExtensions {
-        parsedURL, err := url.Parse(s)
-        ext := ""
-        if err != nil {
-            fs.Logf(nil, "Error parsing URL: %v", err)
-        } else {
-            ext = path.Ext(parsedURL.Path)
-            if slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
-                s = strings.TrimSuffix(parsedURL.Path, ext)
-            }
-        }
-    }
-    return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
-}
-
-// ToStandardPath implementation of the api.CloudinaryEncoder
-func (f *Fs) ToStandardPath(s string) string {
-    return strings.ReplaceAll(f.opt.Enc.ToStandardPath(s), "\uFF06", "&")
-}
-
-// ToStandardName implementation of the api.CloudinaryEncoder
-func (f *Fs) ToStandardName(s string, assetURL string) string {
-    ext := ""
-    if f.opt.AdjustMediaFilesExtensions {
-        parsedURL, err := url.Parse(assetURL)
-        if err != nil {
-            fs.Logf(nil, "Error parsing URL: %v", err)
-        } else {
-            ext = path.Ext(parsedURL.Path)
-            if !slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
-                ext = ""
-            }
-        }
-    }
-    return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&") + ext
-}
-
-// FromStandardFullPath encodes a full path to Cloudinary standard
-func (f *Fs) FromStandardFullPath(dir string) string {
-    return path.Join(api.CloudinaryEncoder.FromStandardPath(f, f.root), api.CloudinaryEncoder.FromStandardPath(f, dir))
-}
-
-// ToAssetFolderAPI encodes folders as expected by the Cloudinary SDK
-func (f *Fs) ToAssetFolderAPI(dir string) string {
-    return strings.ReplaceAll(dir, "%", "%25")
-}
-
-// ToDisplayNameElastic encodes a special case of elasticsearch
-func (f *Fs) ToDisplayNameElastic(dir string) string {
-    return strings.ReplaceAll(dir, "!", "\\!")
-}
-
-// Name of the remote (as passed into NewFs)
-func (f *Fs) Name() string {
-    return f.name
-}
-
-// Root of the remote (as passed into NewFs)
-func (f *Fs) Root() string {
-    return f.root
-}
-
-// WaitEventuallyConsistent waits till the FS is eventually consistent
-func (f *Fs) WaitEventuallyConsistent() {
-    if f.opt.EventuallyConsistentDelay == fs.Duration(0) {
-        return
-    }
-    delay := time.Duration(f.opt.EventuallyConsistentDelay)
-    timeSinceLastCRUD := time.Since(f.lastCRUD)
-    if timeSinceLastCRUD < delay {
-        time.Sleep(delay - timeSinceLastCRUD)
-    }
-}
-
-// String converts this Fs to a string
-func (f *Fs) String() string {
-    return fmt.Sprintf("Cloudinary root '%s'", f.root)
-}
-
-// Features returns the optional features of this Fs
-func (f *Fs) Features() *fs.Features {
-    return f.features
-}
-
-// List the objects and directories in dir into entries
-func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
-    remotePrefix := f.FromStandardFullPath(dir)
-    if remotePrefix != "" && !strings.HasSuffix(remotePrefix, "/") {
-        remotePrefix += "/"
-    }
-
-    var entries fs.DirEntries
-    dirs := make(map[string]struct{})
-    nextCursor := ""
-    f.WaitEventuallyConsistent()
-    for {
-        // user the folders api to list folders.
-        folderParams := admin.SubFoldersParams{
-            Folder:     f.ToAssetFolderAPI(remotePrefix),
-            MaxResults: 500,
-        }
-        if nextCursor != "" {
-            folderParams.NextCursor = nextCursor
-        }
-
-        results, err := f.cld.Admin.SubFolders(ctx, folderParams)
-        if err != nil {
-            return nil, fmt.Errorf("failed to list sub-folders: %w", err)
-        }
-        if results.Error.Message != "" {
-            if strings.HasPrefix(results.Error.Message, "Can't find folder with path") {
-                return nil, fs.ErrorDirNotFound
-            }
-
-            return nil, fmt.Errorf("failed to list sub-folders: %s", results.Error.Message)
-        }
-
-        for _, folder := range results.Folders {
-            relativePath := api.CloudinaryEncoder.ToStandardPath(f, strings.TrimPrefix(folder.Path, remotePrefix))
-            parts := strings.Split(relativePath, "/")
-
-            // It's a directory
-            dirName := parts[len(parts)-1]
-            if _, found := dirs[dirName]; !found {
-                d := fs.NewDir(path.Join(dir, dirName), time.Time{})
-                entries = append(entries, d)
-                dirs[dirName] = struct{}{}
-            }
-        }
-        // Break if there are no more results
-        if results.NextCursor == "" {
-            break
-        }
-        nextCursor = results.NextCursor
-    }
-
-    for {
-        // Use the assets.AssetsByAssetFolder API to list assets
-        assetsParams := admin.AssetsByAssetFolderParams{
-            AssetFolder: remotePrefix,
-            MaxResults:  500,
-        }
-        if nextCursor != "" {
-            assetsParams.NextCursor = nextCursor
-        }
-
-        results, err := f.cld.Admin.AssetsByAssetFolder(ctx, assetsParams)
-        if err != nil {
-            return nil, fmt.Errorf("failed to list assets: %w", err)
-        }
-
-        for _, asset := range results.Assets {
-            remote := path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName, asset.SecureURL))
-            o := &Object{
-                fs:           f,
-                remote:       remote,
-                size:         int64(asset.Bytes),
-                modTime:      asset.CreatedAt,
-                url:          asset.SecureURL,
-                publicID:     asset.PublicID,
-                resourceType: asset.AssetType,
-                deliveryType: asset.Type,
-            }
-            entries = append(entries, o)
-        }
-
-        // Break if there are no more results
-        if results.NextCursor == "" {
-            break
-        }
-        nextCursor = results.NextCursor
-    }
-
-    return entries, nil
-}
-
-// NewObject finds the Object at remote. If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-    searchParams := search.Query{
-        Expression: fmt.Sprintf("asset_folder:\"%s\" AND display_name:\"%s\"",
-            f.FromStandardFullPath(cldPathDir(remote)),
-            f.ToDisplayNameElastic(api.CloudinaryEncoder.FromStandardName(f, path.Base(remote)))),
-        SortBy:     []search.SortByField{{"uploaded_at": "desc"}},
-        MaxResults: 2,
-    }
-    var results *admin.SearchResult
-    f.WaitEventuallyConsistent()
-    err := f.pacer.Call(func() (bool, error) {
-        var err1 error
-        results, err1 = f.cld.Admin.Search(ctx, searchParams)
-        if err1 == nil && results.TotalCount != len(results.Assets) {
-            err1 = errors.New("partial response so waiting for eventual consistency")
-        }
-        return shouldRetry(ctx, nil, err1)
-    })
-    if err != nil {
-        return nil, fs.ErrorObjectNotFound
-    }
-    if results.TotalCount == 0 || len(results.Assets) == 0 {
-        return nil, fs.ErrorObjectNotFound
-    }
-    asset := results.Assets[0]
-
-    o := &Object{
-        fs:           f,
-        remote:       remote,
-        size:         int64(asset.Bytes),
-        modTime:      asset.UploadedAt,
-        url:          asset.SecureURL,
-        md5sum:       asset.Etag,
-        publicID:     asset.PublicID,
-        resourceType: asset.ResourceType,
-        deliveryType: asset.Type,
-    }
-
-    return o, nil
-}
-
-func (f *Fs) getSuggestedPublicID(assetFolder string, displayName string, modTime time.Time) string {
-    payload := []byte(path.Join(assetFolder, displayName))
-    hash := blake3.Sum256(payload)
-    return hex.EncodeToString(hash[:])
-}
-
-// Put uploads content to Cloudinary
-func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-    if src.Size() == 0 {
-        return nil, fs.ErrorCantUploadEmptyFiles
-    }
-
-    params := uploader.UploadParams{
-        UploadPreset: f.opt.UploadPreset,
-    }
-
-    updateObject := false
-    var modTime time.Time
-    for _, option := range options {
-        if updateOptions, ok := option.(*api.UpdateOptions); ok {
-            if updateOptions.PublicID != "" {
-                updateObject = true
-                params.Overwrite = SDKApi.Bool(true)
-                params.Invalidate = SDKApi.Bool(true)
-                params.PublicID = updateOptions.PublicID
-                params.ResourceType = updateOptions.ResourceType
-                params.Type = SDKApi.DeliveryType(updateOptions.DeliveryType)
-                params.AssetFolder = updateOptions.AssetFolder
-                params.DisplayName = updateOptions.DisplayName
-                modTime = src.ModTime(ctx)
-            }
-        }
-    }
-    if !updateObject {
-        params.AssetFolder = f.FromStandardFullPath(cldPathDir(src.Remote()))
-        params.DisplayName = api.CloudinaryEncoder.FromStandardName(f, path.Base(src.Remote()))
-        // We want to conform to the unique asset ID of rclone, which is (asset_folder,display_name,last_modified).
-        // We also want to enable customers to choose their own public_id, in case duplicate names are not a crucial use case.
-        // Upload_presets that apply randomness to the public ID would not work well with rclone duplicate assets support.
-        params.FilenameOverride = f.getSuggestedPublicID(params.AssetFolder, params.DisplayName, src.ModTime(ctx))
-    }
-    uploadResult, err := f.cld.Upload.Upload(ctx, in, params)
-    f.lastCRUD = time.Now()
-    if err != nil {
-        return nil, fmt.Errorf("failed to upload to Cloudinary: %w", err)
-    }
-    if !updateObject {
-        modTime = uploadResult.CreatedAt
-    }
-    if uploadResult.Error.Message != "" {
-        return nil, errors.New(uploadResult.Error.Message)
-    }
-
-    o := &Object{
-        fs:           f,
-        remote:       src.Remote(),
-        size:         int64(uploadResult.Bytes),
-        modTime:      modTime,
-        url:          uploadResult.SecureURL,
-        md5sum:       uploadResult.Etag,
-        publicID:     uploadResult.PublicID,
-        resourceType: uploadResult.ResourceType,
-        deliveryType: uploadResult.Type,
-    }
-    return o, nil
-}
-
-// Precision of the remote
-func (f *Fs) Precision() time.Duration {
-    return fs.ModTimeNotSupported
-}
-
-// Hashes returns the supported hash sets
-func (f *Fs) Hashes() hash.Set {
-    return hash.Set(hash.MD5)
-}
-
-// Mkdir creates empty folders
-func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-    params := admin.CreateFolderParams{Folder: f.ToAssetFolderAPI(f.FromStandardFullPath(dir))}
-    res, err := f.cld.Admin.CreateFolder(ctx, params)
-    f.lastCRUD = time.Now()
-    if err != nil {
-        return err
-    }
-    if res.Error.Message != "" {
-        return errors.New(res.Error.Message)
-    }
-
-    return nil
-}
-
-// Rmdir deletes empty folders
-func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-    // Additional test because Cloudinary will delete folders without
-    // assets, regardless of empty sub-folders
-    folder := f.ToAssetFolderAPI(f.FromStandardFullPath(dir))
-    folderParams := admin.SubFoldersParams{
-        Folder:     folder,
-        MaxResults: 1,
-    }
-    results, err := f.cld.Admin.SubFolders(ctx, folderParams)
-    if err != nil {
-        return err
-    }
-    if results.TotalCount > 0 {
-        return fs.ErrorDirectoryNotEmpty
-    }
-
-    params := admin.DeleteFolderParams{Folder: folder}
-    res, err := f.cld.Admin.DeleteFolder(ctx, params)
-    f.lastCRUD = time.Now()
-    if err != nil {
-        return err
-    }
-    if res.Error.Message != "" {
-        if strings.HasPrefix(res.Error.Message, "Can't find folder with path") {
-            return fs.ErrorDirNotFound
-        }
-
-        return errors.New(res.Error.Message)
-    }
-
-    return nil
-}
-
-// retryErrorCodes is a slice of error codes that we will retry
-var retryErrorCodes = []int{
-    420, // Too Many Requests (legacy)
-    429, // Too Many Requests
-    500, // Internal Server Error
-    502, // Bad Gateway
-    503, // Service Unavailable
-    504, // Gateway Timeout
-    509, // Bandwidth Limit Exceeded
-}
-
-// shouldRetry returns a boolean as to whether this resp and err
-// deserve to be retried. It returns the err as a convenience
-func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
-    if fserrors.ContextError(ctx, &err) {
-        return false, err
-    }
-    if err != nil {
-        tryAgain := "Try again on "
-        if idx := strings.Index(err.Error(), tryAgain); idx != -1 {
-            layout := "2006-01-02 15:04:05 UTC"
-            dateStr := err.Error()[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
-            timestamp, err2 := time.Parse(layout, dateStr)
-            if err2 == nil {
-                return true, fserrors.NewErrorRetryAfter(time.Until(timestamp))
-            }
-        }
-
-        fs.Debugf(nil, "Retrying API error %v", err)
-        return true, err
-    }
-
-    return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
-}
-
-// ------------------------------------------------------------
-
-// Hash returns the MD5 of an object
-func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
-    if ty != hash.MD5 {
-        return "", hash.ErrUnsupported
-    }
-    return o.md5sum, nil
-}
-
-// Return a string version
-func (o *Object) String() string {
-    if o == nil {
-        return "<nil>"
-    }
-    return o.remote
-}
-
-// Fs returns the parent Fs
-func (o *Object) Fs() fs.Info {
-    return o.fs
-}
-
-// Remote returns the remote path
-func (o *Object) Remote() string {
-    return o.remote
-}
-
-// ModTime returns the modification time of the object
-func (o *Object) ModTime(ctx context.Context) time.Time {
-    return o.modTime
-}
-
-// Size of object in bytes
-func (o *Object) Size() int64 {
-    return o.size
-}
-
-// Storable returns if this object is storable
-func (o *Object) Storable() bool {
-    return true
-}
-
-// SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
-    return fs.ErrorCantSetModTime
-}
-
-// Open an object for read
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
-    var resp *http.Response
-    opts := rest.Opts{
-        Method:  "GET",
-        RootURL: o.url,
-        Options: options,
-    }
-    var offset int64
-    var count int64
-    var key string
-    var value string
-    fs.FixRangeOption(options, o.size)
-    for _, option := range options {
-        switch x := option.(type) {
-        case *fs.RangeOption:
-            offset, count = x.Decode(o.size)
-            if count < 0 {
-                count = o.size - offset
-            }
-            key, value = option.Header()
-        case *fs.SeekOption:
-            offset = x.Offset
-            count = o.size - offset
-            key, value = option.Header()
-        default:
-            if option.Mandatory() {
-                fs.Logf(o, "Unsupported mandatory option: %v", option)
-            }
-        }
-    }
-    if key != "" && value != "" {
-        opts.ExtraHeaders = make(map[string]string)
-        opts.ExtraHeaders[key] = value
-    }
-    // Make sure that the asset is fully available
-    err = o.fs.pacer.Call(func() (bool, error) {
-        resp, err = o.fs.srv.Call(ctx, &opts)
-        if err == nil {
-            cl, clErr := strconv.Atoi(resp.Header.Get("content-length"))
-            if clErr == nil && count == int64(cl) {
-                return false, nil
-            }
-        }
-        return shouldRetry(ctx, resp, err)
-    })
-    if err != nil {
-        return nil, fmt.Errorf("failed download of \"%s\": %w", o.url, err)
-    }
-    return resp.Body, err
-}
-
-// Update the object with the contents of the io.Reader
-func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-    options = append(options, &api.UpdateOptions{
-        PublicID:     o.publicID,
-        ResourceType: o.resourceType,
-        DeliveryType: o.deliveryType,
-        DisplayName:  api.CloudinaryEncoder.FromStandardName(o.fs, path.Base(o.Remote())),
-        AssetFolder:  o.fs.FromStandardFullPath(cldPathDir(o.Remote())),
-    })
-    updatedObj, err := o.fs.Put(ctx, in, src, options...)
-    if err != nil {
-        return err
-    }
-    if uo, ok := updatedObj.(*Object); ok {
-        o.size = uo.size
-        o.modTime = time.Now() // Skipping uo.modTime because the API returns the create time
-        o.url = uo.url
-        o.md5sum = uo.md5sum
-        o.publicID = uo.publicID
-        o.resourceType = uo.resourceType
-        o.deliveryType = uo.deliveryType
-    }
-    return nil
-}
-
-// Remove an object
-func (o *Object) Remove(ctx context.Context) error {
-    params := uploader.DestroyParams{
-        PublicID:     o.publicID,
-        ResourceType: o.resourceType,
-        Type:         o.deliveryType,
-    }
-    res, dErr := o.fs.cld.Upload.Destroy(ctx, params)
-    o.fs.lastCRUD = time.Now()
-    if dErr != nil {
-        return dErr
-    }
-
-    if res.Error.Message != "" {
-        return errors.New(res.Error.Message)
-    }
-
-    if res.Result != "ok" {
-        return errors.New(res.Result)
-    }
-
-    return nil
-}
@@ -1,23 +0,0 @@
-// Test Cloudinary filesystem interface
-
-package cloudinary_test
-
-import (
-    "testing"
-
-    "github.com/rclone/rclone/backend/cloudinary"
-    "github.com/rclone/rclone/fstest/fstests"
-)
-
-// TestIntegration runs integration tests against the remote
-func TestIntegration(t *testing.T) {
-    name := "TestCloudinary"
-    fstests.Run(t, &fstests.Opt{
-        RemoteName:      name + ":",
-        NilObject:       (*cloudinary.Object)(nil),
-        SkipInvalidUTF8: true,
-        ExtraConfig: []fstests.ExtraConfigItem{
-            {Name: name, Key: "eventually_consistent_delay", Value: "7"},
-        },
-    })
-}
@@ -20,7 +20,6 @@ import (
     "github.com/rclone/rclone/fs/config/configmap"
     "github.com/rclone/rclone/fs/config/configstruct"
     "github.com/rclone/rclone/fs/hash"
-    "github.com/rclone/rclone/fs/list"
     "github.com/rclone/rclone/fs/operations"
     "github.com/rclone/rclone/fs/walk"
     "golang.org/x/sync/errgroup"
@@ -223,23 +222,18 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
     }
     // check features
     var features = (&fs.Features{
         CaseInsensitive:         true,
         DuplicateFiles:          false,
         ReadMimeType:            true,
         WriteMimeType:           true,
         CanHaveEmptyDirectories: true,
         BucketBased:             true,
         SetTier:                 true,
         GetTier:                 true,
         ReadMetadata:            true,
         WriteMetadata:           true,
         UserMetadata:            true,
-        ReadDirMetadata:          true,
-        WriteDirMetadata:         true,
-        WriteDirSetModTime:       true,
-        UserDirMetadata:          true,
-        DirModTimeUpdatesOnWrite: true,
-        PartialUploads:           true,
+        PartialUploads:          true,
     }).Fill(ctx, f)
     canMove := true
     for _, u := range f.upstreams {
@@ -266,9 +260,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
         }
     }
 
-    // Enable ListP always
-    features.ListP = f.ListP
-
     // Enable Purge when any upstreams support it
     if features.Purge == nil {
         for _, u := range f.upstreams {
@@ -449,32 +440,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
     return u.f.Mkdir(ctx, uRemote)
 }
 
-// MkdirMetadata makes the root directory of the Fs object
-func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
-    u, uRemote, err := f.findUpstream(dir)
-    if err != nil {
-        return nil, err
-    }
-    do := u.f.Features().MkdirMetadata
-    if do == nil {
-        return nil, fs.ErrorNotImplemented
-    }
-    newDir, err := do(ctx, uRemote, metadata)
-    if err != nil {
-        return nil, err
-    }
-    entries := fs.DirEntries{newDir}
-    entries, err = u.wrapEntries(ctx, entries)
-    if err != nil {
-        return nil, err
-    }
-    newDir, ok := entries[0].(fs.Directory)
-    if !ok {
-        return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
-    }
-    return newDir, nil
-}
-
 // purge the upstream or fallback to a slow way
 func (u *upstream) purge(ctx context.Context, dir string) (err error) {
     if do := u.f.Features().Purge; do != nil {
@@ -790,11 +755,12 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
         case fs.Object:
             entries[i] = u.newObject(x)
         case fs.Directory:
-            newPath, err := u.pathAdjustment.do(x.Remote())
+            newDir := fs.NewDirCopy(ctx, x)
+            newPath, err := u.pathAdjustment.do(newDir.Remote())
             if err != nil {
                 return nil, err
             }
-            newDir := fs.NewDirWrapper(newPath, x)
+            newDir.SetRemote(newPath)
             entries[i] = newDir
         default:
             return nil, fmt.Errorf("unknown entry type %T", entry)
@@ -813,52 +779,24 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-    return list.WithListP(ctx, dir, f)
-}
-
-// ListP lists the objects and directories of the Fs starting
-// from dir non recursively into out.
-//
-// dir should be "" to start from the root, and should not
-// have trailing slashes.
-//
-// This should return ErrDirNotFound if the directory isn't
-// found.
-//
-// It should call callback for each tranche of entries read.
-// These need not be returned in any particular order. If
-// callback returns an error then the listing will stop
-// immediately.
-func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
     // defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
     if f.root == "" && dir == "" {
-        entries := make(fs.DirEntries, 0, len(f.upstreams))
+        entries = make(fs.DirEntries, 0, len(f.upstreams))
         for combineDir := range f.upstreams {
-            d := fs.NewLimitedDirWrapper(combineDir, fs.NewDir(combineDir, f.when))
+            d := fs.NewDir(combineDir, f.when)
             entries = append(entries, d)
         }
-        return callback(entries)
+        return entries, nil
     }
     u, uRemote, err := f.findUpstream(dir)
     if err != nil {
-        return err
+        return nil, err
     }
-    wrappedCallback := func(entries fs.DirEntries) error {
-        entries, err := u.wrapEntries(ctx, entries)
-        if err != nil {
-            return err
-        }
-        return callback(entries)
-    }
-    listP := u.f.Features().ListP
-    if listP == nil {
-        entries, err := u.f.List(ctx, uRemote)
-        if err != nil {
-            return err
-        }
-        return wrappedCallback(entries)
-    }
-    return listP(ctx, uRemote, wrappedCallback)
+    entries, err = u.f.List(ctx, uRemote)
+    if err != nil {
+        return nil, err
+    }
+    return u.wrapEntries(ctx, entries)
 }
 
 // ListR lists the objects and directories of the Fs starting
@@ -1027,22 +965,6 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
     return do(ctx, uDirs)
 }
 
-// DirSetModTime sets the directory modtime for dir
-func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
-    u, uDir, err := f.findUpstream(dir)
-    if err != nil {
-        return err
-    }
-    if uDir == "" {
-        fs.Debugf(dir, "Can't set modtime on upstream root. skipping.")
-        return nil
-    }
-    if do := u.f.Features().DirSetModTime; do != nil {
-        return do(ctx, uDir, modTime)
-    }
-    return fs.ErrorNotImplemented
-}
-
 // CleanUp the trash in the Fs
|
||||||
//
|
//
|
||||||
// Implement this if you have a way of emptying the trash or
|
// Implement this if you have a way of emptying the trash or
|
||||||
@@ -1151,17 +1073,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
|
|||||||
return do.Metadata(ctx)
|
return do.Metadata(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetMetadata sets metadata for an Object
|
|
||||||
//
|
|
||||||
// It should return fs.ErrorNotImplemented if it can't set metadata
|
|
||||||
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
|
|
||||||
do, ok := o.Object.(fs.SetMetadataer)
|
|
||||||
if !ok {
|
|
||||||
return fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
return do.SetMetadata(ctx, metadata)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetTier performs changing storage tier of the Object if
|
// SetTier performs changing storage tier of the Object if
|
||||||
// multiple storage classes supported
|
// multiple storage classes supported
|
||||||
func (o *Object) SetTier(tier string) error {
|
func (o *Object) SetTier(tier string) error {
|
||||||
@@ -1188,8 +1099,6 @@ var (
|
|||||||
_ fs.PublicLinker = (*Fs)(nil)
|
_ fs.PublicLinker = (*Fs)(nil)
|
||||||
_ fs.PutUncheckeder = (*Fs)(nil)
|
_ fs.PutUncheckeder = (*Fs)(nil)
|
||||||
_ fs.MergeDirser = (*Fs)(nil)
|
_ fs.MergeDirser = (*Fs)(nil)
|
||||||
_ fs.DirSetModTimer = (*Fs)(nil)
|
|
||||||
_ fs.MkdirMetadataer = (*Fs)(nil)
|
|
||||||
_ fs.CleanUpper = (*Fs)(nil)
|
_ fs.CleanUpper = (*Fs)(nil)
|
||||||
_ fs.OpenWriterAter = (*Fs)(nil)
|
_ fs.OpenWriterAter = (*Fs)(nil)
|
||||||
_ fs.FullObject = (*Object)(nil)
|
_ fs.FullObject = (*Object)(nil)
|
||||||
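The hunks above back out the streaming ListP listing path and return to a plain List call. As a point of reference, here is a minimal sketch, not taken from the rclone tree, of how a paged ListP-style lister can be drained into a single slice; collectEntries is an invented name, while fs.DirEntries and the callback shape are the ones visible in the diff.

package example

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// collectEntries drains a paged lister (a method with the ListP signature
// shown in the removed code) into one slice by handing it an accumulating
// callback. Illustrative sketch only.
func collectEntries(ctx context.Context, dir string, listP func(ctx context.Context, dir string, callback fs.ListRCallback) error) (fs.DirEntries, error) {
	var all fs.DirEntries
	err := listP(ctx, dir, func(entries fs.DirEntries) error {
		// Each invocation delivers one tranche of entries; returning an
		// error here stops the listing immediately.
		all = append(all, entries...)
		return nil
	})
	if err != nil {
		return nil, err
	}
	return all, nil
}

A backend method with that ListP signature could be passed directly as the listP argument.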
|
|||||||
@@ -14,7 +14,6 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
@@ -29,7 +28,6 @@ import (
|
|||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
"github.com/rclone/rclone/fs/fspath"
|
"github.com/rclone/rclone/fs/fspath"
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
"github.com/rclone/rclone/fs/list"
|
|
||||||
"github.com/rclone/rclone/fs/log"
|
"github.com/rclone/rclone/fs/log"
|
||||||
"github.com/rclone/rclone/fs/object"
|
"github.com/rclone/rclone/fs/object"
|
||||||
"github.com/rclone/rclone/fs/operations"
|
"github.com/rclone/rclone/fs/operations"
|
||||||
@@ -39,7 +37,6 @@ import (
|
|||||||
const (
|
const (
|
||||||
initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently
|
initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently
|
||||||
maxChunkSize = 8388608 // at 256 KiB and 8 MiB.
|
maxChunkSize = 8388608 // at 256 KiB and 8 MiB.
|
||||||
chunkStreams = 0 // Streams to use for reading
|
|
||||||
|
|
||||||
bufferSize = 8388608
|
bufferSize = 8388608
|
||||||
heuristicBytes = 1048576
|
heuristicBytes = 1048576
|
||||||
@@ -175,33 +172,21 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
|||||||
opt: *opt,
|
opt: *opt,
|
||||||
mode: compressionModeFromName(opt.CompressionMode),
|
mode: compressionModeFromName(opt.CompressionMode),
|
||||||
}
|
}
|
||||||
// Correct root if definitely pointing to a file
|
|
||||||
if err == fs.ErrorIsFile {
|
|
||||||
f.root = path.Dir(f.root)
|
|
||||||
if f.root == "." || f.root == "/" {
|
|
||||||
f.root = ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// the features here are ones we could support, and they are
|
// the features here are ones we could support, and they are
|
||||||
// ANDed with the ones from wrappedFs
|
// ANDed with the ones from wrappedFs
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
CaseInsensitive: true,
|
CaseInsensitive: true,
|
||||||
DuplicateFiles: false,
|
DuplicateFiles: false,
|
||||||
ReadMimeType: false,
|
ReadMimeType: false,
|
||||||
WriteMimeType: false,
|
WriteMimeType: false,
|
||||||
GetTier: true,
|
GetTier: true,
|
||||||
SetTier: true,
|
SetTier: true,
|
||||||
BucketBased: true,
|
BucketBased: true,
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
ReadMetadata: true,
|
ReadMetadata: true,
|
||||||
WriteMetadata: true,
|
WriteMetadata: true,
|
||||||
UserMetadata: true,
|
UserMetadata: true,
|
||||||
ReadDirMetadata: true,
|
PartialUploads: true,
|
||||||
WriteDirMetadata: true,
|
|
||||||
WriteDirSetModTime: true,
|
|
||||||
UserDirMetadata: true,
|
|
||||||
DirModTimeUpdatesOnWrite: true,
|
|
||||||
PartialUploads: true,
|
|
||||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||||
// We support reading MIME types no matter the wrapped fs
|
// We support reading MIME types no matter the wrapped fs
|
||||||
f.features.ReadMimeType = true
|
f.features.ReadMimeType = true
|
||||||
@@ -209,8 +194,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
|||||||
if !operations.CanServerSideMove(wrappedFs) {
|
if !operations.CanServerSideMove(wrappedFs) {
|
||||||
f.features.Disable("PutStream")
|
f.features.Disable("PutStream")
|
||||||
}
|
}
|
||||||
// Enable ListP always
|
|
||||||
f.features.ListP = f.ListP
|
|
||||||
|
|
||||||
return f, err
|
return f, err
|
||||||
}
|
}
|
||||||
@@ -355,39 +338,11 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
|
|||||||
// found.
|
// found.
|
||||||
// List entries and process them
|
// List entries and process them
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
return list.WithListP(ctx, dir, f)
|
entries, err = f.Fs.List(ctx, dir)
|
||||||
}
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
// ListP lists the objects and directories of the Fs starting
|
|
||||||
// from dir non recursively into out.
|
|
||||||
//
|
|
||||||
// dir should be "" to start from the root, and should not
|
|
||||||
// have trailing slashes.
|
|
||||||
//
|
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
|
||||||
// found.
|
|
||||||
//
|
|
||||||
// It should call callback for each tranche of entries read.
|
|
||||||
// These need not be returned in any particular order. If
|
|
||||||
// callback returns an error then the listing will stop
|
|
||||||
// immediately.
|
|
||||||
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
|
|
||||||
wrappedCallback := func(entries fs.DirEntries) error {
|
|
||||||
entries, err := f.processEntries(entries)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return callback(entries)
|
|
||||||
}
|
}
|
||||||
listP := f.Fs.Features().ListP
|
return f.processEntries(entries)
|
||||||
if listP == nil {
|
|
||||||
entries, err := f.Fs.List(ctx, dir)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return wrappedCallback(entries)
|
|
||||||
}
|
|
||||||
return listP(ctx, dir, wrappedCallback)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListR lists the objects and directories of the Fs starting
|
// ListR lists the objects and directories of the Fs starting
|
||||||
@@ -487,7 +442,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
||||||
}
|
}
|
||||||
return fmt.Errorf("corrupted on transfer: %v compressed hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash)
|
return fmt.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -821,14 +776,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
|||||||
return f.Fs.Mkdir(ctx, dir)
|
return f.Fs.Mkdir(ctx, dir)
|
||||||
}
|
}
|
||||||
|
|
||||||
// MkdirMetadata makes the root directory of the Fs object
|
|
||||||
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
|
|
||||||
if do := f.Fs.Features().MkdirMetadata; do != nil {
|
|
||||||
return do(ctx, dir, metadata)
|
|
||||||
}
|
|
||||||
return nil, fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rmdir removes the directory (container, bucket) if empty
|
// Rmdir removes the directory (container, bucket) if empty
|
||||||
//
|
//
|
||||||
// Return an error if it doesn't exist or isn't empty
|
// Return an error if it doesn't exist or isn't empty
|
||||||
@@ -972,14 +919,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
|||||||
return do(ctx, srcFs.Fs, srcRemote, dstRemote)
|
return do(ctx, srcFs.Fs, srcRemote, dstRemote)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DirSetModTime sets the directory modtime for dir
|
|
||||||
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
|
|
||||||
if do := f.Fs.Features().DirSetModTime; do != nil {
|
|
||||||
return do(ctx, dir, modTime)
|
|
||||||
}
|
|
||||||
return fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// CleanUp the trash in the Fs
|
// CleanUp the trash in the Fs
|
||||||
//
|
//
|
||||||
// Implement this if you have a way of emptying the trash or
|
// Implement this if you have a way of emptying the trash or
|
||||||
@@ -1318,17 +1257,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
|
|||||||
return do.Metadata(ctx)
|
return do.Metadata(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetMetadata sets metadata for an Object
|
|
||||||
//
|
|
||||||
// It should return fs.ErrorNotImplemented if it can't set metadata
|
|
||||||
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
|
|
||||||
do, ok := o.Object.(fs.SetMetadataer)
|
|
||||||
if !ok {
|
|
||||||
return fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
return do.SetMetadata(ctx, metadata)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hash returns the selected checksum of the file
|
// Hash returns the selected checksum of the file
|
||||||
// If no checksum is available it returns ""
|
// If no checksum is available it returns ""
|
||||||
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
||||||
@@ -1394,7 +1322,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Get a chunkedreader for the wrapped object
|
// Get a chunkedreader for the wrapped object
|
||||||
chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams)
|
chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize)
|
||||||
// Get file handle
|
// Get file handle
|
||||||
var file io.Reader
|
var file io.Reader
|
||||||
if offset != 0 {
|
if offset != 0 {
|
||||||
@@ -1561,8 +1489,6 @@ var (
|
|||||||
_ fs.Copier = (*Fs)(nil)
|
_ fs.Copier = (*Fs)(nil)
|
||||||
_ fs.Mover = (*Fs)(nil)
|
_ fs.Mover = (*Fs)(nil)
|
||||||
_ fs.DirMover = (*Fs)(nil)
|
_ fs.DirMover = (*Fs)(nil)
|
||||||
_ fs.DirSetModTimer = (*Fs)(nil)
|
|
||||||
_ fs.MkdirMetadataer = (*Fs)(nil)
|
|
||||||
_ fs.PutStreamer = (*Fs)(nil)
|
_ fs.PutStreamer = (*Fs)(nil)
|
||||||
_ fs.CleanUpper = (*Fs)(nil)
|
_ fs.CleanUpper = (*Fs)(nil)
|
||||||
_ fs.UnWrapper = (*Fs)(nil)
|
_ fs.UnWrapper = (*Fs)(nil)
|
||||||
|
|||||||
@@ -192,7 +192,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
 		dirNameEncrypt:  dirNameEncrypt,
 		encryptedSuffix: ".bin",
 	}
-	c.buffers.New = func() any {
+	c.buffers.New = func() interface{} {
 		return new([blockSize]byte)
 	}
 	err := c.Key(password, salt)
@@ -329,14 +329,14 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
 	for _, runeValue := range plaintext {
 		dir += int(runeValue)
 	}
-	dir %= 256
+	dir = dir % 256
 
 	// We'll use this number to store in the result filename...
 	var result bytes.Buffer
 	_, _ = result.WriteString(strconv.Itoa(dir) + ".")
 
 	// but we'll augment it with the nameKey for real calculation
-	for i := range len(c.nameKey) {
+	for i := 0; i < len(c.nameKey); i++ {
 		dir += int(c.nameKey[i])
 	}
 
@@ -418,7 +418,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
 	}
 
 	// add the nameKey to get the real rotate distance
-	for i := range len(c.nameKey) {
+	for i := 0; i < len(c.nameKey); i++ {
 		dir += int(c.nameKey[i])
 	}
 
@@ -450,7 +450,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
 		if pos >= 26 {
 			pos -= 6
 		}
-		pos -= thisdir
+		pos = pos - thisdir
 		if pos < 0 {
 			pos += 52
 		}
@@ -664,7 +664,7 @@ func (n *nonce) increment() {
 // add a uint64 to the nonce
 func (n *nonce) add(x uint64) {
 	carry := uint16(0)
-	for i := range 8 {
+	for i := 0; i < 8; i++ {
 		digit := (*n)[i]
 		xDigit := byte(x)
 		x >>= 8
@@ -888,7 +888,7 @@ func (fh *decrypter) fillBuffer() (err error) {
 		fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
 		// Zero out the bad block and continue
 		for i := range (*fh.buf)[:n] {
-			fh.buf[i] = 0
+			(*fh.buf)[i] = 0
 		}
 	}
 	fh.bufIndex = 0

@@ -1307,7 +1307,10 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 	open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
 		end := len(ciphertext)
 		if underlyingLimit >= 0 {
-			end = min(int(underlyingOffset+underlyingLimit), len(ciphertext))
+			end = int(underlyingOffset + underlyingLimit)
+			if end > len(ciphertext) {
+				end = len(ciphertext)
+			}
 		}
 		reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
 		return reader, nil
@@ -1487,7 +1490,7 @@ func TestDecrypterRead(t *testing.T) {
 	assert.NoError(t, err)
 
 	// Test truncating the file at each possible point
-	for i := range len(file16) - 1 {
+	for i := 0; i < len(file16)-1; i++ {
 		what := fmt.Sprintf("truncating to %d/%d", i, len(file16))
 		cd := newCloseDetector(bytes.NewBuffer(file16[:i]))
 		fh, err := c.newDecrypter(cd)
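The @@ -664 hunk only rewrites the loop header of the nonce add helper. For readers skimming the diff, this is a small self-contained sketch of the same add-with-carry idea over a little-endian byte counter; the 24-byte length and the addToNonce name are assumptions made for the example, not the rclone implementation.

package main

import "fmt"

// addToNonce adds x into a little-endian byte counter, one byte at a
// time, carrying overflow into the next byte (the same pattern as the
// loop shown in the hunk above).
func addToNonce(n *[24]byte, x uint64) {
	carry := uint16(0)
	for i := 0; i < 8; i++ {
		digit := n[i]
		xDigit := byte(x)
		x >>= 8
		carry += uint16(digit) + uint16(xDigit)
		n[i] = byte(carry)
		carry >>= 8
	}
	// Propagate any carry left over into the higher bytes.
	for i := 8; i < len(n) && carry != 0; i++ {
		carry += uint16(n[i])
		n[i] = byte(carry)
		carry >>= 8
	}
}

func main() {
	var n [24]byte
	n[0] = 0xff
	addToNonce(&n, 1)
	fmt.Println(n[0], n[1]) // prints 0 1, i.e. 0xff + 1 carried into the next byte
}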
|
|||||||
@@ -18,7 +18,6 @@ import (
|
|||||||
"github.com/rclone/rclone/fs/config/obscure"
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
"github.com/rclone/rclone/fs/fspath"
|
"github.com/rclone/rclone/fs/fspath"
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
"github.com/rclone/rclone/fs/list"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Globals
|
// Globals
|
||||||
@@ -131,16 +130,6 @@ trying to recover an encrypted file with errors and it is desired to
|
|||||||
recover as much of the file as possible.`,
|
recover as much of the file as possible.`,
|
||||||
Default: false,
|
Default: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
|
||||||
Name: "strict_names",
|
|
||||||
Help: `If set, this will raise an error when crypt comes across a filename that can't be decrypted.
|
|
||||||
|
|
||||||
(By default, rclone will just log a NOTICE and continue as normal.)
|
|
||||||
This can happen if encrypted and unencrypted files are stored in the same
|
|
||||||
directory (which is not recommended.) It may also indicate a more serious
|
|
||||||
problem that should be investigated.`,
|
|
||||||
Default: false,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
}, {
|
||||||
Name: "filename_encoding",
|
Name: "filename_encoding",
|
||||||
Help: `How to encode the encrypted filename to text string.
|
Help: `How to encode the encrypted filename to text string.
|
||||||
@@ -264,39 +253,24 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
|||||||
cipher: cipher,
|
cipher: cipher,
|
||||||
}
|
}
|
||||||
cache.PinUntilFinalized(f.Fs, f)
|
cache.PinUntilFinalized(f.Fs, f)
|
||||||
// Correct root if definitely pointing to a file
|
|
||||||
if err == fs.ErrorIsFile {
|
|
||||||
f.root = path.Dir(f.root)
|
|
||||||
if f.root == "." || f.root == "/" {
|
|
||||||
f.root = ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// the features here are ones we could support, and they are
|
// the features here are ones we could support, and they are
|
||||||
// ANDed with the ones from wrappedFs
|
// ANDed with the ones from wrappedFs
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
|
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
|
||||||
DuplicateFiles: true,
|
DuplicateFiles: true,
|
||||||
ReadMimeType: false, // MimeTypes not supported with crypt
|
ReadMimeType: false, // MimeTypes not supported with crypt
|
||||||
WriteMimeType: false,
|
WriteMimeType: false,
|
||||||
BucketBased: true,
|
BucketBased: true,
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
SetTier: true,
|
SetTier: true,
|
||||||
GetTier: true,
|
GetTier: true,
|
||||||
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
||||||
ReadMetadata: true,
|
ReadMetadata: true,
|
||||||
WriteMetadata: true,
|
WriteMetadata: true,
|
||||||
UserMetadata: true,
|
UserMetadata: true,
|
||||||
ReadDirMetadata: true,
|
PartialUploads: true,
|
||||||
WriteDirMetadata: true,
|
|
||||||
WriteDirSetModTime: true,
|
|
||||||
UserDirMetadata: true,
|
|
||||||
DirModTimeUpdatesOnWrite: true,
|
|
||||||
PartialUploads: true,
|
|
||||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||||
|
|
||||||
// Enable ListP always
|
|
||||||
f.features.ListP = f.ListP
|
|
||||||
|
|
||||||
return f, err
|
return f, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -313,7 +287,6 @@ type Options struct {
|
|||||||
PassBadBlocks bool `config:"pass_bad_blocks"`
|
PassBadBlocks bool `config:"pass_bad_blocks"`
|
||||||
FilenameEncoding string `config:"filename_encoding"`
|
FilenameEncoding string `config:"filename_encoding"`
|
||||||
Suffix string `config:"suffix"`
|
Suffix string `config:"suffix"`
|
||||||
StrictNames bool `config:"strict_names"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a wrapped fs.Fs
|
// Fs represents a wrapped fs.Fs
|
||||||
@@ -348,64 +321,45 @@ func (f *Fs) String() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Encrypt an object file name to entries.
|
// Encrypt an object file name to entries.
|
||||||
func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) error {
|
func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
|
||||||
remote := obj.Remote()
|
remote := obj.Remote()
|
||||||
decryptedRemote, err := f.cipher.DecryptFileName(remote)
|
decryptedRemote, err := f.cipher.DecryptFileName(remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if f.opt.StrictNames {
|
fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
|
||||||
return fmt.Errorf("%s: undecryptable file name detected: %v", remote, err)
|
return
|
||||||
}
|
|
||||||
fs.Logf(remote, "Skipping undecryptable file name: %v", err)
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
if f.opt.ShowMapping {
|
if f.opt.ShowMapping {
|
||||||
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
|
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
|
||||||
}
|
}
|
||||||
*entries = append(*entries, f.newObject(obj))
|
*entries = append(*entries, f.newObject(obj))
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Encrypt a directory file name to entries.
|
// Encrypt a directory file name to entries.
|
||||||
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) error {
|
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
|
||||||
remote := dir.Remote()
|
remote := dir.Remote()
|
||||||
decryptedRemote, err := f.cipher.DecryptDirName(remote)
|
decryptedRemote, err := f.cipher.DecryptDirName(remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if f.opt.StrictNames {
|
fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
|
||||||
return fmt.Errorf("%s: undecryptable dir name detected: %v", remote, err)
|
return
|
||||||
}
|
|
||||||
fs.Logf(remote, "Skipping undecryptable dir name: %v", err)
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
if f.opt.ShowMapping {
|
if f.opt.ShowMapping {
|
||||||
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
|
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
|
||||||
}
|
}
|
||||||
*entries = append(*entries, f.newDir(ctx, dir))
|
*entries = append(*entries, f.newDir(ctx, dir))
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Encrypt some directory entries. This alters entries returning it as newEntries.
|
// Encrypt some directory entries. This alters entries returning it as newEntries.
|
||||||
func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
|
func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
|
||||||
newEntries = entries[:0] // in place filter
|
newEntries = entries[:0] // in place filter
|
||||||
errors := 0
|
|
||||||
var firsterr error
|
|
||||||
for _, entry := range entries {
|
for _, entry := range entries {
|
||||||
switch x := entry.(type) {
|
switch x := entry.(type) {
|
||||||
case fs.Object:
|
case fs.Object:
|
||||||
err = f.add(&newEntries, x)
|
f.add(&newEntries, x)
|
||||||
case fs.Directory:
|
case fs.Directory:
|
||||||
err = f.addDir(ctx, &newEntries, x)
|
f.addDir(ctx, &newEntries, x)
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("unknown object type %T", entry)
|
return nil, fmt.Errorf("unknown object type %T", entry)
|
||||||
}
|
}
|
||||||
if err != nil {
|
|
||||||
errors++
|
|
||||||
if firsterr == nil {
|
|
||||||
firsterr = err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if firsterr != nil {
|
|
||||||
return nil, fmt.Errorf("there were %v undecryptable name errors. first error: %v", errors, firsterr)
|
|
||||||
}
|
}
|
||||||
return newEntries, nil
|
return newEntries, nil
|
||||||
}
|
}
|
||||||
@@ -420,40 +374,11 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
|
|||||||
// This should return ErrDirNotFound if the directory isn't
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
// found.
|
// found.
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
return list.WithListP(ctx, dir, f)
|
entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
|
||||||
}
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
// ListP lists the objects and directories of the Fs starting
|
|
||||||
// from dir non recursively into out.
|
|
||||||
//
|
|
||||||
// dir should be "" to start from the root, and should not
|
|
||||||
// have trailing slashes.
|
|
||||||
//
|
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
|
||||||
// found.
|
|
||||||
//
|
|
||||||
// It should call callback for each tranche of entries read.
|
|
||||||
// These need not be returned in any particular order. If
|
|
||||||
// callback returns an error then the listing will stop
|
|
||||||
// immediately.
|
|
||||||
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
|
|
||||||
wrappedCallback := func(entries fs.DirEntries) error {
|
|
||||||
entries, err := f.encryptEntries(ctx, entries)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return callback(entries)
|
|
||||||
}
|
}
|
||||||
listP := f.Fs.Features().ListP
|
return f.encryptEntries(ctx, entries)
|
||||||
encryptedDir := f.cipher.EncryptDirName(dir)
|
|
||||||
if listP == nil {
|
|
||||||
entries, err := f.Fs.List(ctx, encryptedDir)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return wrappedCallback(entries)
|
|
||||||
}
|
|
||||||
return listP(ctx, encryptedDir, wrappedCallback)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListR lists the objects and directories of the Fs starting
|
// ListR lists the objects and directories of the Fs starting
|
||||||
@@ -553,7 +478,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
||||||
}
|
}
|
||||||
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash)
|
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
|
||||||
}
|
}
|
||||||
fs.Debugf(src, "%v = %s OK", ht, srcHash)
|
fs.Debugf(src, "%v = %s OK", ht, srcHash)
|
||||||
}
|
}
|
||||||
@@ -588,37 +513,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
|||||||
return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
|
return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
|
||||||
}
|
}
|
||||||
|
|
||||||
// MkdirMetadata makes the root directory of the Fs object
|
|
||||||
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
|
|
||||||
do := f.Fs.Features().MkdirMetadata
|
|
||||||
if do == nil {
|
|
||||||
return nil, fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
newDir, err := do(ctx, f.cipher.EncryptDirName(dir), metadata)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var entries = make(fs.DirEntries, 0, 1)
|
|
||||||
err = f.addDir(ctx, &entries, newDir)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
newDir, ok := entries[0].(fs.Directory)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
|
|
||||||
}
|
|
||||||
return newDir, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DirSetModTime sets the directory modtime for dir
|
|
||||||
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
|
|
||||||
do := f.Fs.Features().DirSetModTime
|
|
||||||
if do == nil {
|
|
||||||
return fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
return do(ctx, f.cipher.EncryptDirName(dir), modTime)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rmdir removes the directory (container, bucket) if empty
|
// Rmdir removes the directory (container, bucket) if empty
|
||||||
//
|
//
|
||||||
// Return an error if it doesn't exist or isn't empty
|
// Return an error if it doesn't exist or isn't empty
|
||||||
@@ -860,7 +754,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
|
|||||||
}
|
}
|
||||||
out := make([]fs.Directory, len(dirs))
|
out := make([]fs.Directory, len(dirs))
|
||||||
for i, dir := range dirs {
|
for i, dir := range dirs {
|
||||||
out[i] = fs.NewDirWrapper(f.cipher.EncryptDirName(dir.Remote()), dir)
|
out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
|
||||||
}
|
}
|
||||||
return do(ctx, out)
|
return do(ctx, out)
|
||||||
}
|
}
|
||||||
@@ -957,7 +851,7 @@ Usage Example:
|
|||||||
// The result should be capable of being JSON encoded
|
// The result should be capable of being JSON encoded
|
||||||
// If it is a string or a []string it will be shown to the user
|
// If it is a string or a []string it will be shown to the user
|
||||||
// otherwise it will be JSON encoded and shown to the user like that
|
// otherwise it will be JSON encoded and shown to the user like that
|
||||||
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
|
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
||||||
switch name {
|
switch name {
|
||||||
case "decode":
|
case "decode":
|
||||||
out := make([]string, 0, len(arg))
|
out := make([]string, 0, len(arg))
|
||||||
@@ -1096,14 +990,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
|
|
||||||
// newDir returns a dir with the Name decrypted
|
// newDir returns a dir with the Name decrypted
|
||||||
func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
|
func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
|
||||||
|
newDir := fs.NewDirCopy(ctx, dir)
|
||||||
remote := dir.Remote()
|
remote := dir.Remote()
|
||||||
decryptedRemote, err := f.cipher.DecryptDirName(remote)
|
decryptedRemote, err := f.cipher.DecryptDirName(remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(remote, "Undecryptable dir name: %v", err)
|
fs.Debugf(remote, "Undecryptable dir name: %v", err)
|
||||||
} else {
|
} else {
|
||||||
remote = decryptedRemote
|
newDir.SetRemote(decryptedRemote)
|
||||||
}
|
}
|
||||||
newDir := fs.NewDirWrapper(remote, dir)
|
|
||||||
return newDir
|
return newDir
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1281,17 +1175,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
|
|||||||
return do.Metadata(ctx)
|
return do.Metadata(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetMetadata sets metadata for an Object
|
|
||||||
//
|
|
||||||
// It should return fs.ErrorNotImplemented if it can't set metadata
|
|
||||||
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
|
|
||||||
do, ok := o.Object.(fs.SetMetadataer)
|
|
||||||
if !ok {
|
|
||||||
return fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
return do.SetMetadata(ctx, metadata)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MimeType returns the content type of the Object if
|
// MimeType returns the content type of the Object if
|
||||||
// known, or "" if not
|
// known, or "" if not
|
||||||
//
|
//
|
||||||
@@ -1317,8 +1200,6 @@ var (
|
|||||||
_ fs.Abouter = (*Fs)(nil)
|
_ fs.Abouter = (*Fs)(nil)
|
||||||
_ fs.Wrapper = (*Fs)(nil)
|
_ fs.Wrapper = (*Fs)(nil)
|
||||||
_ fs.MergeDirser = (*Fs)(nil)
|
_ fs.MergeDirser = (*Fs)(nil)
|
||||||
_ fs.DirSetModTimer = (*Fs)(nil)
|
|
||||||
_ fs.MkdirMetadataer = (*Fs)(nil)
|
|
||||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||||
_ fs.PublicLinker = (*Fs)(nil)
|
_ fs.PublicLinker = (*Fs)(nil)
|
||||||
|
|||||||
@@ -25,7 +25,7 @@ func Pad(n int, buf []byte) []byte {
 	}
 	length := len(buf)
 	padding := n - (length % n)
-	for range padding {
+	for i := 0; i < padding; i++ {
 		buf = append(buf, byte(padding))
 	}
 	if (len(buf) % n) != 0 {
@@ -54,7 +54,7 @@ func Unpad(n int, buf []byte) ([]byte, error) {
 	if padding == 0 {
 		return nil, ErrorPaddingTooShort
 	}
-	for i := range padding {
+	for i := 0; i < padding; i++ {
 		if buf[length-1-i] != byte(padding) {
 			return nil, ErrorPaddingNotAllTheSame
 		}
 	}
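The two hunks above only change the loop syntax in the PKCS#7 padding helpers; the scheme itself is untouched. As a quick reference, here is a small self-contained round trip of that padding scheme; pkcs7Pad and pkcs7Unpad are illustrative names for this sketch, not the functions shown in the diff.

package main

import (
	"bytes"
	"fmt"
)

// pkcs7Pad appends between 1 and n bytes, each holding the pad length,
// so the result is a whole number of n-byte blocks.
func pkcs7Pad(n int, buf []byte) []byte {
	padding := n - (len(buf) % n)
	for i := 0; i < padding; i++ {
		buf = append(buf, byte(padding))
	}
	return buf
}

// pkcs7Unpad validates the trailing pad bytes and strips them again.
func pkcs7Unpad(n int, buf []byte) ([]byte, error) {
	if len(buf) == 0 || len(buf)%n != 0 {
		return nil, fmt.Errorf("bad padded length %d", len(buf))
	}
	padding := int(buf[len(buf)-1])
	if padding == 0 || padding > n {
		return nil, fmt.Errorf("bad padding byte %d", padding)
	}
	for i := 0; i < padding; i++ {
		if buf[len(buf)-1-i] != byte(padding) {
			return nil, fmt.Errorf("padding bytes are not all the same")
		}
	}
	return buf[:len(buf)-padding], nil
}

func main() {
	padded := pkcs7Pad(16, []byte("hello"))
	plain, err := pkcs7Unpad(16, padded)
	fmt.Println(len(padded), string(plain), err, bytes.Equal(plain, []byte("hello"))) // 16 hello <nil> true
}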
|
|||||||
@@ -1,38 +0,0 @@
|
|||||||
// Type definitions specific to Dataverse
|
|
||||||
|
|
||||||
package api
|
|
||||||
|
|
||||||
// DataverseDatasetResponse is returned by the Dataverse dataset API
|
|
||||||
type DataverseDatasetResponse struct {
|
|
||||||
Status string `json:"status"`
|
|
||||||
Data DataverseDataset `json:"data"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// DataverseDataset is the representation of a dataset
|
|
||||||
type DataverseDataset struct {
|
|
||||||
LatestVersion DataverseDatasetVersion `json:"latestVersion"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// DataverseDatasetVersion is the representation of a dataset version
|
|
||||||
type DataverseDatasetVersion struct {
|
|
||||||
LastUpdateTime string `json:"lastUpdateTime"`
|
|
||||||
Files []DataverseFile `json:"files"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// DataverseFile is the representation of a file found in a dataset
|
|
||||||
type DataverseFile struct {
|
|
||||||
DirectoryLabel string `json:"directoryLabel"`
|
|
||||||
DataFile DataverseDataFile `json:"dataFile"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// DataverseDataFile represents file metadata details
|
|
||||||
type DataverseDataFile struct {
|
|
||||||
ID int64 `json:"id"`
|
|
||||||
Filename string `json:"filename"`
|
|
||||||
ContentType string `json:"contentType"`
|
|
||||||
FileSize int64 `json:"filesize"`
|
|
||||||
OriginalFileFormat string `json:"originalFileFormat"`
|
|
||||||
OriginalFileSize int64 `json:"originalFileSize"`
|
|
||||||
OriginalFileName string `json:"originalFileName"`
|
|
||||||
MD5 string `json:"md5"`
|
|
||||||
}
|
|
||||||
@@ -1,33 +0,0 @@
|
|||||||
// Type definitions specific to InvenioRDM
|
|
||||||
|
|
||||||
package api
|
|
||||||
|
|
||||||
// InvenioRecordResponse is the representation of a record stored in InvenioRDM
|
|
||||||
type InvenioRecordResponse struct {
|
|
||||||
Links InvenioRecordResponseLinks `json:"links"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// InvenioRecordResponseLinks represents a record's links
|
|
||||||
type InvenioRecordResponseLinks struct {
|
|
||||||
Self string `json:"self"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// InvenioFilesResponse is the representation of a record's files
|
|
||||||
type InvenioFilesResponse struct {
|
|
||||||
Entries []InvenioFilesResponseEntry `json:"entries"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// InvenioFilesResponseEntry is the representation of a file entry
|
|
||||||
type InvenioFilesResponseEntry struct {
|
|
||||||
Key string `json:"key"`
|
|
||||||
Checksum string `json:"checksum"`
|
|
||||||
Size int64 `json:"size"`
|
|
||||||
Updated string `json:"updated"`
|
|
||||||
MimeType string `json:"mimetype"`
|
|
||||||
Links InvenioFilesResponseEntryLinks `json:"links"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// InvenioFilesResponseEntryLinks represents file links details
|
|
||||||
type InvenioFilesResponseEntryLinks struct {
|
|
||||||
Content string `json:"content"`
|
|
||||||
}
|
|
||||||
@@ -1,26 +0,0 @@
|
|||||||
// Package api has general type definitions for doi
|
|
||||||
package api
|
|
||||||
|
|
||||||
// DoiResolverResponse is returned by the DOI resolver API
|
|
||||||
//
|
|
||||||
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
|
|
||||||
type DoiResolverResponse struct {
|
|
||||||
ResponseCode int `json:"responseCode"`
|
|
||||||
Handle string `json:"handle"`
|
|
||||||
Values []DoiResolverResponseValue `json:"values"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// DoiResolverResponseValue is a single handle record value
|
|
||||||
type DoiResolverResponseValue struct {
|
|
||||||
Index int `json:"index"`
|
|
||||||
Type string `json:"type"`
|
|
||||||
Data DoiResolverResponseValueData `json:"data"`
|
|
||||||
TTL int `json:"ttl"`
|
|
||||||
Timestamp string `json:"timestamp"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// DoiResolverResponseValueData is the data held in a handle value
|
|
||||||
type DoiResolverResponseValueData struct {
|
|
||||||
Format string `json:"format"`
|
|
||||||
Value any `json:"value"`
|
|
||||||
}
|
|
||||||
@@ -1,112 +0,0 @@
|
|||||||
// Implementation for Dataverse
|
|
||||||
|
|
||||||
package doi
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/backend/doi/api"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/lib/rest"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Returns true if resolvedURL is likely a DOI hosted on a Dataverse installation
|
|
||||||
func activateDataverse(resolvedURL *url.URL) (isActive bool) {
|
|
||||||
queryValues := resolvedURL.Query()
|
|
||||||
persistentID := queryValues.Get("persistentId")
|
|
||||||
return persistentID != ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resolve the main API endpoint for a DOI hosted on a Dataverse installation
|
|
||||||
func resolveDataverseEndpoint(resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
|
|
||||||
queryValues := resolvedURL.Query()
|
|
||||||
persistentID := queryValues.Get("persistentId")
|
|
||||||
|
|
||||||
query := url.Values{}
|
|
||||||
query.Add("persistentId", persistentID)
|
|
||||||
endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/datasets/:persistentId/", RawQuery: query.Encode()})
|
|
||||||
|
|
||||||
return Dataverse, endpointURL, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// dataverseProvider implements the doiProvider interface for Dataverse installations
|
|
||||||
type dataverseProvider struct {
|
|
||||||
f *Fs
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListEntries returns the full list of entries found at the remote, regardless of root
|
|
||||||
func (dp *dataverseProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
|
|
||||||
// Use the cache if populated
|
|
||||||
cachedEntries, found := dp.f.cache.GetMaybe("files")
|
|
||||||
if found {
|
|
||||||
parsedEntries, ok := cachedEntries.([]Object)
|
|
||||||
if ok {
|
|
||||||
for _, entry := range parsedEntries {
|
|
||||||
newEntry := entry
|
|
||||||
entries = append(entries, &newEntry)
|
|
||||||
}
|
|
||||||
return entries, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
filesURL := dp.f.endpoint
|
|
||||||
var res *http.Response
|
|
||||||
var result api.DataverseDatasetResponse
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "GET",
|
|
||||||
Path: strings.TrimLeft(filesURL.EscapedPath(), "/"),
|
|
||||||
Parameters: filesURL.Query(),
|
|
||||||
}
|
|
||||||
err = dp.f.pacer.Call(func() (bool, error) {
|
|
||||||
res, err = dp.f.srv.CallJSON(ctx, &opts, nil, &result)
|
|
||||||
return shouldRetry(ctx, res, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("readDir failed: %w", err)
|
|
||||||
}
|
|
||||||
modTime, modTimeErr := time.Parse(time.RFC3339, result.Data.LatestVersion.LastUpdateTime)
|
|
||||||
if modTimeErr != nil {
|
|
||||||
fs.Logf(dp.f, "error: could not parse last update time %v", modTimeErr)
|
|
||||||
modTime = timeUnset
|
|
||||||
}
|
|
||||||
for _, file := range result.Data.LatestVersion.Files {
|
|
||||||
contentURLPath := fmt.Sprintf("/api/access/datafile/%d", file.DataFile.ID)
|
|
||||||
query := url.Values{}
|
|
||||||
query.Add("format", "original")
|
|
||||||
contentURL := dp.f.endpoint.ResolveReference(&url.URL{Path: contentURLPath, RawQuery: query.Encode()})
|
|
||||||
entry := &Object{
|
|
||||||
fs: dp.f,
|
|
||||||
remote: path.Join(file.DirectoryLabel, file.DataFile.Filename),
|
|
||||||
contentURL: contentURL.String(),
|
|
||||||
size: file.DataFile.FileSize,
|
|
||||||
modTime: modTime,
|
|
||||||
md5: file.DataFile.MD5,
|
|
||||||
contentType: file.DataFile.ContentType,
|
|
||||||
}
|
|
||||||
if file.DataFile.OriginalFileName != "" {
|
|
||||||
entry.remote = path.Join(file.DirectoryLabel, file.DataFile.OriginalFileName)
|
|
||||||
entry.size = file.DataFile.OriginalFileSize
|
|
||||||
entry.contentType = file.DataFile.OriginalFileFormat
|
|
||||||
}
|
|
||||||
entries = append(entries, entry)
|
|
||||||
}
|
|
||||||
// Populate the cache
|
|
||||||
cacheEntries := []Object{}
|
|
||||||
for _, entry := range entries {
|
|
||||||
cacheEntries = append(cacheEntries, *entry)
|
|
||||||
}
|
|
||||||
dp.f.cache.Put("files", cacheEntries)
|
|
||||||
return entries, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newDataverseProvider(f *Fs) doiProvider {
|
|
||||||
return &dataverseProvider{
|
|
||||||
f: f,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,649 +0,0 @@
|
|||||||
// Package doi provides a filesystem interface for digital objects identified by DOIs.
|
|
||||||
//
|
|
||||||
// See: https://www.doi.org/the-identifier/what-is-a-doi/
|
|
||||||
package doi
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/backend/doi/api"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
|
||||||
"github.com/rclone/rclone/fs/fserrors"
|
|
||||||
"github.com/rclone/rclone/fs/fshttp"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/rclone/rclone/lib/cache"
|
|
||||||
"github.com/rclone/rclone/lib/pacer"
|
|
||||||
"github.com/rclone/rclone/lib/rest"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// the URL of the DOI resolver
|
|
||||||
//
|
|
||||||
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
|
|
||||||
doiResolverAPIURL = "https://doi.org/api"
|
|
||||||
minSleep = 10 * time.Millisecond
|
|
||||||
maxSleep = 2 * time.Second
|
|
||||||
decayConstant = 2 // bigger for slower decay, exponential
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
errorReadOnly = errors.New("doi remotes are read only")
|
|
||||||
timeUnset = time.Unix(0, 0)
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
fsi := &fs.RegInfo{
|
|
||||||
Name: "doi",
|
|
||||||
Description: "DOI datasets",
|
|
||||||
NewFs: NewFs,
|
|
||||||
CommandHelp: commandHelp,
|
|
||||||
Options: []fs.Option{{
|
|
||||||
Name: "doi",
|
|
||||||
Help: "The DOI or the doi.org URL.",
|
|
||||||
Required: true,
|
|
||||||
}, {
|
|
||||||
Name: fs.ConfigProvider,
|
|
||||||
Help: `DOI provider.
|
|
||||||
|
|
||||||
The DOI provider can be set when rclone does not automatically recognize a supported DOI provider.`,
|
|
||||||
Examples: []fs.OptionExample{
|
|
||||||
{
|
|
||||||
Value: "auto",
|
|
||||||
Help: "Auto-detect provider",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Value: string(Zenodo),
|
|
||||||
Help: "Zenodo",
|
|
||||||
}, {
|
|
||||||
Value: string(Dataverse),
|
|
||||||
Help: "Dataverse",
|
|
||||||
}, {
|
|
||||||
Value: string(Invenio),
|
|
||||||
Help: "Invenio",
|
|
||||||
}},
|
|
||||||
Required: false,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "doi_resolver_api_url",
|
|
||||||
Help: `The URL of the DOI resolver API to use.
|
|
||||||
|
|
||||||
The DOI resolver can be set for testing or for cases when the canonical DOI resolver API cannot be used.
|
|
||||||
|
|
||||||
Defaults to "https://doi.org/api".`,
|
|
||||||
Required: false,
|
|
||||||
Advanced: true,
|
|
||||||
}},
|
|
||||||
}
|
|
||||||
fs.Register(fsi)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Provider defines the type of provider hosting the DOI
|
|
||||||
type Provider string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Zenodo provider, see https://zenodo.org
|
|
||||||
Zenodo Provider = "zenodo"
|
|
||||||
// Dataverse provider, see https://dataverse.harvard.edu
|
|
||||||
Dataverse Provider = "dataverse"
|
|
||||||
// Invenio provider, see https://inveniordm.docs.cern.ch
|
|
||||||
Invenio Provider = "invenio"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Options defines the configuration for this backend
|
|
||||||
type Options struct {
|
|
||||||
Doi string `config:"doi"` // The DOI, a digital identifier of an object, usually a dataset
|
|
||||||
Provider string `config:"provider"` // The DOI provider
|
|
||||||
DoiResolverAPIURL string `config:"doi_resolver_api_url"` // The URL of the DOI resolver API to use.
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs stores the interface to the remote HTTP files
|
|
||||||
type Fs struct {
|
|
||||||
name string // name of this remote
|
|
||||||
root string // the path we are working on
|
|
||||||
provider Provider // the DOI provider
|
|
||||||
doiProvider doiProvider // the interface used to interact with the DOI provider
|
|
||||||
features *fs.Features // optional features
|
|
||||||
opt Options // options for this backend
|
|
||||||
ci *fs.ConfigInfo // global config
|
|
||||||
endpoint *url.URL // the main API endpoint for this remote
|
|
||||||
endpointURL string // endpoint as a string
|
|
||||||
srv *rest.Client // the connection to the server
|
|
||||||
pacer *fs.Pacer // pacer for API calls
|
|
||||||
cache *cache.Cache // a cache for the remote metadata
|
|
||||||
}
|
|
||||||
|
|
||||||
// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
|
|
||||||
type Object struct {
|
|
||||||
fs *Fs // what this object is part of
|
|
||||||
remote string // the remote path
|
|
||||||
contentURL string // the URL where the contents of the file can be downloaded
|
|
||||||
size int64 // size of the object
|
|
||||||
modTime time.Time // modification time of the object
|
|
||||||
contentType string // content type of the object
|
|
||||||
md5 string // MD5 hash of the object content
|
|
||||||
}
|
|
||||||
|
|
||||||
// doiProvider is the interface used to list objects in a DOI
|
|
||||||
type doiProvider interface {
|
|
||||||
// ListEntries returns the full list of entries found at the remote, regardless of root
|
|
||||||
ListEntries(ctx context.Context) (entries []*Object, err error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse the input string as a DOI
|
|
||||||
// Examples:
|
|
||||||
// 10.1000/182 -> 10.1000/182
|
|
||||||
// https://doi.org/10.1000/182 -> 10.1000/182
|
|
||||||
// doi:10.1000/182 -> 10.1000/182
|
|
||||||
func parseDoi(doi string) string {
|
|
||||||
doiURL, err := url.Parse(doi)
|
|
||||||
if err != nil {
|
|
||||||
return doi
|
|
||||||
}
|
|
||||||
if doiURL.Scheme == "doi" {
|
|
||||||
return strings.TrimLeft(strings.TrimPrefix(doi, "doi:"), "/")
|
|
||||||
}
|
|
||||||
if strings.HasSuffix(doiURL.Hostname(), "doi.org") {
|
|
||||||
return strings.TrimLeft(doiURL.Path, "/")
|
|
||||||
}
|
|
||||||
return doi
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resolve a DOI to a URL
|
|
||||||
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
|
|
||||||
func resolveDoiURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (doiURL *url.URL, err error) {
|
|
||||||
resolverURL := opt.DoiResolverAPIURL
|
|
||||||
if resolverURL == "" {
|
|
||||||
resolverURL = doiResolverAPIURL
|
|
||||||
}
|
|
||||||
|
|
||||||
var result api.DoiResolverResponse
|
|
||||||
params := url.Values{}
|
|
||||||
params.Add("index", "1")
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "GET",
|
|
||||||
RootURL: resolverURL,
|
|
||||||
Path: "/handles/" + opt.Doi,
|
|
||||||
Parameters: params,
|
|
||||||
}
|
|
||||||
err = pacer.Call(func() (bool, error) {
|
|
||||||
res, err := srv.CallJSON(ctx, &opts, nil, &result)
|
|
||||||
return shouldRetry(ctx, res, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if result.ResponseCode != 1 {
|
|
||||||
return nil, fmt.Errorf("could not resolve DOI (error code %d)", result.ResponseCode)
|
|
||||||
}
|
|
||||||
resolvedURLStr := ""
|
|
||||||
for _, value := range result.Values {
|
|
||||||
if value.Type == "URL" && value.Data.Format == "string" {
|
|
||||||
valueStr, ok := value.Data.Value.(string)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("could not resolve DOI (incorrect response format)")
|
|
||||||
}
|
|
||||||
resolvedURLStr = valueStr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
resolvedURL, err := url.Parse(resolvedURLStr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return resolvedURL, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resolve the passed configuration into a provider and endpoint
|
|
||||||
func resolveEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (provider Provider, endpoint *url.URL, err error) {
|
|
||||||
resolvedURL, err := resolveDoiURL(ctx, srv, pacer, opt)
|
|
||||||
if err != nil {
|
|
||||||
return "", nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch opt.Provider {
|
|
||||||
case string(Dataverse):
|
|
||||||
return resolveDataverseEndpoint(resolvedURL)
|
|
||||||
case string(Invenio):
|
|
||||||
return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
|
|
||||||
case string(Zenodo):
|
|
||||||
return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
|
|
||||||
}
|
|
||||||
|
|
||||||
hostname := strings.ToLower(resolvedURL.Hostname())
|
|
||||||
if hostname == "dataverse.harvard.edu" || activateDataverse(resolvedURL) {
|
|
||||||
return resolveDataverseEndpoint(resolvedURL)
|
|
||||||
}
|
|
||||||
if hostname == "zenodo.org" || strings.HasSuffix(hostname, ".zenodo.org") {
|
|
||||||
return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
|
|
||||||
}
|
|
||||||
if activateInvenio(ctx, srv, pacer, resolvedURL) {
|
|
||||||
return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", nil, fmt.Errorf("provider '%s' is not supported", resolvedURL.Hostname())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make the http connection from the passed options
|
|
||||||
func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err error) {
|
|
||||||
provider, endpoint, err := resolveEndpoint(ctx, f.srv, f.pacer, opt)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update f with the new parameters
|
|
||||||
f.srv.SetRoot(endpoint.ResolveReference(&url.URL{Path: "/"}).String())
|
|
||||||
f.endpoint = endpoint
|
|
||||||
f.endpointURL = endpoint.String()
|
|
||||||
f.provider = provider
|
|
||||||
f.opt.Provider = string(provider)
|
|
||||||
|
|
||||||
switch f.provider {
|
|
||||||
case Dataverse:
|
|
||||||
f.doiProvider = newDataverseProvider(f)
|
|
||||||
case Invenio, Zenodo:
|
|
||||||
f.doiProvider = newInvenioProvider(f)
|
|
||||||
default:
|
|
||||||
return false, fmt.Errorf("provider type '%s' not supported", f.provider)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Determine if the root is a file
|
|
||||||
entries, err := f.doiProvider.ListEntries(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
for _, entry := range entries {
|
|
||||||
if entry.remote == f.root {
|
|
||||||
isFile = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return isFile, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// retryErrorCodes is a slice of error codes that we will retry
|
|
||||||
var retryErrorCodes = []int{
|
|
||||||
429, // Too Many Requests.
|
|
||||||
500, // Internal Server Error
|
|
||||||
502, // Bad Gateway
|
|
||||||
503, // Service Unavailable
|
|
||||||
504, // Gateway Timeout
|
|
||||||
509, // Bandwidth Limit Exceeded
|
|
||||||
}
|
|
||||||
|
|
||||||
// shouldRetry returns a boolean as to whether this res and err
// deserve to be retried. It returns the err as a convenience.
func shouldRetry(ctx context.Context, res *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
}
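// Editorial sketch (not part of the original change): shouldRetry is intended
// to be used as the callback passed to pacer.Call, which keeps retrying while
// the callback returns true. Assuming the srv, pacer and opts values used
// elsewhere in this backend, the typical wiring looks like:
//
//	err = f.pacer.Call(func() (bool, error) {
//		res, err = f.srv.Call(ctx, &opts)
//		return shouldRetry(ctx, res, err)
//	})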
|
|
||||||
|
|
||||||
// NewFs creates a new Fs object from the name and root. It connects to
|
|
||||||
// the host specified in the config file.
|
|
||||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|
||||||
root = strings.Trim(root, "/")
|
|
||||||
|
|
||||||
// Parse config into Options struct
|
|
||||||
opt := new(Options)
|
|
||||||
err := configstruct.Set(m, opt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
opt.Doi = parseDoi(opt.Doi)
|
|
||||||
|
|
||||||
client := fshttp.NewClient(ctx)
|
|
||||||
ci := fs.GetConfig(ctx)
|
|
||||||
f := &Fs{
|
|
||||||
name: name,
|
|
||||||
root: root,
|
|
||||||
opt: *opt,
|
|
||||||
ci: ci,
|
|
||||||
srv: rest.NewClient(client),
|
|
||||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
|
||||||
cache: cache.New(),
|
|
||||||
}
|
|
||||||
f.features = (&fs.Features{
|
|
||||||
CanHaveEmptyDirectories: true,
|
|
||||||
}).Fill(ctx, f)
|
|
||||||
|
|
||||||
isFile, err := f.httpConnection(ctx, opt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if isFile {
|
|
||||||
// return an error with an fs which points to the parent
|
|
||||||
newRoot := path.Dir(f.root)
|
|
||||||
if newRoot == "." {
|
|
||||||
newRoot = ""
|
|
||||||
}
|
|
||||||
f.root = newRoot
|
|
||||||
return f, fs.ErrorIsFile
|
|
||||||
}
|
|
||||||
|
|
||||||
return f, nil
|
|
||||||
}
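// Illustrative usage (an assumption, not part of the original change): once
// registered, a doi remote can be created and read with the standard rclone
// commands, for example:
//
//	rclone config create mydoi doi doi 10.5281/zenodo.2600782
//	rclone ls mydoi:
//
// The DOI above is the one exercised by the Zenodo test further down; any
// resolvable DOI from a supported provider should behave the same way.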
|
|
||||||
|
|
||||||
// Name returns the configured name of the file system
|
|
||||||
func (f *Fs) Name() string {
|
|
||||||
return f.name
|
|
||||||
}
|
|
||||||
|
|
||||||
// Root returns the root for the filesystem
|
|
||||||
func (f *Fs) Root() string {
|
|
||||||
return f.root
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the URL for the filesystem
|
|
||||||
func (f *Fs) String() string {
|
|
||||||
return fmt.Sprintf("DOI %s", f.opt.Doi)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Features returns the optional features of this Fs
|
|
||||||
func (f *Fs) Features() *fs.Features {
|
|
||||||
return f.features
|
|
||||||
}
|
|
||||||
|
|
||||||
// Precision is the remote http file system's modtime precision, which we have no way of knowing. We estimate at 1s
|
|
||||||
func (f *Fs) Precision() time.Duration {
|
|
||||||
return time.Second
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hashes returns hash.MD5 to indicate that MD5 checksums are supported
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}
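// Illustrative note (an assumption, not part of the original change): because
// MD5 is advertised here, hash-aware commands such as
//
//	rclone md5sum mydoi:
//	rclone check mydoi: /local/copy
//
// can verify downloads against the checksums reported by the provider.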
|
|
||||||
|
|
||||||
// Mkdir makes the root directory of the Fs object
|
|
||||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
|
||||||
return errorReadOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove a remote http file object
|
|
||||||
func (o *Object) Remove(ctx context.Context) error {
|
|
||||||
return errorReadOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rmdir removes the root directory of the Fs object
|
|
||||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|
||||||
return errorReadOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewObject creates a new remote http file object
|
|
||||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
|
||||||
entries, err := f.doiProvider.ListEntries(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
remoteFullPath := remote
|
|
||||||
if f.root != "" {
|
|
||||||
remoteFullPath = path.Join(f.root, remote)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, entry := range entries {
|
|
||||||
if entry.Remote() == remoteFullPath {
|
|
||||||
return entry, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fs.ErrorObjectNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
// List the objects and directories in dir into entries. The
|
|
||||||
// entries can be returned in any order but should be for a
|
|
||||||
// complete directory.
|
|
||||||
//
|
|
||||||
// dir should be "" to list the root, and should not have
|
|
||||||
// trailing slashes.
|
|
||||||
//
|
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
|
||||||
// found.
|
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
|
||||||
fileEntries, err := f.doiProvider.ListEntries(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error listing %q: %w", dir, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
fullDir := path.Join(f.root, dir)
|
|
||||||
if fullDir != "" {
|
|
||||||
fullDir += "/"
|
|
||||||
}
|
|
||||||
|
|
||||||
dirPaths := map[string]bool{}
|
|
||||||
for _, entry := range fileEntries {
|
|
||||||
// First, filter out files not in `fullDir`
|
|
||||||
if !strings.HasPrefix(entry.remote, fullDir) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Then, find entries in subfolders
|
|
||||||
remotePath := entry.remote
|
|
||||||
if fullDir != "" {
|
|
||||||
remotePath = strings.TrimLeft(strings.TrimPrefix(remotePath, fullDir), "/")
|
|
||||||
}
|
|
||||||
parts := strings.SplitN(remotePath, "/", 2)
|
|
||||||
if len(parts) == 1 {
|
|
||||||
newEntry := *entry
|
|
||||||
newEntry.remote = path.Join(dir, remotePath)
|
|
||||||
entries = append(entries, &newEntry)
|
|
||||||
} else {
|
|
||||||
dirPaths[path.Join(dir, parts[0])] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for dirPath := range dirPaths {
|
|
||||||
entry := fs.NewDir(dirPath, time.Time{})
|
|
||||||
entries = append(entries, entry)
|
|
||||||
}
|
|
||||||
|
|
||||||
return entries, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put in to the remote path with the modTime given of the given size
|
|
||||||
//
|
|
||||||
// May create the object even if it returns an error - if so
|
|
||||||
// will return the object and the error, otherwise will return
|
|
||||||
// nil and the error
|
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
|
||||||
return nil, errorReadOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
|
||||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
|
||||||
return nil, errorReadOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs is the filesystem this remote http file object is located within
|
|
||||||
func (o *Object) Fs() fs.Info {
|
|
||||||
return o.fs
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the URL to the remote HTTP file
|
|
||||||
func (o *Object) String() string {
|
|
||||||
if o == nil {
|
|
||||||
return "<nil>"
|
|
||||||
}
|
|
||||||
return o.remote
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remote the name of the remote HTTP file, relative to the fs root
|
|
||||||
func (o *Object) Remote() string {
|
|
||||||
return o.remote
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hash returns the MD5 checksum reported by the provider, or hash.ErrUnsupported for other hash types
|
|
||||||
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
|
||||||
if t != hash.MD5 {
|
|
||||||
return "", hash.ErrUnsupported
|
|
||||||
}
|
|
||||||
return o.md5, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the size in bytes of the remote http file
|
|
||||||
func (o *Object) Size() int64 {
|
|
||||||
return o.size
|
|
||||||
}
|
|
||||||
|
|
||||||
// ModTime returns the modification time of the remote http file
|
|
||||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
|
||||||
return o.modTime
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetModTime sets the modification and access time to the specified time
|
|
||||||
//
|
|
||||||
// it also updates the info field
|
|
||||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
|
||||||
return errorReadOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
|
|
||||||
func (o *Object) Storable() bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open a remote http file object for reading. Seek is supported
|
|
||||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
|
||||||
fs.FixRangeOption(options, o.size)
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "GET",
|
|
||||||
RootURL: o.contentURL,
|
|
||||||
Options: options,
|
|
||||||
}
|
|
||||||
var res *http.Response
|
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
|
||||||
res, err = o.fs.srv.Call(ctx, &opts)
|
|
||||||
return shouldRetry(ctx, res, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Open failed: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle non-compliant redirects
|
|
||||||
if res.Header.Get("Location") != "" {
|
|
||||||
newURL, err := res.Location()
|
|
||||||
if err == nil {
|
|
||||||
opts.RootURL = newURL.String()
|
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
|
||||||
res, err = o.fs.srv.Call(ctx, &opts)
|
|
||||||
return shouldRetry(ctx, res, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Open failed: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return res.Body, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update in to the object with the modTime given of the given size
|
|
||||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
|
||||||
return errorReadOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// MimeType of an Object if known, "" otherwise
|
|
||||||
func (o *Object) MimeType(ctx context.Context) string {
|
|
||||||
return o.contentType
|
|
||||||
}
|
|
||||||
|
|
||||||
var commandHelp = []fs.CommandHelp{{
|
|
||||||
Name: "metadata",
|
|
||||||
Short: "Show metadata about the DOI.",
|
|
||||||
Long: `This command returns a JSON object with some information about the DOI.
|
|
||||||
|
|
||||||
rclone backend metadata doi:
|
|
||||||
|
|
||||||
It returns a JSON object representing metadata about the DOI.
|
|
||||||
`,
|
|
||||||
}, {
|
|
||||||
Name: "set",
|
|
||||||
Short: "Set command for updating the config parameters.",
|
|
||||||
Long: `This set command can be used to update the config parameters
|
|
||||||
for a running doi backend.
|
|
||||||
|
|
||||||
Usage Examples:
|
|
||||||
|
|
||||||
rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
|
|
||||||
rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
|
|
||||||
rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
|
|
||||||
|
|
||||||
The option keys are named as they are in the config file.
|
|
||||||
|
|
||||||
This rebuilds the connection to the doi backend when it is called with
|
|
||||||
the new parameters. Only new parameters need be passed as the values
|
|
||||||
will default to those currently in use.
|
|
||||||
|
|
||||||
It doesn't return anything.
|
|
||||||
`,
|
|
||||||
}}
|
|
||||||
|
|
||||||
// Command the backend to run a named command
|
|
||||||
//
|
|
||||||
// The command run is name
|
|
||||||
// args may be used to read arguments from
|
|
||||||
// opts may be used to read optional arguments from
|
|
||||||
//
|
|
||||||
// The result should be capable of being JSON encoded
|
|
||||||
// If it is a string or a []string it will be shown to the user
|
|
||||||
// otherwise it will be JSON encoded and shown to the user like that
|
|
||||||
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
|
||||||
switch name {
|
|
||||||
case "metadata":
|
|
||||||
return f.ShowMetadata(ctx)
|
|
||||||
case "set":
|
|
||||||
newOpt := f.opt
|
|
||||||
err := configstruct.Set(configmap.Simple(opt), &newOpt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("reading config: %w", err)
|
|
||||||
}
|
|
||||||
_, err = f.httpConnection(ctx, &newOpt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("updating session: %w", err)
|
|
||||||
}
|
|
||||||
f.opt = newOpt
|
|
||||||
keys := []string{}
|
|
||||||
for k := range opt {
|
|
||||||
keys = append(keys, k)
|
|
||||||
}
|
|
||||||
fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
|
|
||||||
return nil, nil
|
|
||||||
default:
|
|
||||||
return nil, fs.ErrorCommandNotFound
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShowMetadata returns some metadata about the corresponding DOI
|
|
||||||
func (f *Fs) ShowMetadata(ctx context.Context) (metadata interface{}, err error) {
|
|
||||||
doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
info := map[string]any{}
|
|
||||||
info["DOI"] = f.opt.Doi
|
|
||||||
info["URL"] = doiURL.String()
|
|
||||||
info["metadataURL"] = f.endpointURL
|
|
||||||
info["provider"] = f.provider
|
|
||||||
return info, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the interfaces are satisfied
|
|
||||||
var (
|
|
||||||
_ fs.Fs = (*Fs)(nil)
|
|
||||||
_ fs.PutStreamer = (*Fs)(nil)
|
|
||||||
_ fs.Commander = (*Fs)(nil)
|
|
||||||
_ fs.Object = (*Object)(nil)
|
|
||||||
_ fs.MimeTyper = (*Object)(nil)
|
|
||||||
)
|
|
||||||
@@ -1,260 +0,0 @@
|
|||||||
package doi
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto/md5"
|
|
||||||
"encoding/hex"
|
|
||||||
"encoding/json"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"net/url"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/backend/doi/api"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
var remoteName = "TestDoi"
|
|
||||||
|
|
||||||
func TestParseDoi(t *testing.T) {
|
|
||||||
// 10.1000/182 -> 10.1000/182
|
|
||||||
doi := "10.1000/182"
|
|
||||||
parsed := parseDoi(doi)
|
|
||||||
assert.Equal(t, "10.1000/182", parsed)
|
|
||||||
|
|
||||||
// https://doi.org/10.1000/182 -> 10.1000/182
|
|
||||||
doi = "https://doi.org/10.1000/182"
|
|
||||||
parsed = parseDoi(doi)
|
|
||||||
assert.Equal(t, "10.1000/182", parsed)
|
|
||||||
|
|
||||||
// https://dx.doi.org/10.1000/182 -> 10.1000/182
|
|
||||||
doi = "https://dxdoi.org/10.1000/182"
|
|
||||||
parsed = parseDoi(doi)
|
|
||||||
assert.Equal(t, "10.1000/182", parsed)
|
|
||||||
|
|
||||||
// doi:10.1000/182 -> 10.1000/182
|
|
||||||
doi = "doi:10.1000/182"
|
|
||||||
parsed = parseDoi(doi)
|
|
||||||
assert.Equal(t, "10.1000/182", parsed)
|
|
||||||
|
|
||||||
// doi://10.1000/182 -> 10.1000/182
|
|
||||||
doi = "doi://10.1000/182"
|
|
||||||
parsed = parseDoi(doi)
|
|
||||||
assert.Equal(t, "10.1000/182", parsed)
|
|
||||||
}
|
|
||||||
|
|
||||||
// prepareMockDoiResolverServer prepares a test server to resolve DOIs
|
|
||||||
func prepareMockDoiResolverServer(t *testing.T, resolvedURL string) (doiResolverAPIURL string) {
|
|
||||||
mux := http.NewServeMux()
|
|
||||||
|
|
||||||
// Handle requests for resolving DOIs
|
|
||||||
mux.HandleFunc("GET /api/handles/{handle...}", func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
// Check that we are resolving a DOI
|
|
||||||
handle := strings.TrimPrefix(r.URL.Path, "/api/handles/")
|
|
||||||
assert.NotEmpty(t, handle)
|
|
||||||
index := r.URL.Query().Get("index")
|
|
||||||
assert.Equal(t, "1", index)
|
|
||||||
|
|
||||||
// Return the most basic response
|
|
||||||
result := api.DoiResolverResponse{
|
|
||||||
ResponseCode: 1,
|
|
||||||
Handle: handle,
|
|
||||||
Values: []api.DoiResolverResponseValue{
|
|
||||||
{
|
|
||||||
Index: 1,
|
|
||||||
Type: "URL",
|
|
||||||
Data: api.DoiResolverResponseValueData{
|
|
||||||
Format: "string",
|
|
||||||
Value: resolvedURL,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
resultBytes, err := json.Marshal(result)
|
|
||||||
require.NoError(t, err)
|
|
||||||
w.Header().Add("Content-Type", "application/json")
|
|
||||||
_, err = w.Write(resultBytes)
|
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
|
||||||
|
|
||||||
// Make the test server
|
|
||||||
ts := httptest.NewServer(mux)
|
|
||||||
|
|
||||||
// Close the server at the end of the test
|
|
||||||
t.Cleanup(ts.Close)
|
|
||||||
|
|
||||||
return ts.URL + "/api"
|
|
||||||
}
|
|
||||||
|
|
||||||
func md5Sum(text string) string {
|
|
||||||
hash := md5.Sum([]byte(text))
|
|
||||||
return hex.EncodeToString(hash[:])
|
|
||||||
}
|
|
||||||
|
|
||||||
// prepareMockZenodoServer prepares a test server that mocks Zenodo.org
|
|
||||||
func prepareMockZenodoServer(t *testing.T, files map[string]string) *httptest.Server {
|
|
||||||
mux := http.NewServeMux()
|
|
||||||
|
|
||||||
// Handle requests for a single record
|
|
||||||
mux.HandleFunc("GET /api/records/{recordID...}", func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
// Check that we are returning data about a single record
|
|
||||||
recordID := strings.TrimPrefix(r.URL.Path, "/api/records/")
|
|
||||||
assert.NotEmpty(t, recordID)
|
|
||||||
|
|
||||||
// Return the most basic response
|
|
||||||
selfURL, err := url.Parse("http://" + r.Host)
|
|
||||||
require.NoError(t, err)
|
|
||||||
selfURL = selfURL.JoinPath(r.URL.String())
|
|
||||||
result := api.InvenioRecordResponse{
|
|
||||||
Links: api.InvenioRecordResponseLinks{
|
|
||||||
Self: selfURL.String(),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
resultBytes, err := json.Marshal(result)
|
|
||||||
require.NoError(t, err)
|
|
||||||
w.Header().Add("Content-Type", "application/json")
|
|
||||||
_, err = w.Write(resultBytes)
|
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
|
||||||
// Handle requests for listing files in a record
|
|
||||||
mux.HandleFunc("GET /api/records/{record}/files", func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
// Return the most basic response
|
|
||||||
filesBaseURL, err := url.Parse("http://" + r.Host)
|
|
||||||
require.NoError(t, err)
|
|
||||||
filesBaseURL = filesBaseURL.JoinPath("/api/files/")
|
|
||||||
|
|
||||||
entries := []api.InvenioFilesResponseEntry{}
|
|
||||||
for filename, contents := range files {
|
|
||||||
entries = append(entries,
|
|
||||||
api.InvenioFilesResponseEntry{
|
|
||||||
Key: filename,
|
|
||||||
Checksum: md5Sum(contents),
|
|
||||||
Size: int64(len(contents)),
|
|
||||||
Updated: time.Now().UTC().Format(time.RFC3339),
|
|
||||||
MimeType: "text/plain; charset=utf-8",
|
|
||||||
Links: api.InvenioFilesResponseEntryLinks{
|
|
||||||
Content: filesBaseURL.JoinPath(filename).String(),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
result := api.InvenioFilesResponse{
|
|
||||||
Entries: entries,
|
|
||||||
}
|
|
||||||
resultBytes, err := json.Marshal(result)
|
|
||||||
require.NoError(t, err)
|
|
||||||
w.Header().Add("Content-Type", "application/json")
|
|
||||||
_, err = w.Write(resultBytes)
|
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
|
||||||
// Handle requests for file contents
|
|
||||||
mux.HandleFunc("/api/files/{file}", func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
// Check that we are returning the contents of a file
|
|
||||||
filename := strings.TrimPrefix(r.URL.Path, "/api/files/")
|
|
||||||
assert.NotEmpty(t, filename)
|
|
||||||
contents, found := files[filename]
|
|
||||||
if !found {
|
|
||||||
w.WriteHeader(404)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return the most basic response
|
|
||||||
_, err := w.Write([]byte(contents))
|
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
|
||||||
|
|
||||||
// Make the test server
|
|
||||||
ts := httptest.NewServer(mux)
|
|
||||||
|
|
||||||
// Close the server at the end of the test
|
|
||||||
t.Cleanup(ts.Close)
|
|
||||||
|
|
||||||
return ts
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestZenodoRemote(t *testing.T) {
|
|
||||||
recordID := "2600782"
|
|
||||||
doi := "10.5281/zenodo.2600782"
|
|
||||||
|
|
||||||
// The files in the dataset
|
|
||||||
files := map[string]string{
|
|
||||||
"README.md": "This is a dataset.",
|
|
||||||
"data.txt": "Some data",
|
|
||||||
}
|
|
||||||
|
|
||||||
ts := prepareMockZenodoServer(t, files)
|
|
||||||
resolvedURL := ts.URL + "/record/" + recordID
|
|
||||||
|
|
||||||
doiResolverAPIURL := prepareMockDoiResolverServer(t, resolvedURL)
|
|
||||||
|
|
||||||
testConfig := configmap.Simple{
|
|
||||||
"type": "doi",
|
|
||||||
"doi": doi,
|
|
||||||
"provider": "zenodo",
|
|
||||||
"doi_resolver_api_url": doiResolverAPIURL,
|
|
||||||
}
|
|
||||||
f, err := NewFs(context.Background(), remoteName, "", testConfig)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Test listing the DOI files
|
|
||||||
entries, err := f.List(context.Background(), "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
sort.Sort(entries)
|
|
||||||
|
|
||||||
require.Equal(t, len(files), len(entries))
|
|
||||||
|
|
||||||
e := entries[0]
|
|
||||||
assert.Equal(t, "README.md", e.Remote())
|
|
||||||
assert.Equal(t, int64(18), e.Size())
|
|
||||||
_, ok := e.(*Object)
|
|
||||||
assert.True(t, ok)
|
|
||||||
|
|
||||||
e = entries[1]
|
|
||||||
assert.Equal(t, "data.txt", e.Remote())
|
|
||||||
assert.Equal(t, int64(9), e.Size())
|
|
||||||
_, ok = e.(*Object)
|
|
||||||
assert.True(t, ok)
|
|
||||||
|
|
||||||
// Test reading the DOI files
|
|
||||||
o, err := f.NewObject(context.Background(), "README.md")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, int64(18), o.Size())
|
|
||||||
md5Hash, err := o.Hash(context.Background(), hash.MD5)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, "464352b1cab5240e44528a56fda33d9d", md5Hash)
|
|
||||||
fd, err := o.Open(context.Background())
|
|
||||||
require.NoError(t, err)
|
|
||||||
data, err := io.ReadAll(fd)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, fd.Close())
|
|
||||||
assert.Equal(t, []byte(files["README.md"]), data)
|
|
||||||
do, ok := o.(fs.MimeTyper)
|
|
||||||
require.True(t, ok)
|
|
||||||
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
|
|
||||||
|
|
||||||
o, err = f.NewObject(context.Background(), "data.txt")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, int64(9), o.Size())
|
|
||||||
md5Hash, err = o.Hash(context.Background(), hash.MD5)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, "5b82f8bf4df2bfb0e66ccaa7306fd024", md5Hash)
|
|
||||||
fd, err = o.Open(context.Background())
|
|
||||||
require.NoError(t, err)
|
|
||||||
data, err = io.ReadAll(fd)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, fd.Close())
|
|
||||||
assert.Equal(t, []byte(files["data.txt"]), data)
|
|
||||||
do, ok = o.(fs.MimeTyper)
|
|
||||||
require.True(t, ok)
|
|
||||||
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
|
|
||||||
}
|
|
||||||
@@ -1,16 +0,0 @@
|
|||||||
// Test DOI filesystem interface
|
|
||||||
package doi
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
|
||||||
func TestIntegration(t *testing.T) {
|
|
||||||
fstests.Run(t, &fstests.Opt{
|
|
||||||
RemoteName: "TestDoi:",
|
|
||||||
NilObject: (*Object)(nil),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
@@ -1,164 +0,0 @@
|
|||||||
// Implementation for InvenioRDM
|
|
||||||
|
|
||||||
package doi
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/backend/doi/api"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/lib/rest"
|
|
||||||
)
|
|
||||||
|
|
||||||
var invenioRecordRegex = regexp.MustCompile(`\/records?\/(.+)`)
|
|
||||||
|
|
||||||
// Returns true if resolvedURL is likely a DOI hosted on an InvenioRDM installation
|
|
||||||
func activateInvenio(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (isActive bool) {
|
|
||||||
_, _, err := resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
|
|
||||||
return err == nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resolve the main API endpoint for a DOI hosted on an InvenioRDM installation
|
|
||||||
func resolveInvenioEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
|
|
||||||
var res *http.Response
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "GET",
|
|
||||||
RootURL: resolvedURL.String(),
|
|
||||||
}
|
|
||||||
err = pacer.Call(func() (bool, error) {
|
|
||||||
res, err = srv.Call(ctx, &opts)
|
|
||||||
return shouldRetry(ctx, res, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return "", nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// First, attempt to grab the API URL from the headers
|
|
||||||
var linksetURL *url.URL
|
|
||||||
links := parseLinkHeader(res.Header.Get("Link"))
|
|
||||||
for _, link := range links {
|
|
||||||
if link.Rel == "linkset" && link.Type == "application/linkset+json" {
|
|
||||||
parsed, err := url.Parse(link.Href)
|
|
||||||
if err == nil {
|
|
||||||
linksetURL = parsed
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if linksetURL != nil {
|
|
||||||
endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, linksetURL)
|
|
||||||
if err == nil {
|
|
||||||
return Invenio, endpoint, nil
|
|
||||||
}
|
|
||||||
fs.Logf(nil, "using linkset URL failed: %s", err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
// If there is no linkset header, try to grab the record ID from the URL
|
|
||||||
recordID := ""
|
|
||||||
resURL := res.Request.URL
|
|
||||||
match := invenioRecordRegex.FindStringSubmatch(resURL.EscapedPath())
|
|
||||||
if match != nil {
|
|
||||||
recordID = match[1]
|
|
||||||
guessedURL := res.Request.URL.ResolveReference(&url.URL{
|
|
||||||
Path: "/api/records/" + recordID,
|
|
||||||
})
|
|
||||||
endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, guessedURL)
|
|
||||||
if err == nil {
|
|
||||||
return Invenio, endpoint, nil
|
|
||||||
}
|
|
||||||
fs.Logf(nil, "guessing the URL failed: %s", err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", nil, fmt.Errorf("could not resolve the Invenio API endpoint for '%s'", resolvedURL.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkInvenioAPIURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (endpoint *url.URL, err error) {
|
|
||||||
var result api.InvenioRecordResponse
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "GET",
|
|
||||||
RootURL: resolvedURL.String(),
|
|
||||||
}
|
|
||||||
err = pacer.Call(func() (bool, error) {
|
|
||||||
res, err := srv.CallJSON(ctx, &opts, nil, &result)
|
|
||||||
return shouldRetry(ctx, res, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if result.Links.Self == "" {
|
|
||||||
return nil, fmt.Errorf("could not parse API response from '%s'", resolvedURL.String())
|
|
||||||
}
|
|
||||||
return url.Parse(result.Links.Self)
|
|
||||||
}
|
|
||||||
|
|
||||||
// invenioProvider implements the doiProvider interface for InvenioRDM installations
|
|
||||||
type invenioProvider struct {
|
|
||||||
f *Fs
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListEntries returns the full list of entries found at the remote, regardless of root
|
|
||||||
func (ip *invenioProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
|
|
||||||
// Use the cache if populated
|
|
||||||
cachedEntries, found := ip.f.cache.GetMaybe("files")
|
|
||||||
if found {
|
|
||||||
parsedEntries, ok := cachedEntries.([]Object)
|
|
||||||
if ok {
|
|
||||||
for _, entry := range parsedEntries {
|
|
||||||
newEntry := entry
|
|
||||||
entries = append(entries, &newEntry)
|
|
||||||
}
|
|
||||||
return entries, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
filesURL := ip.f.endpoint.JoinPath("files")
|
|
||||||
var result api.InvenioFilesResponse
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "GET",
|
|
||||||
Path: strings.TrimLeft(filesURL.EscapedPath(), "/"),
|
|
||||||
}
|
|
||||||
err = ip.f.pacer.Call(func() (bool, error) {
|
|
||||||
res, err := ip.f.srv.CallJSON(ctx, &opts, nil, &result)
|
|
||||||
return shouldRetry(ctx, res, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("readDir failed: %w", err)
|
|
||||||
}
|
|
||||||
for _, file := range result.Entries {
|
|
||||||
modTime, modTimeErr := time.Parse(time.RFC3339, file.Updated)
|
|
||||||
if modTimeErr != nil {
|
|
||||||
fs.Logf(ip.f, "error: could not parse last update time %v", modTimeErr)
|
|
||||||
modTime = timeUnset
|
|
||||||
}
|
|
||||||
entry := &Object{
|
|
||||||
fs: ip.f,
|
|
||||||
remote: file.Key,
|
|
||||||
contentURL: file.Links.Content,
|
|
||||||
size: file.Size,
|
|
||||||
modTime: modTime,
|
|
||||||
contentType: file.MimeType,
|
|
||||||
md5: strings.TrimPrefix(file.Checksum, "md5:"),
|
|
||||||
}
|
|
||||||
entries = append(entries, entry)
|
|
||||||
}
|
|
||||||
// Populate the cache
|
|
||||||
cacheEntries := []Object{}
|
|
||||||
for _, entry := range entries {
|
|
||||||
cacheEntries = append(cacheEntries, *entry)
|
|
||||||
}
|
|
||||||
ip.f.cache.Put("files", cacheEntries)
|
|
||||||
return entries, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newInvenioProvider(f *Fs) doiProvider {
|
|
||||||
return &invenioProvider{
|
|
||||||
f: f,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,75 +0,0 @@
|
|||||||
package doi
|
|
||||||
|
|
||||||
import (
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var linkRegex = regexp.MustCompile(`^<(.+)>$`)
|
|
||||||
var valueRegex = regexp.MustCompile(`^"(.+)"$`)
|
|
||||||
|
|
||||||
// headerLink represents a link as presented in HTTP headers
|
|
||||||
// MDN Reference: https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Link
|
|
||||||
type headerLink struct {
|
|
||||||
Href string
|
|
||||||
Rel string
|
|
||||||
Type string
|
|
||||||
Extras map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseLinkHeader(header string) (links []headerLink) {
|
|
||||||
for _, link := range strings.Split(header, ",") {
|
|
||||||
link = strings.TrimSpace(link)
|
|
||||||
parsed := parseLink(link)
|
|
||||||
if parsed != nil {
|
|
||||||
links = append(links, *parsed)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return links
|
|
||||||
}
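// Illustrative example (taken from the unit test below): given the header
//
//	<https://zenodo.org/api/records/15063252> ; rel="linkset" ; type="application/linkset+json"
//
// parseLinkHeader returns a single headerLink with Href
// "https://zenodo.org/api/records/15063252", Rel "linkset", Type
// "application/linkset+json" and an empty Extras map.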
|
|
||||||
|
|
||||||
func parseLink(link string) (parsedLink *headerLink) {
|
|
||||||
var parts []string
|
|
||||||
for _, part := range strings.Split(link, ";") {
|
|
||||||
parts = append(parts, strings.TrimSpace(part))
|
|
||||||
}
|
|
||||||
|
|
||||||
match := linkRegex.FindStringSubmatch(parts[0])
|
|
||||||
if match == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
result := &headerLink{
|
|
||||||
Href: match[1],
|
|
||||||
Extras: map[string]string{},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, keyValue := range parts[1:] {
|
|
||||||
parsed := parseKeyValue(keyValue)
|
|
||||||
if parsed != nil {
|
|
||||||
key, value := parsed[0], parsed[1]
|
|
||||||
switch strings.ToLower(key) {
|
|
||||||
case "rel":
|
|
||||||
result.Rel = value
|
|
||||||
case "type":
|
|
||||||
result.Type = value
|
|
||||||
default:
|
|
||||||
result.Extras[key] = value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseKeyValue(keyValue string) []string {
|
|
||||||
parts := strings.SplitN(keyValue, "=", 2)
|
|
||||||
if parts[0] == "" || len(parts) < 2 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
match := valueRegex.FindStringSubmatch(parts[1])
|
|
||||||
if match != nil {
|
|
||||||
parts[1] = match[1]
|
|
||||||
return parts
|
|
||||||
}
|
|
||||||
return parts
|
|
||||||
}
|
|
||||||
@@ -1,44 +0,0 @@
|
|||||||
package doi
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestParseLinkHeader(t *testing.T) {
|
|
||||||
header := "<https://zenodo.org/api/records/15063252> ; rel=\"linkset\" ; type=\"application/linkset+json\""
|
|
||||||
links := parseLinkHeader(header)
|
|
||||||
expected := headerLink{
|
|
||||||
Href: "https://zenodo.org/api/records/15063252",
|
|
||||||
Rel: "linkset",
|
|
||||||
Type: "application/linkset+json",
|
|
||||||
Extras: map[string]string{},
|
|
||||||
}
|
|
||||||
assert.Contains(t, links, expected)
|
|
||||||
|
|
||||||
header = "<https://api.example.com/issues?page=2>; rel=\"prev\", <https://api.example.com/issues?page=4>; rel=\"next\", <https://api.example.com/issues?page=10>; rel=\"last\", <https://api.example.com/issues?page=1>; rel=\"first\""
|
|
||||||
links = parseLinkHeader(header)
|
|
||||||
expectedList := []headerLink{{
|
|
||||||
Href: "https://api.example.com/issues?page=2",
|
|
||||||
Rel: "prev",
|
|
||||||
Type: "",
|
|
||||||
Extras: map[string]string{},
|
|
||||||
}, {
|
|
||||||
Href: "https://api.example.com/issues?page=4",
|
|
||||||
Rel: "next",
|
|
||||||
Type: "",
|
|
||||||
Extras: map[string]string{},
|
|
||||||
}, {
|
|
||||||
Href: "https://api.example.com/issues?page=10",
|
|
||||||
Rel: "last",
|
|
||||||
Type: "",
|
|
||||||
Extras: map[string]string{},
|
|
||||||
}, {
|
|
||||||
Href: "https://api.example.com/issues?page=1",
|
|
||||||
Rel: "first",
|
|
||||||
Type: "",
|
|
||||||
Extras: map[string]string{},
|
|
||||||
}}
|
|
||||||
assert.Equal(t, links, expectedList)
|
|
||||||
}
|
|
||||||
@@ -1,47 +0,0 @@
|
|||||||
// Implementation for Zenodo
|
|
||||||
|
|
||||||
package doi
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"net/url"
|
|
||||||
"regexp"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/backend/doi/api"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/lib/rest"
|
|
||||||
)
|
|
||||||
|
|
||||||
var zenodoRecordRegex = regexp.MustCompile(`zenodo[.](.+)`)
|
|
||||||
|
|
||||||
// Resolve the main API endpoint for a DOI hosted on Zenodo
|
|
||||||
func resolveZenodoEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL, doi string) (provider Provider, endpoint *url.URL, err error) {
|
|
||||||
match := zenodoRecordRegex.FindStringSubmatch(doi)
|
|
||||||
if match == nil {
|
|
||||||
return "", nil, fmt.Errorf("could not derive API endpoint URL from '%s'", resolvedURL.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
recordID := match[1]
|
|
||||||
endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/records/" + recordID})
|
|
||||||
|
|
||||||
var result api.InvenioRecordResponse
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "GET",
|
|
||||||
RootURL: endpointURL.String(),
|
|
||||||
}
|
|
||||||
err = pacer.Call(func() (bool, error) {
|
|
||||||
res, err := srv.CallJSON(ctx, &opts, nil, &result)
|
|
||||||
return shouldRetry(ctx, res, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return "", nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
endpointURL, err = url.Parse(result.Links.Self)
|
|
||||||
if err != nil {
|
|
||||||
return "", nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return Zenodo, endpointURL, nil
|
|
||||||
}
|
|
||||||
File diff suppressed because it is too large
@@ -95,7 +95,7 @@ func TestInternalParseExtensions(t *testing.T) {
|
|||||||
wantErr error
|
wantErr error
|
||||||
}{
|
}{
|
||||||
{"doc", []string{".doc"}, nil},
|
{"doc", []string{".doc"}, nil},
|
||||||
{" docx ,XLSX, pptx,svg,md", []string{".docx", ".xlsx", ".pptx", ".svg", ".md"}, nil},
|
{" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
|
||||||
{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
|
{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
|
||||||
{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
|
{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
|
||||||
} {
|
} {
|
||||||
@@ -479,8 +479,8 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
|
|||||||
require.NoError(t, f.Purge(ctx, "trashDir"))
|
require.NoError(t, f.Purge(ctx, "trashDir"))
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyOrMoveID
|
// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
|
||||||
func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
|
func (f *Fs) InternalTestCopyID(t *testing.T) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
obj, err := f.NewObject(ctx, existingFile)
|
obj, err := f.NewObject(ctx, existingFile)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -498,7 +498,7 @@ func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
t.Run("BadID", func(t *testing.T) {
|
t.Run("BadID", func(t *testing.T) {
|
||||||
err = f.copyOrMoveID(ctx, "moveid", "ID-NOT-FOUND", dir+"/")
|
err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
assert.Contains(t, err.Error(), "couldn't find id")
|
assert.Contains(t, err.Error(), "couldn't find id")
|
||||||
})
|
})
|
||||||
@@ -506,71 +506,22 @@ func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
|
|||||||
t.Run("Directory", func(t *testing.T) {
|
t.Run("Directory", func(t *testing.T) {
|
||||||
rootID, err := f.dirCache.RootID(ctx, false)
|
rootID, err := f.dirCache.RootID(ctx, false)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = f.copyOrMoveID(ctx, "moveid", rootID, dir+"/")
|
err = f.copyID(ctx, rootID, dir+"/")
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
assert.Contains(t, err.Error(), "can't moveid directory")
|
assert.Contains(t, err.Error(), "can't copy directory")
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("MoveWithoutDestName", func(t *testing.T) {
|
t.Run("WithoutDestName", func(t *testing.T) {
|
||||||
err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/")
|
err = f.copyID(ctx, o.id, dir+"/")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
checkFile(path.Base(existingFile))
|
checkFile(path.Base(existingFile))
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("CopyWithoutDestName", func(t *testing.T) {
|
t.Run("WithDestName", func(t *testing.T) {
|
||||||
err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/")
|
err = f.copyID(ctx, o.id, dir+"/potato.txt")
|
||||||
require.NoError(t, err)
|
|
||||||
checkFile(path.Base(existingFile))
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("MoveWithDestName", func(t *testing.T) {
|
|
||||||
err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/potato.txt")
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
checkFile("potato.txt")
|
checkFile("potato.txt")
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("CopyWithDestName", func(t *testing.T) {
|
|
||||||
err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/potato.txt")
|
|
||||||
require.NoError(t, err)
|
|
||||||
checkFile("potato.txt")
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestIntegration/FsMkdir/FsPutFiles/Internal/Query
|
|
||||||
func (f *Fs) InternalTestQuery(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
var err error
|
|
||||||
t.Run("BadQuery", func(t *testing.T) {
|
|
||||||
_, err = f.query(ctx, "this is a bad query")
|
|
||||||
require.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "failed to execute query")
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("NoMatch", func(t *testing.T) {
|
|
||||||
results, err := f.query(ctx, fmt.Sprintf("name='%s' and name!='%s'", existingSubDir, existingSubDir))
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Len(t, results, 0)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("GoodQuery", func(t *testing.T) {
|
|
||||||
pathSegments := strings.Split(existingFile, "/")
|
|
||||||
var parent string
|
|
||||||
for _, item := range pathSegments {
|
|
||||||
// the file name contains ' characters which must be escaped
|
|
||||||
escapedItem := f.opt.Enc.FromStandardName(item)
|
|
||||||
escapedItem = strings.ReplaceAll(escapedItem, `\`, `\\`)
|
|
||||||
escapedItem = strings.ReplaceAll(escapedItem, `'`, `\'`)
|
|
||||||
|
|
||||||
results, err := f.query(ctx, fmt.Sprintf("%strashed=false and name='%s'", parent, escapedItem))
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.True(t, len(results) > 0)
|
|
||||||
for _, result := range results {
|
|
||||||
assert.True(t, len(result.Id) > 0)
|
|
||||||
assert.Equal(t, result.Name, item)
|
|
||||||
}
|
|
||||||
parent = fmt.Sprintf("'%s' in parents and ", results[0].Id)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
|
// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
|
||||||
@@ -578,7 +529,7 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
|
|||||||
// Check set up for filtering
|
// Check set up for filtering
|
||||||
assert.True(t, f.Features().FilterAware)
|
assert.True(t, f.Features().FilterAware)
|
||||||
|
|
||||||
opt := &filter.Options{}
|
opt := &filter.Opt{}
|
||||||
err := opt.MaxAge.Set("1h")
|
err := opt.MaxAge.Set("1h")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
flt, err := filter.NewFilter(opt)
|
flt, err := filter.NewFilter(opt)
|
||||||
@@ -659,8 +610,7 @@ func (f *Fs) InternalTest(t *testing.T) {
|
|||||||
})
|
})
|
||||||
t.Run("Shortcuts", f.InternalTestShortcuts)
|
t.Run("Shortcuts", f.InternalTestShortcuts)
|
||||||
t.Run("UnTrash", f.InternalTestUnTrash)
|
t.Run("UnTrash", f.InternalTestUnTrash)
|
||||||
t.Run("CopyOrMoveID", f.InternalTestCopyOrMoveID)
|
t.Run("CopyID", f.InternalTestCopyID)
|
||||||
t.Run("Query", f.InternalTestQuery)
|
|
||||||
t.Run("AgeQuery", f.InternalTestAgeQuery)
|
t.Run("AgeQuery", f.InternalTestAgeQuery)
|
||||||
t.Run("ShouldRetry", f.InternalTestShouldRetry)
|
t.Run("ShouldRetry", f.InternalTestShouldRetry)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,639 +0,0 @@
|
|||||||
package drive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"maps"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/fserrors"
|
|
||||||
"github.com/rclone/rclone/lib/errcount"
|
|
||||||
"golang.org/x/sync/errgroup"
|
|
||||||
drive "google.golang.org/api/drive/v3"
|
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
|
||||||
|
|
||||||
// system metadata keys which this backend owns
|
|
||||||
var systemMetadataInfo = map[string]fs.MetadataHelp{
|
|
||||||
"content-type": {
|
|
||||||
Help: "The MIME type of the file.",
|
|
||||||
Type: "string",
|
|
||||||
Example: "text/plain",
|
|
||||||
},
|
|
||||||
"mtime": {
|
|
||||||
Help: "Time of last modification with mS accuracy.",
|
|
||||||
Type: "RFC 3339",
|
|
||||||
Example: "2006-01-02T15:04:05.999Z07:00",
|
|
||||||
},
|
|
||||||
"btime": {
|
|
||||||
Help: "Time of file birth (creation) with mS accuracy. Note that this is only writable on fresh uploads - it can't be written for updates.",
|
|
||||||
Type: "RFC 3339",
|
|
||||||
Example: "2006-01-02T15:04:05.999Z07:00",
|
|
||||||
},
|
|
||||||
"copy-requires-writer-permission": {
|
|
||||||
Help: "Whether the options to copy, print, or download this file, should be disabled for readers and commenters.",
|
|
||||||
Type: "boolean",
|
|
||||||
Example: "true",
|
|
||||||
},
|
|
||||||
"writers-can-share": {
|
|
||||||
Help: "Whether users with only writer permission can modify the file's permissions. Not populated and ignored when setting for items in shared drives.",
|
|
||||||
Type: "boolean",
|
|
||||||
Example: "false",
|
|
||||||
},
|
|
||||||
"viewed-by-me": {
|
|
||||||
Help: "Whether the file has been viewed by this user.",
|
|
||||||
Type: "boolean",
|
|
||||||
Example: "true",
|
|
||||||
ReadOnly: true,
|
|
||||||
},
|
|
||||||
"owner": {
|
|
||||||
Help: "The owner of the file. Usually an email address. Enable with --drive-metadata-owner.",
|
|
||||||
Type: "string",
|
|
||||||
Example: "user@example.com",
|
|
||||||
},
|
|
||||||
"permissions": {
|
|
||||||
Help: "Permissions in a JSON dump of Google drive format. On shared drives these will only be present if they aren't inherited. Enable with --drive-metadata-permissions.",
|
|
||||||
Type: "JSON",
|
|
||||||
Example: "{}",
|
|
||||||
},
|
|
||||||
"folder-color-rgb": {
|
|
||||||
Help: "The color for a folder or a shortcut to a folder as an RGB hex string.",
|
|
||||||
Type: "string",
|
|
||||||
Example: "881133",
|
|
||||||
},
|
|
||||||
"description": {
|
|
||||||
Help: "A short description of the file.",
|
|
||||||
Type: "string",
|
|
||||||
Example: "Contract for signing",
|
|
||||||
},
|
|
||||||
"starred": {
|
|
||||||
Help: "Whether the user has starred the file.",
|
|
||||||
Type: "boolean",
|
|
||||||
Example: "false",
|
|
||||||
},
|
|
||||||
"labels": {
|
|
||||||
Help: "Labels attached to this file in a JSON dump of Googled drive format. Enable with --drive-metadata-labels.",
|
|
||||||
Type: "JSON",
|
|
||||||
Example: "[]",
|
|
||||||
},
|
|
||||||
}
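// Illustrative note (an assumption, not part of the original change): these
// system metadata keys are what metadata-aware commands expose for Drive
// objects, for example:
//
//	rclone lsjson --metadata drive:path/to/file
//
// with --drive-metadata-owner, --drive-metadata-permissions or
// --drive-metadata-labels enabled where the help text above says so.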
|
|
||||||
|
|
||||||
// Extra fields we need to fetch to implement the system metadata above
|
|
||||||
var metadataFields = googleapi.Field(strings.Join([]string{
|
|
||||||
"copyRequiresWriterPermission",
|
|
||||||
"description",
|
|
||||||
"folderColorRgb",
|
|
||||||
"hasAugmentedPermissions",
|
|
||||||
"owners",
|
|
||||||
"permissionIds",
|
|
||||||
"permissions",
|
|
||||||
"properties",
|
|
||||||
"starred",
|
|
||||||
"viewedByMe",
|
|
||||||
"viewedByMeTime",
|
|
||||||
"writersCanShare",
|
|
||||||
}, ","))
|
|
||||||
|
|
||||||
// Fields we need to read from permissions
|
|
||||||
var permissionsFields = googleapi.Field(strings.Join([]string{
|
|
||||||
"*",
|
|
||||||
"permissionDetails/*",
|
|
||||||
}, ","))
|
|
||||||
|
|
||||||
// getPermission returns permissions for the fileID and permissionID passed in
|
|
||||||
func (f *Fs) getPermission(ctx context.Context, fileID, permissionID string, useCache bool) (perm *drive.Permission, inherited bool, err error) {
|
|
||||||
f.permissionsMu.Lock()
|
|
||||||
defer f.permissionsMu.Unlock()
|
|
||||||
if useCache {
|
|
||||||
perm = f.permissions[permissionID]
|
|
||||||
if perm != nil {
|
|
||||||
return perm, false, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fs.Debugf(f, "Fetching permission %q", permissionID)
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
perm, err = f.svc.Permissions.Get(fileID, permissionID).
|
|
||||||
Fields(permissionsFields).
|
|
||||||
SupportsAllDrives(true).
|
|
||||||
Context(ctx).Do()
|
|
||||||
return f.shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
inherited = len(perm.PermissionDetails) > 0 && perm.PermissionDetails[0].Inherited
|
|
||||||
|
|
||||||
cleanPermission(perm)
|
|
||||||
|
|
||||||
// cache the permission
|
|
||||||
f.permissions[permissionID] = perm
|
|
||||||
|
|
||||||
return perm, inherited, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the permissions on the info
|
|
||||||
func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions []*drive.Permission) (err error) {
|
|
||||||
errs := errcount.New()
|
|
||||||
for _, perm := range permissions {
|
|
||||||
if perm.Role == "owner" {
|
|
||||||
// ignore owner permissions - these are set with owner
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
cleanPermissionForWrite(perm)
|
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
|
||||||
_, err := f.svc.Permissions.Create(info.Id, perm).
|
|
||||||
SupportsAllDrives(true).
|
|
||||||
SendNotificationEmail(false).
|
|
||||||
Context(ctx).Do()
|
|
||||||
return f.shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
fs.Errorf(f, "Failed to set permission %s for %q: %v", perm.Role, perm.EmailAddress, err)
|
|
||||||
errs.Add(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
err = errs.Err("failed to set permission")
|
|
||||||
if err != nil {
|
|
||||||
err = fserrors.NoRetryError(err)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clean attributes from permissions which we can't write
|
|
||||||
func cleanPermissionForWrite(perm *drive.Permission) {
|
|
||||||
perm.Deleted = false
|
|
||||||
perm.DisplayName = ""
|
|
||||||
perm.Id = ""
|
|
||||||
perm.Kind = ""
|
|
||||||
perm.PermissionDetails = nil
|
|
||||||
perm.TeamDrivePermissionDetails = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clean and cache the permission if not already cached
|
|
||||||
func (f *Fs) cleanAndCachePermission(perm *drive.Permission) {
|
|
||||||
f.permissionsMu.Lock()
|
|
||||||
defer f.permissionsMu.Unlock()
|
|
||||||
cleanPermission(perm)
|
|
||||||
if _, found := f.permissions[perm.Id]; !found {
|
|
||||||
f.permissions[perm.Id] = perm
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clean fields we don't need to keep from the permission
|
|
||||||
func cleanPermission(perm *drive.Permission) {
|
|
||||||
// DisplayName: Output only. The "pretty" name of the value of the
|
|
||||||
// permission. The following is a list of examples for each type of
|
|
||||||
// permission: * `user` - User's full name, as defined for their Google
|
|
||||||
// account, such as "Joe Smith." * `group` - Name of the Google Group,
|
|
||||||
// such as "The Company Administrators." * `domain` - String domain
|
|
||||||
// name, such as "thecompany.com." * `anyone` - No `displayName` is
|
|
||||||
// present.
|
|
||||||
perm.DisplayName = ""
|
|
||||||
|
|
||||||
// Kind: Output only. Identifies what kind of resource this is. Value:
|
|
||||||
// the fixed string "drive#permission".
|
|
||||||
perm.Kind = ""
|
|
||||||
|
|
||||||
// PermissionDetails: Output only. Details of whether the permissions on
|
|
||||||
// this shared drive item are inherited or directly on this item. This
|
|
||||||
// is an output-only field which is present only for shared drive items.
|
|
||||||
perm.PermissionDetails = nil
|
|
||||||
|
|
||||||
// PhotoLink: Output only. A link to the user's profile photo, if
|
|
||||||
// available.
|
|
||||||
perm.PhotoLink = ""
|
|
||||||
|
|
||||||
// TeamDrivePermissionDetails: Output only. Deprecated: Output only. Use
|
|
||||||
// `permissionDetails` instead.
|
|
||||||
perm.TeamDrivePermissionDetails = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fields we need to read from labels
|
|
||||||
var labelsFields = googleapi.Field(strings.Join([]string{
|
|
||||||
"*",
|
|
||||||
}, ","))
|
|
||||||
|
|
||||||
// getLabels returns labels for the fileID passed in
|
|
||||||
func (f *Fs) getLabels(ctx context.Context, fileID string) (labels []*drive.Label, err error) {
|
|
||||||
fs.Debugf(f, "Fetching labels for %q", fileID)
|
|
||||||
listLabels := f.svc.Files.ListLabels(fileID).
|
|
||||||
Fields(labelsFields).
|
|
||||||
Context(ctx)
|
|
||||||
for {
|
|
||||||
var info *drive.LabelList
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
info, err = listLabels.Do()
|
|
||||||
return f.shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
labels = append(labels, info.Labels...)
|
|
||||||
if info.NextPageToken == "" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
listLabels.PageToken(info.NextPageToken)
|
|
||||||
}
|
|
||||||
for _, label := range labels {
|
|
||||||
cleanLabel(label)
|
|
||||||
}
|
|
||||||
return labels, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the labels on the info
|
|
||||||
func (f *Fs) setLabels(ctx context.Context, info *drive.File, labels []*drive.Label) (err error) {
|
|
||||||
if len(labels) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
req := drive.ModifyLabelsRequest{}
|
|
||||||
for _, label := range labels {
|
|
||||||
req.LabelModifications = append(req.LabelModifications, &drive.LabelModification{
|
|
||||||
FieldModifications: labelFieldsToFieldModifications(label.Fields),
|
|
||||||
LabelId: label.Id,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
_, err = f.svc.Files.ModifyLabels(info.Id, &req).
|
|
||||||
Context(ctx).Do()
|
|
||||||
return f.shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to set labels: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert label fields into something which can set the fields
|
|
||||||
func labelFieldsToFieldModifications(fields map[string]drive.LabelField) (out []*drive.LabelFieldModification) {
|
|
||||||
for id, field := range fields {
|
|
||||||
var emails []string
|
|
||||||
for _, user := range field.User {
|
|
||||||
emails = append(emails, user.EmailAddress)
|
|
||||||
}
|
|
||||||
out = append(out, &drive.LabelFieldModification{
|
|
||||||
// FieldId: The ID of the field to be modified.
|
|
||||||
FieldId: id,
|
|
||||||
|
|
||||||
// SetDateValues: Replaces the value of a dateString Field with these
|
|
||||||
// new values. The string must be in the RFC 3339 full-date format:
|
|
||||||
// YYYY-MM-DD.
|
|
||||||
SetDateValues: field.DateString,
|
|
||||||
|
|
||||||
// SetIntegerValues: Replaces the value of an `integer` field with these
|
|
||||||
// new values.
|
|
||||||
SetIntegerValues: field.Integer,
|
|
||||||
|
|
||||||
// SetSelectionValues: Replaces a `selection` field with these new
|
|
||||||
// values.
|
|
||||||
SetSelectionValues: field.Selection,
|
|
||||||
|
|
||||||
// SetTextValues: Sets the value of a `text` field.
|
|
||||||
SetTextValues: field.Text,
|
|
||||||
|
|
||||||
// SetUserValues: Replaces a `user` field with these new values. The
|
|
||||||
// values must be valid email addresses.
|
|
||||||
SetUserValues: emails,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clean fields we don't need to keep from the label
|
|
||||||
func cleanLabel(label *drive.Label) {
|
|
||||||
// Kind: This is always drive#label
|
|
||||||
label.Kind = ""
|
|
||||||
|
|
||||||
for name, field := range label.Fields {
|
|
||||||
// Kind: This is always drive#labelField.
|
|
||||||
field.Kind = ""
|
|
||||||
|
|
||||||
// Note the fields are copies so we need to write them
|
|
||||||
// back to the map
|
|
||||||
label.Fields[name] = field
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse the metadata from drive item
//
// It should return nil if there is no Metadata
func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err error) {
	metadata := make(fs.Metadata, 16)

	// Dump user metadata first as it overrides system metadata
	maps.Copy(metadata, info.Properties)

	// System metadata
	metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission)
	metadata["writers-can-share"] = fmt.Sprint(info.WritersCanShare)
	metadata["viewed-by-me"] = fmt.Sprint(info.ViewedByMe)
	metadata["content-type"] = info.MimeType

	// Owners: Output only. The owner of this file. Only certain legacy
	// files may have more than one owner. This field isn't populated for
	// items in shared drives.
	if o.fs.opt.MetadataOwner.IsSet(rwRead) && len(info.Owners) > 0 {
		user := info.Owners[0]
		if len(info.Owners) > 1 {
			fs.Logf(o, "Ignoring more than 1 owner")
		}
		if user != nil {
			id := user.EmailAddress
			if id == "" {
				id = user.DisplayName
			}
			metadata["owner"] = id
		}
	}

	if o.fs.opt.MetadataPermissions.IsSet(rwRead) {
		// We only write permissions out if they are not inherited.
		//
		// On My Drives permissions seem to be attached to every item
		// so they will always be written out.
		//
		// On Shared Drives only non-inherited permissions will be
		// written out.

		// To read the inherited permissions flag will mean we need to
		// read the permissions for each object and the cache will be
		// useless. However shared drives don't return permissions
		// only permissionIds so will need to fetch them for each
		// object. We use HasAugmentedPermissions to see if there are
		// special permissions before fetching them to save transactions.

		// HasAugmentedPermissions: Output only. Whether there are permissions
		// directly on this file. This field is only populated for items in
		// shared drives.
		if o.fs.isTeamDrive && !info.HasAugmentedPermissions {
			// Don't process permissions if there aren't any specifically set
			fs.Debugf(o, "Ignoring %d permissions and %d permissionIds as is shared drive with hasAugmentedPermissions false", len(info.Permissions), len(info.PermissionIds))
			info.Permissions = nil
			info.PermissionIds = nil
		}

		// PermissionIds: Output only. List of permission IDs for users with
		// access to this file.
		//
		// Only process these if we have no Permissions
		if len(info.PermissionIds) > 0 && len(info.Permissions) == 0 {
			info.Permissions = make([]*drive.Permission, 0, len(info.PermissionIds))
			g, gCtx := errgroup.WithContext(ctx)
			g.SetLimit(o.fs.ci.Checkers)
			var mu sync.Mutex // protect the info.Permissions from concurrent writes
			for _, permissionID := range info.PermissionIds {
				permissionID := permissionID
				g.Go(func() error {
					// must fetch the team drive ones individually to check the inherited flag
					perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)
					if err != nil {
						return fmt.Errorf("failed to read permission: %w", err)
					}
					// Don't write inherited permissions out
					if inherited {
						return nil
					}
					// Don't write owner role out - these are covered by the owner metadata
					if perm.Role == "owner" {
						return nil
					}
					mu.Lock()
					info.Permissions = append(info.Permissions, perm)
					mu.Unlock()
					return nil
				})
			}
			err = g.Wait()
			if err != nil {
				return err
			}
		} else {
			// Clean the fetched permissions
			for _, perm := range info.Permissions {
				o.fs.cleanAndCachePermission(perm)
			}
		}

		// Permissions: Output only. The full list of permissions for the file.
		// This is only available if the requesting user can share the file. Not
		// populated for items in shared drives.
		if len(info.Permissions) > 0 {
			buf, err := json.Marshal(info.Permissions)
			if err != nil {
				return fmt.Errorf("failed to marshal permissions: %w", err)
			}
			metadata["permissions"] = string(buf)
		}

		// Permission propagation
		// https://developers.google.com/drive/api/guides/manage-sharing#permission-propagation
		// Leads me to believe that in non shared drives, permissions
		// are added to each item when you set permissions for a
		// folder whereas in shared drives they are inherited and
		// placed on the item directly.
	}

	if info.FolderColorRgb != "" {
		metadata["folder-color-rgb"] = info.FolderColorRgb
	}
	if info.Description != "" {
		metadata["description"] = info.Description
	}
	metadata["starred"] = fmt.Sprint(info.Starred)
	metadata["btime"] = info.CreatedTime
	metadata["mtime"] = info.ModifiedTime

	if o.fs.opt.MetadataLabels.IsSet(rwRead) {
		// FIXME would be really nice if we knew if files had labels
		// before listing but we need to know all possible label IDs
		// to get it in the listing.

		labels, err := o.fs.getLabels(ctx, actualID(info.Id))
		if err != nil {
			return fmt.Errorf("failed to fetch labels: %w", err)
		}
		buf, err := json.Marshal(labels)
		if err != nil {
			return fmt.Errorf("failed to marshal labels: %w", err)
		}
		metadata["labels"] = string(buf)
	}

	o.metadata = &metadata
	return nil
}

// Set the owner on the info
func (f *Fs) setOwner(ctx context.Context, info *drive.File, owner string) (err error) {
	perm := drive.Permission{
		Role:         "owner",
		EmailAddress: owner,
		// Type: The type of the grantee. Valid values are: * `user` * `group` *
		// `domain` * `anyone` When creating a permission, if `type` is `user`
		// or `group`, you must provide an `emailAddress` for the user or group.
		// When `type` is `domain`, you must provide a `domain`. There isn't
		// extra information required for an `anyone` type.
		Type: "user",
	}
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Permissions.Create(info.Id, &perm).
			SupportsAllDrives(true).
			TransferOwnership(true).
			// SendNotificationEmail(false). - required apparently!
			Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
	if err != nil {
		return fmt.Errorf("failed to set owner: %w", err)
	}
	return nil
}

// Call back to set metadata that can't be set on the upload/update
//
// The *drive.File passed in holds the current state of the drive.File
// and this should update it with any modifications.
type updateMetadataFn func(context.Context, *drive.File) error

// read the metadata from meta and write it into updateInfo
//
// update should be true if this is being used to create metadata for
// an update/PATCH call as the rules on what can be updated are
// slightly different there.
//
// It returns a callback which should be called to finish the updates
// after the data is uploaded.
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update, isFolder bool) (callback updateMetadataFn, err error) {
	callbackFns := []updateMetadataFn{}
	callback = func(ctx context.Context, info *drive.File) error {
		for _, fn := range callbackFns {
			err := fn(ctx, info)
			if err != nil {
				return err
			}
		}
		return nil
	}
	// merge metadata into request and user metadata
	for k, v := range meta {
		k, v := k, v
		// parse a boolean from v and write into out
		parseBool := func(out *bool) error {
			b, err := strconv.ParseBool(v)
			if err != nil {
				return fmt.Errorf("can't parse metadata %q = %q: %w", k, v, err)
			}
			*out = b
			return nil
		}
		switch k {
		case "copy-requires-writer-permission":
			if isFolder {
				fs.Debugf(f, "Ignoring %s=%s as can't set on folders", k, v)
			} else if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
				return nil, err
			}
		case "writers-can-share":
			if !f.isTeamDrive {
				if err := parseBool(&updateInfo.WritersCanShare); err != nil {
					return nil, err
				}
			} else {
				fs.Debugf(f, "Ignoring %s=%s as can't set on shared drives", k, v)
			}
		case "viewed-by-me":
			// Can't write this
		case "content-type":
			updateInfo.MimeType = v
		case "owner":
			if !f.opt.MetadataOwner.IsSet(rwWrite) {
				continue
			}
			// Can't set Owner on upload so need to set afterwards
			callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
				err := f.setOwner(ctx, info, v)
				if err != nil && f.opt.MetadataOwner.IsSet(rwFailOK) {
					fs.Errorf(f, "Ignoring error as failok is set: %v", err)
					return nil
				}
				return err
			})
		case "permissions":
			if !f.opt.MetadataPermissions.IsSet(rwWrite) {
				continue
			}
			var perms []*drive.Permission
			err := json.Unmarshal([]byte(v), &perms)
			if err != nil {
				return nil, fmt.Errorf("failed to unmarshal permissions: %w", err)
			}
			// Can't set Permissions on upload so need to set afterwards
			callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
				err := f.setPermissions(ctx, info, perms)
				if err != nil && f.opt.MetadataPermissions.IsSet(rwFailOK) {
					// We've already logged the permissions errors individually here
					fs.Debugf(f, "Ignoring error as failok is set: %v", err)
					return nil
				}
				return err
			})
		case "labels":
			if !f.opt.MetadataLabels.IsSet(rwWrite) {
				continue
			}
			var labels []*drive.Label
			err := json.Unmarshal([]byte(v), &labels)
			if err != nil {
				return nil, fmt.Errorf("failed to unmarshal labels: %w", err)
			}
			// Can't set Labels on upload so need to set afterwards
			callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
				err := f.setLabels(ctx, info, labels)
				if err != nil && f.opt.MetadataLabels.IsSet(rwFailOK) {
					fs.Errorf(f, "Ignoring error as failok is set: %v", err)
					return nil
				}
				return err
			})
		case "folder-color-rgb":
			updateInfo.FolderColorRgb = v
		case "description":
			updateInfo.Description = v
		case "starred":
			if err := parseBool(&updateInfo.Starred); err != nil {
				return nil, err
			}
		case "btime":
			if update {
				fs.Debugf(f, "Skipping btime metadata as can't update it on an existing file: %v", v)
			} else {
				updateInfo.CreatedTime = v
			}
		case "mtime":
			updateInfo.ModifiedTime = v
		default:
			if updateInfo.Properties == nil {
				updateInfo.Properties = make(map[string]string, 1)
			}
			updateInfo.Properties[k] = v
		}
	}
	return callback, nil
}

// Fetch metadata and update updateInfo if --metadata is in use
func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, updateInfo *drive.File, update bool) (callback updateMetadataFn, err error) {
	meta, err := fs.GetMetadataOptions(ctx, f, src, options)
	if err != nil {
		return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
	}
	callback, err = f.updateMetadata(ctx, updateInfo, meta, update, false)
	if err != nil {
		return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
	}
	return callback, nil
}
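The two-phase flow above is easy to miss: updateMetadata fills in what it can on updateInfo before the upload, and everything Drive only accepts once the file exists (owner, permissions, labels) is deferred into the returned callback. A minimal sketch of the intended call order, assuming a hypothetical doUpload helper (the real call sites are the backend's upload/update paths, not shown here):

	updateInfo := &drive.File{}
	callback, err := f.fetchAndUpdateMetadata(ctx, src, options, updateInfo, true)
	if err != nil {
		return err
	}
	info, err := doUpload(ctx, updateInfo) // hypothetical upload/PATCH using updateInfo
	if err != nil {
		return err
	}
	// apply owner/permissions/labels which can only be set after the upload
	return callback(ctx, info)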
@@ -177,7 +177,10 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
 			if start >= rx.ContentLength {
 				break
 			}
-			reqSize = min(rx.ContentLength-start, int64(rx.f.opt.ChunkSize))
+			reqSize = rx.ContentLength - start
+			if reqSize >= int64(rx.f.opt.ChunkSize) {
+				reqSize = int64(rx.f.opt.ChunkSize)
+			}
 			chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
 		} else {
 			// If size unknown read into buffer
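Both sides of this hunk compute the same thing: the next request size is the remaining content length capped at the configured chunk size; one side simply uses the Go 1.21 min builtin. A self-contained sketch of the equivalence (illustrative only, not taken from the diff):

// chunkLen returns the number of bytes to send next: the remaining
// content, capped at chunkSize. Equivalent to min(contentLength-start, chunkSize).
func chunkLen(contentLength, start, chunkSize int64) int64 {
	reqSize := contentLength - start
	if reqSize >= chunkSize {
		reqSize = chunkSize
	}
	return reqSize
}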
@@ -8,22 +8,129 @@ package dropbox

 import (
 	"context"
+	"errors"
 	"fmt"
+	"sync"
+	"time"

 	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/fserrors"
+	"github.com/rclone/rclone/lib/atexit"
 )

+const (
+	maxBatchSize          = 1000                   // max size the batch can be
+	defaultTimeoutSync    = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
+	defaultTimeoutAsync   = 10 * time.Second       // kick off the batch if nothing added for this long (async)
+	defaultBatchSizeAsync = 100                    // default batch size if async
+)
+
+// batcher holds info about the current items waiting for upload
+type batcher struct {
+	f        *Fs                 // Fs this batch is part of
+	mode     string              // configured batch mode
+	size     int                 // maximum size for batch
+	timeout  time.Duration       // idle timeout for batch
+	async    bool                // whether we are using async batching
+	in       chan batcherRequest // incoming items to batch
+	closed   chan struct{}       // close to indicate batcher shut down
+	atexit   atexit.FnHandle     // atexit handle
+	shutOnce sync.Once           // make sure we shutdown once only
+	wg       sync.WaitGroup      // wait for shutdown
+}
+
+// batcherRequest holds an incoming request with a place for a reply
+type batcherRequest struct {
+	commitInfo *files.UploadSessionFinishArg
+	result     chan<- batcherResponse
+}
+
+// Return true if batcherRequest is the quit request
+func (br *batcherRequest) isQuit() bool {
+	return br.commitInfo == nil
+}
+
+// Send this to get the engine to quit
+var quitRequest = batcherRequest{}
+
+// batcherResponse holds a response to be delivered to clients waiting
+// for a batch to complete.
+type batcherResponse struct {
+	err   error
+	entry *files.FileMetadata
+}
+
+// newBatcher creates a new batcher structure
+func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
+	// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
+	if size > maxBatchSize || size < 0 {
+		return nil, fmt.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
+	}
+
+	async := false
+
+	switch mode {
+	case "sync":
+		if size <= 0 {
+			ci := fs.GetConfig(ctx)
+			size = ci.Transfers
+		}
+		if timeout <= 0 {
+			timeout = defaultTimeoutSync
+		}
+	case "async":
+		if size <= 0 {
+			size = defaultBatchSizeAsync
+		}
+		if timeout <= 0 {
+			timeout = defaultTimeoutAsync
+		}
+		async = true
+	case "off":
+		size = 0
+	default:
+		return nil, fmt.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
+	}
+
+	b := &batcher{
+		f:       f,
+		mode:    mode,
+		size:    size,
+		timeout: timeout,
+		async:   async,
+		in:      make(chan batcherRequest, size),
+		closed:  make(chan struct{}),
+	}
+	if b.Batching() {
+		b.atexit = atexit.Register(b.Shutdown)
+		b.wg.Add(1)
+		go b.commitLoop(context.Background())
+	}
+	return b, nil
+}
+
+// Batching returns true if batching is active
+func (b *batcher) Batching() bool {
+	return b.size > 0
+}
+
 // finishBatch commits the batch, returning a batch status to poll or maybe complete
-func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
+func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
 	var arg = &files.UploadSessionFinishBatchArg{
 		Entries: items,
 	}
-	err = f.pacer.Call(func() (bool, error) {
-		complete, err = f.srv.UploadSessionFinishBatchV2(arg)
-		if retry, err := shouldRetryExclude(ctx, err); !retry {
-			return retry, err
+	err = b.f.pacer.Call(func() (bool, error) {
+		complete, err = b.f.srv.UploadSessionFinishBatchV2(arg)
+		// If error is insufficient space then don't retry
+		if e, ok := err.(files.UploadSessionFinishAPIError); ok {
+			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
+				err = fserrors.NoRetryError(err)
+				return false, err
+			}
 		}
-		// after the first chunk is uploaded, we retry everything except the excluded errors
+		// after the first chunk is uploaded, we retry everything
 		return err != nil, err
 	})
 	if err != nil {
@@ -32,10 +139,23 @@
 	return complete, nil
 }

-// Called by the batcher to commit a batch
-func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []*files.FileMetadata, errors []error) (err error) {
+// commit a batch
+func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
+	// If commit fails then signal clients if sync
+	var signalled = b.async
+	defer func() {
+		if err != nil && !signalled {
+			// Signal to clients that there was an error
+			for _, result := range results {
+				result <- batcherResponse{err: err}
+			}
+		}
+	}()
+	desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
+	fs.Debugf(b.f, "Committing %s", desc)
+
 	// finalise the batch getting either a result or a job id to poll
-	complete, err := f.finishBatch(ctx, items)
+	complete, err := b.finishBatch(ctx, items)
 	if err != nil {
 		return err
 	}
@@ -46,13 +166,19 @@
 		return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
 	}

-	// Format results for return
+	// Report results to clients
+	var (
+		errorTag   = ""
+		errorCount = 0
+	)
 	for i := range results {
 		item := entries[i]
+		resp := batcherResponse{}
 		if item.Tag == "success" {
-			results[i] = item.Success
+			resp.entry = item.Success
 		} else {
-			errorTag := item.Tag
+			errorCount++
+			errorTag = item.Tag
 			if item.Failure != nil {
 				errorTag = item.Failure.Tag
 				if item.Failure.LookupFailed != nil {
@@ -65,9 +191,112 @@
 					errorTag += "/" + item.Failure.PropertiesError.Tag
 				}
 			}
-			errors[i] = fmt.Errorf("upload failed: %s", errorTag)
+			resp.err = fmt.Errorf("batch upload failed: %s", errorTag)
+		}
+		if !b.async {
+			results[i] <- resp
 		}
 	}
+	// Show signalled so no need to report error to clients from now on
+	signalled = true
+
+	// Report an error if any failed in the batch
+	if errorTag != "" {
+		return fmt.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
+	}
+
+	fs.Debugf(b.f, "Committed %s", desc)
 	return nil
 }
+
+// commitLoop runs the commit engine in the background
+func (b *batcher) commitLoop(ctx context.Context) {
+	var (
+		items     []*files.UploadSessionFinishArg // current batch of uncommitted files
+		results   []chan<- batcherResponse        // current batch of clients awaiting results
+		idleTimer = time.NewTimer(b.timeout)
+		commit    = func() {
+			err := b.commitBatch(ctx, items, results)
+			if err != nil {
+				fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
+			}
+			items, results = nil, nil
+		}
+	)
+	defer b.wg.Done()
+	defer idleTimer.Stop()
+	idleTimer.Stop()
+
+outer:
+	for {
+		select {
+		case req := <-b.in:
+			if req.isQuit() {
+				break outer
+			}
+			items = append(items, req.commitInfo)
+			results = append(results, req.result)
+			idleTimer.Stop()
+			if len(items) >= b.size {
+				commit()
+			} else {
+				idleTimer.Reset(b.timeout)
+			}
+		case <-idleTimer.C:
+			if len(items) > 0 {
+				fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
+				commit()
+			}
+		}
+	}
+	// commit any remaining items
+	if len(items) > 0 {
+		commit()
+	}
+}
+
+// Shutdown finishes any pending batches then shuts everything down
+//
+// Can be called from atexit handler
+func (b *batcher) Shutdown() {
+	if !b.Batching() {
+		return
+	}
+	b.shutOnce.Do(func() {
+		atexit.Unregister(b.atexit)
+		fs.Infof(b.f, "Committing uploads - please wait...")
+		// show that batcher is shutting down
+		close(b.closed)
+		// quit the commitLoop by sending a quitRequest message
+		//
+		// Note that we don't close b.in because that will
+		// cause write to closed channel in Commit when we are
+		// exiting due to a signal.
+		b.in <- quitRequest
+		b.wg.Wait()
+	})
+}
+
+// Commit commits the file using a batch call, first adding it to the
+// batch and then waiting for the batch to complete in a synchronous
+// way if async is not set.
+func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
+	select {
+	case <-b.closed:
+		return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
+	default:
+	}
+	fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
+	resp := make(chan batcherResponse, 1)
+	b.in <- batcherRequest{
+		commitInfo: commitInfo,
+		result:     resp,
+	}
+	// If running async then don't wait for the result
+	if b.async {
+		return nil, nil
+	}
+	result := <-resp
+	return result.entry, result.err
+}
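For orientation, a rough sketch of how an upload path is expected to drive this batcher once it has built a *files.UploadSessionFinishArg (the surrounding variable names are assumptions, not taken from the diff):

	// Queue the finish arg. In sync mode this blocks until the whole batch
	// has committed; in async mode it returns immediately with a nil entry.
	entry, err := o.fs.batcher.Commit(ctx, commitInfo)
	if err != nil {
		return err
	}
	if entry != nil {
		// sync mode only: entry holds the committed file's metadata
		fs.Debugf(o, "Committed %q", entry.PathDisplay)
	}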
@@ -55,7 +55,10 @@ func (d *digest) Write(p []byte) (n int, err error) {
 	n = len(p)
 	for len(p) > 0 {
 		d.writtenMore = true
-		toWrite := min(bytesPerBlock-d.n, len(p))
+		toWrite := bytesPerBlock - d.n
+		if toWrite > len(p) {
+			toWrite = len(p)
+		}
 		_, err = d.blockHash.Write(p[:toWrite])
 		if err != nil {
 			panic(hashReturnedError)
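The change above only affects how the per-block write length is computed inside the loop; the surrounding logic still feeds the block hash at most bytesPerBlock bytes at a time. A standalone sketch of that block-splitting idea (illustrative, not the dbhash package itself):

// splitBlocks cuts p into pieces of at most blockSize bytes, mirroring how
// the digest feeds data to its per-block hash.
func splitBlocks(p []byte, blockSize int) [][]byte {
	var blocks [][]byte
	for len(p) > 0 {
		n := blockSize
		if n > len(p) {
			n = len(p)
		}
		blocks = append(blocks, p[:n])
		p = p[n:]
	}
	return blocks
}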
@@ -11,7 +11,7 @@ import (

 func testChunk(t *testing.T, chunk int) {
 	data := make([]byte, chunk)
-	for i := range chunk {
+	for i := 0; i < chunk; i++ {
 		data[i] = 'A'
 	}
 	for _, test := range []struct {
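The only change in this test hunk is the loop form: `for i := range chunk` (range over an int, Go 1.22+) versus the classic three-clause loop. Both produce the same buffer, which could equally be built without an index (an alternative, not what the test does):

	data := bytes.Repeat([]byte{'A'}, chunk) // needs the standard library "bytes" package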
@@ -47,8 +47,6 @@ import (
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/operations"
-	"github.com/rclone/rclone/lib/batcher"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
@@ -92,12 +90,9 @@
 	maxFileNameLength = 255
 )

-type exportAPIFormat string
-type exportExtension string // dotless
-
 var (
 	// Description of how to auth for this app
-	dropboxConfig = &oauthutil.Config{
+	dropboxConfig = &oauth2.Config{
 		Scopes: []string{
 			"files.metadata.write",
 			"files.content.write",
@@ -112,8 +107,7 @@
 		// AuthURL:  "https://www.dropbox.com/1/oauth2/authorize",
 		// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
 		// },
-		AuthURL:      dropbox.OAuthEndpoint("").AuthURL,
-		TokenURL:     dropbox.OAuthEndpoint("").TokenURL,
+		Endpoint:     dropbox.OAuthEndpoint(""),
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectLocalhostURL,
@@ -127,28 +121,10 @@

 	// Errors
 	errNotSupportedInSharedMode = fserrors.NoRetryError(errors.New("not supported in shared files mode"))
-
-	// Configure the batcher
-	defaultBatcherOptions = batcher.Options{
-		MaxBatchSize:          1000,
-		DefaultTimeoutSync:    500 * time.Millisecond,
-		DefaultTimeoutAsync:   10 * time.Second,
-		DefaultBatchSizeAsync: 100,
-	}
-
-	exportKnownAPIFormats = map[exportAPIFormat]exportExtension{
-		"markdown": "md",
-		"html":     "html",
-	}
-	// Populated based on exportKnownAPIFormats
-	exportKnownExtensions = map[exportExtension]exportAPIFormat{}
-
-	paperExtension         = ".paper"
-	paperTemplateExtension = ".papert"
 )

 // Gets an oauth config with the right scopes
-func getOauthConfig(m configmap.Mapper) *oauthutil.Config {
+func getOauthConfig(m configmap.Mapper) *oauth2.Config {
 	// If not impersonating, use standard scopes
 	if impersonate, _ := m.Get("impersonate"); impersonate == "" {
 		return dropboxConfig
@@ -176,7 +152,7 @@
 			},
 		})
 	},
-	Options: append(append(oauthutil.SharedOptions, []fs.Option{{
+	Options: append(oauthutil.SharedOptions, []fs.Option{{
 		Name: "chunk_size",
 		Help: fmt.Sprintf(`Upload chunk size (< %v).

@@ -231,12 +207,71 @@ are supported.
|
|||||||
|
|
||||||
Note that we don't unmount the shared folder afterwards so the
|
Note that we don't unmount the shared folder afterwards so the
|
||||||
--dropbox-shared-folders can be omitted after the first use of a particular
|
--dropbox-shared-folders can be omitted after the first use of a particular
|
||||||
shared folder.
|
shared folder.`,
|
||||||
|
|
||||||
See also --dropbox-root-namespace for an alternative way to work with shared
|
|
||||||
folders.`,
|
|
||||||
Default: false,
|
Default: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "batch_mode",
|
||||||
|
Help: `Upload file batching sync|async|off.
|
||||||
|
|
||||||
|
This sets the batch mode used by rclone.
|
||||||
|
|
||||||
|
For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)
|
||||||
|
|
||||||
|
This has 3 possible values
|
||||||
|
|
||||||
|
- off - no batching
|
||||||
|
- sync - batch uploads and check completion (default)
|
||||||
|
- async - batch upload and don't check completion
|
||||||
|
|
||||||
|
Rclone will close any outstanding batches when it exits which may make
|
||||||
|
a delay on quit.
|
||||||
|
`,
|
||||||
|
Default: "sync",
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "batch_size",
|
||||||
|
Help: `Max number of files in upload batch.
|
||||||
|
|
||||||
|
This sets the batch size of files to upload. It has to be less than 1000.
|
||||||
|
|
||||||
|
By default this is 0 which means rclone which calculate the batch size
|
||||||
|
depending on the setting of batch_mode.
|
||||||
|
|
||||||
|
- batch_mode: async - default batch_size is 100
|
||||||
|
- batch_mode: sync - default batch_size is the same as --transfers
|
||||||
|
- batch_mode: off - not in use
|
||||||
|
|
||||||
|
Rclone will close any outstanding batches when it exits which may make
|
||||||
|
a delay on quit.
|
||||||
|
|
||||||
|
Setting this is a great idea if you are uploading lots of small files
|
||||||
|
as it will make them a lot quicker. You can use --transfers 32 to
|
||||||
|
maximise throughput.
|
||||||
|
`,
|
||||||
|
Default: 0,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "batch_timeout",
|
||||||
|
Help: `Max time to allow an idle upload batch before uploading.
|
||||||
|
|
||||||
|
If an upload batch is idle for more than this long then it will be
|
||||||
|
uploaded.
|
||||||
|
|
||||||
|
The default for this is 0 which means rclone will choose a sensible
|
||||||
|
default based on the batch_mode in use.
|
||||||
|
|
||||||
|
- batch_mode: async - default batch_timeout is 10s
|
||||||
|
- batch_mode: sync - default batch_timeout is 500ms
|
||||||
|
- batch_mode: off - not in use
|
||||||
|
`,
|
||||||
|
Default: fs.Duration(0),
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "batch_commit_timeout",
|
||||||
|
Help: `Max time to wait for a batch to finish committing`,
|
||||||
|
Default: fs.Duration(10 * time.Minute),
|
||||||
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "pacer_min_sleep",
|
Name: "pacer_min_sleep",
|
||||||
Default: defaultMinSleep,
|
Default: defaultMinSleep,
|
||||||
@@ -255,66 +290,23 @@ folders.`,
|
|||||||
encoder.EncodeDel |
|
encoder.EncodeDel |
|
||||||
encoder.EncodeRightSpace |
|
encoder.EncodeRightSpace |
|
||||||
encoder.EncodeInvalidUtf8,
|
encoder.EncodeInvalidUtf8,
|
||||||
}, {
|
}}...),
|
||||||
Name: "root_namespace",
|
|
||||||
Help: "Specify a different Dropbox namespace ID to use as the root for all paths.",
|
|
||||||
Default: "",
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "export_formats",
|
|
||||||
Help: `Comma separated list of preferred formats for exporting files
|
|
||||||
|
|
||||||
Certain Dropbox files can only be accessed by exporting them to another format.
|
|
||||||
These include Dropbox Paper documents.
|
|
||||||
|
|
||||||
For each such file, rclone will choose the first format on this list that Dropbox
|
|
||||||
considers valid. If none is valid, it will choose Dropbox's default format.
|
|
||||||
|
|
||||||
Known formats include: "html", "md" (markdown)`,
|
|
||||||
Default: fs.CommaSepList{"html", "md"},
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "skip_exports",
|
|
||||||
Help: "Skip exportable files in all listings.\n\nIf given, exportable files practically become invisible to rclone.",
|
|
||||||
Default: false,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "show_all_exports",
|
|
||||||
Default: false,
|
|
||||||
Help: `Show all exportable files in listings.
|
|
||||||
|
|
||||||
Adding this flag will allow all exportable files to be server side copied.
|
|
||||||
Note that rclone doesn't add extensions to the exportable file names in this mode.
|
|
||||||
|
|
||||||
Do **not** use this flag when trying to download exportable files - rclone
|
|
||||||
will fail to download them.
|
|
||||||
`,
|
|
||||||
Advanced: true,
|
|
||||||
},
|
|
||||||
}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
|
|
||||||
})
|
})
|
||||||
|
|
||||||
for apiFormat, ext := range exportKnownAPIFormats {
|
|
||||||
exportKnownExtensions[ext] = apiFormat
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Options defines the configuration for this backend
|
// Options defines the configuration for this backend
|
||||||
type Options struct {
|
type Options struct {
|
||||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||||
Impersonate string `config:"impersonate"`
|
Impersonate string `config:"impersonate"`
|
||||||
SharedFiles bool `config:"shared_files"`
|
SharedFiles bool `config:"shared_files"`
|
||||||
SharedFolders bool `config:"shared_folders"`
|
SharedFolders bool `config:"shared_folders"`
|
||||||
BatchMode string `config:"batch_mode"`
|
BatchMode string `config:"batch_mode"`
|
||||||
BatchSize int `config:"batch_size"`
|
BatchSize int `config:"batch_size"`
|
||||||
BatchTimeout fs.Duration `config:"batch_timeout"`
|
BatchTimeout fs.Duration `config:"batch_timeout"`
|
||||||
AsyncBatch bool `config:"async_batch"`
|
BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
|
||||||
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
|
AsyncBatch bool `config:"async_batch"`
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
|
||||||
RootNsid string `config:"root_namespace"`
|
Enc encoder.MultiEncoder `config:"encoding"`
|
||||||
ExportFormats fs.CommaSepList `config:"export_formats"`
|
|
||||||
SkipExports bool `config:"skip_exports"`
|
|
||||||
ShowAllExports bool `config:"show_all_exports"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a remote dropbox server
|
// Fs represents a remote dropbox server
|
||||||
@@ -333,19 +325,9 @@ type Fs struct {
|
|||||||
slashRootSlash string // root with "/" prefix and postfix, lowercase
|
slashRootSlash string // root with "/" prefix and postfix, lowercase
|
||||||
pacer *fs.Pacer // To pace the API calls
|
pacer *fs.Pacer // To pace the API calls
|
||||||
ns string // The namespace we are using or "" for none
|
ns string // The namespace we are using or "" for none
|
||||||
batcher *batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata]
|
batcher *batcher // batch builder
|
||||||
exportExts []exportExtension
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type exportType int
|
|
||||||
|
|
||||||
const (
|
|
||||||
notExport exportType = iota // a regular file
|
|
||||||
exportHide // should be hidden
|
|
||||||
exportListOnly // listable, but can't export
|
|
||||||
exportExportable // can export
|
|
||||||
)
|
|
||||||
|
|
||||||
// Object describes a dropbox object
|
// Object describes a dropbox object
|
||||||
//
|
//
|
||||||
// Dropbox Objects always have full metadata
|
// Dropbox Objects always have full metadata
|
||||||
@@ -357,9 +339,6 @@ type Object struct {
|
|||||||
bytes int64 // size of the object
|
bytes int64 // size of the object
|
||||||
modTime time.Time // time it was last modified
|
modTime time.Time // time it was last modified
|
||||||
hash string // content_hash of the object
|
hash string // content_hash of the object
|
||||||
|
|
||||||
exportType exportType
|
|
||||||
exportAPIFormat exportAPIFormat
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Name of the remote (as passed into NewFs)
|
// Name of the remote (as passed into NewFs)
|
||||||
@@ -382,46 +361,32 @@ func (f *Fs) Features() *fs.Features {
|
|||||||
return f.features
|
return f.features
|
||||||
}
|
}
|
||||||
|
|
||||||
// Some specific errors which should be excluded from retries
|
// shouldRetry returns a boolean as to whether this err deserves to be
|
||||||
func shouldRetryExclude(ctx context.Context, err error) (bool, error) {
|
// retried. It returns the err as a convenience
|
||||||
if err == nil {
|
func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
if fserrors.ContextError(ctx, &err) {
|
if fserrors.ContextError(ctx, &err) {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
// First check for specific errors
|
if err == nil {
|
||||||
//
|
return false, err
|
||||||
// These come back from the SDK in a whole host of different
|
}
|
||||||
// error types, but there doesn't seem to be a consistent way
|
|
||||||
// of reading the error cause, so here we just check using the
|
|
||||||
// error string which isn't perfect but does the job.
|
|
||||||
errString := err.Error()
|
errString := err.Error()
|
||||||
|
// First check for specific errors
|
||||||
if strings.Contains(errString, "insufficient_space") {
|
if strings.Contains(errString, "insufficient_space") {
|
||||||
return false, fserrors.FatalError(err)
|
return false, fserrors.FatalError(err)
|
||||||
} else if strings.Contains(errString, "malformed_path") {
|
} else if strings.Contains(errString, "malformed_path") {
|
||||||
return false, fserrors.NoRetryError(err)
|
return false, fserrors.NoRetryError(err)
|
||||||
}
|
}
|
||||||
return true, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// shouldRetry returns a boolean as to whether this err deserves to be
|
|
||||||
// retried. It returns the err as a convenience
|
|
||||||
func shouldRetry(ctx context.Context, err error) (bool, error) {
|
|
||||||
if retry, err := shouldRetryExclude(ctx, err); !retry {
|
|
||||||
return retry, err
|
|
||||||
}
|
|
||||||
// Then handle any official Retry-After header from Dropbox's SDK
|
// Then handle any official Retry-After header from Dropbox's SDK
|
||||||
switch e := err.(type) {
|
switch e := err.(type) {
|
||||||
case auth.RateLimitAPIError:
|
case auth.RateLimitAPIError:
|
||||||
if e.RateLimitError.RetryAfter > 0 {
|
if e.RateLimitError.RetryAfter > 0 {
|
||||||
fs.Logf(nil, "Error %v. Too many requests or write operations. Trying again in %d seconds.", err, e.RateLimitError.RetryAfter)
|
fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
|
||||||
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
|
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
|
||||||
}
|
}
|
||||||
return true, err
|
return true, err
|
||||||
}
|
}
|
||||||
// Keep old behavior for backward compatibility
|
// Keep old behavior for backward compatibility
|
||||||
errString := err.Error()
|
|
||||||
if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
|
if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
|
||||||
return true, err
|
return true, err
|
||||||
}
|
}
|
||||||
@@ -466,7 +431,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
oldToken = strings.TrimSpace(oldToken)
|
oldToken = strings.TrimSpace(oldToken)
|
||||||
if ok && oldToken != "" && oldToken[0] != '{' {
|
if ok && oldToken != "" && oldToken[0] != '{' {
|
||||||
fs.Infof(name, "Converting token to new format")
|
fs.Infof(name, "Converting token to new format")
|
||||||
newToken := fmt.Sprintf(`{"access_token":%q,"token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
|
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
|
||||||
err := config.SetValueAndSave(name, config.ConfigToken, newToken)
|
err := config.SetValueAndSave(name, config.ConfigToken, newToken)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("NewFS convert token: %w", err)
|
return nil, fmt.Errorf("NewFS convert token: %w", err)
|
||||||
@@ -486,11 +451,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
ci: ci,
|
ci: ci,
|
||||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||||
}
|
}
|
||||||
batcherOptions := defaultBatcherOptions
|
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
|
||||||
batcherOptions.Mode = f.opt.BatchMode
|
|
||||||
batcherOptions.Size = f.opt.BatchSize
|
|
||||||
batcherOptions.Timeout = time.Duration(f.opt.BatchTimeout)
|
|
||||||
f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -500,14 +461,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
HeaderGenerator: f.headerGenerator,
|
HeaderGenerator: f.headerGenerator,
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, e := range opt.ExportFormats {
|
|
||||||
ext := exportExtension(e)
|
|
||||||
if exportKnownExtensions[ext] == "" {
|
|
||||||
return nil, fmt.Errorf("dropbox: unknown export format '%s'", e)
|
|
||||||
}
|
|
||||||
f.exportExts = append(f.exportExts, ext)
|
|
||||||
}
|
|
||||||
|
|
||||||
// unauthorized config for endpoints that fail with auth
|
// unauthorized config for endpoints that fail with auth
|
||||||
ucfg := dropbox.Config{
|
ucfg := dropbox.Config{
|
||||||
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
|
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
|
||||||
@@ -525,15 +478,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
members := []*team.UserSelectorArg{&user}
|
members := []*team.UserSelectorArg{&user}
|
||||||
args := team.NewMembersGetInfoArgs(members)
|
args := team.NewMembersGetInfoArgs(members)
|
||||||
|
|
||||||
memberIDs, err := f.team.MembersGetInfo(args)
|
memberIds, err := f.team.MembersGetInfo(args)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
|
return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
|
||||||
}
|
}
|
||||||
if len(memberIDs) == 0 || memberIDs[0].MemberInfo == nil || memberIDs[0].MemberInfo.Profile == nil {
|
if len(memberIds) == 0 || memberIds[0].MemberInfo == nil || memberIds[0].MemberInfo.Profile == nil {
|
||||||
return nil, fmt.Errorf("dropbox team member not found: %q", opt.Impersonate)
|
return nil, fmt.Errorf("dropbox team member not found: %q", opt.Impersonate)
|
||||||
}
|
}
|
||||||
|
|
||||||
cfg.AsMemberID = memberIDs[0].MemberInfo.Profile.MemberProfile.TeamMemberId
|
cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
|
||||||
}
|
}
|
||||||
|
|
||||||
f.srv = files.New(cfg)
|
f.srv = files.New(cfg)
|
||||||
@@ -599,11 +552,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
|
|
||||||
f.features.Fill(ctx, f)
|
f.features.Fill(ctx, f)
|
||||||
|
|
||||||
if f.opt.RootNsid != "" {
|
// If root starts with / then use the actual root
|
||||||
f.ns = f.opt.RootNsid
|
if strings.HasPrefix(root, "/") {
|
||||||
fs.Debugf(f, "Overriding root namespace to %q", f.ns)
|
|
||||||
} else if strings.HasPrefix(root, "/") {
|
|
||||||
// If root starts with / then use the actual root
|
|
||||||
var acc *users.FullAccount
|
var acc *users.FullAccount
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
acc, err = f.users.GetCurrentAccount()
|
acc, err = f.users.GetCurrentAccount()
|
||||||
@@ -660,126 +610,38 @@ func (f *Fs) setRoot(root string) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type getMetadataResult struct {
|
|
||||||
entry files.IsMetadata
|
|
||||||
notFound bool
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
// getMetadata gets the metadata for a file or directory
|
// getMetadata gets the metadata for a file or directory
|
||||||
func (f *Fs) getMetadata(ctx context.Context, objPath string) (res getMetadataResult) {
|
func (f *Fs) getMetadata(ctx context.Context, objPath string) (entry files.IsMetadata, notFound bool, err error) {
|
||||||
res.err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
res.entry, res.err = f.srv.GetMetadata(&files.GetMetadataArg{
|
entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
|
||||||
Path: f.opt.Enc.FromStandardPath(objPath),
|
Path: f.opt.Enc.FromStandardPath(objPath),
|
||||||
})
|
})
|
||||||
return shouldRetry(ctx, res.err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if res.err != nil {
|
if err != nil {
|
||||||
switch e := res.err.(type) {
|
switch e := err.(type) {
|
||||||
case files.GetMetadataAPIError:
|
case files.GetMetadataAPIError:
|
||||||
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
|
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
|
||||||
res.notFound = true
|
notFound = true
|
||||||
res.err = nil
|
err = nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get metadata such that the result would be exported with the given extension
|
|
||||||
// Return a channel that will eventually receive the metadata
|
|
||||||
func (f *Fs) getMetadataForExt(ctx context.Context, filePath string, wantExportExtension exportExtension) chan getMetadataResult {
|
|
||||||
ch := make(chan getMetadataResult, 1)
|
|
||||||
wantDownloadable := (wantExportExtension == "")
|
|
||||||
go func() {
|
|
||||||
defer close(ch)
|
|
||||||
|
|
||||||
res := f.getMetadata(ctx, filePath)
|
|
||||||
info, ok := res.entry.(*files.FileMetadata)
|
|
||||||
if !ok { // Can't check anything about file, just return what we have
|
|
||||||
ch <- res
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return notFound if downloadability or extension doesn't match
|
|
||||||
if wantDownloadable != info.IsDownloadable {
|
|
||||||
ch <- getMetadataResult{notFound: true}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if !info.IsDownloadable {
|
|
||||||
_, ext := f.chooseExportFormat(info)
|
|
||||||
if ext != wantExportExtension {
|
|
||||||
ch <- getMetadataResult{notFound: true}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return our real result or error
|
|
||||||
ch <- res
|
|
||||||
}()
|
|
||||||
return ch
|
|
||||||
}
|
|
||||||
|
|
||||||
// For a given rclone-path, figure out what the Dropbox-path may be, in order of preference.
|
|
||||||
// Multiple paths might be plausible, due to export path munging.
|
|
||||||
func (f *Fs) possibleMetadatas(ctx context.Context, filePath string) (ret []<-chan getMetadataResult) {
|
|
||||||
ret = []<-chan getMetadataResult{}
|
|
||||||
|
|
||||||
// Prefer an exact match
|
|
||||||
ret = append(ret, f.getMetadataForExt(ctx, filePath, ""))
|
|
||||||
|
|
||||||
// Check if we're plausibly an export path, otherwise we're done
|
|
||||||
if f.opt.SkipExports || f.opt.ShowAllExports {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
dotted := path.Ext(filePath)
|
|
||||||
if dotted == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
ext := exportExtension(dotted[1:])
|
|
||||||
if exportKnownExtensions[ext] == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// We might be an export path! Try all possibilities
|
|
||||||
base := strings.TrimSuffix(filePath, dotted)
|
|
||||||
|
|
||||||
// `foo.papert.md` will only come from `foo.papert`. Never check something like `foo.papert.paper`
|
|
||||||
if strings.HasSuffix(base, paperTemplateExtension) {
|
|
||||||
ret = append(ret, f.getMetadataForExt(ctx, base, ext))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Otherwise, try both `foo.md` coming from `foo`, or from `foo.paper`
|
|
||||||
ret = append(ret, f.getMetadataForExt(ctx, base, ext))
|
|
||||||
ret = append(ret, f.getMetadataForExt(ctx, base+paperExtension, ext))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// getFileMetadata gets the metadata for a file
|
// getFileMetadata gets the metadata for a file
|
||||||
func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (*files.FileMetadata, error) {
|
func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *files.FileMetadata, err error) {
|
||||||
var res getMetadataResult
|
entry, notFound, err := f.getMetadata(ctx, filePath)
|
||||||
|
if err != nil {
|
||||||
// Try all possible metadatas
|
return nil, err
|
||||||
possibleMetadatas := f.possibleMetadatas(ctx, filePath)
|
|
||||||
for _, ch := range possibleMetadatas {
|
|
||||||
res = <-ch
|
|
||||||
|
|
||||||
if res.err != nil {
|
|
||||||
return nil, res.err
|
|
||||||
}
|
|
||||||
if !res.notFound {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
if notFound {
|
||||||
if res.notFound {
|
|
||||||
return nil, fs.ErrorObjectNotFound
|
return nil, fs.ErrorObjectNotFound
|
||||||
}
|
}
|
||||||
|
fileInfo, ok := entry.(*files.FileMetadata)
|
||||||
fileInfo, ok := res.entry.(*files.FileMetadata)
|
|
||||||
if !ok {
|
if !ok {
|
||||||
if _, ok = res.entry.(*files.FolderMetadata); ok {
|
if _, ok = entry.(*files.FolderMetadata); ok {
|
||||||
return nil, fs.ErrorIsDir
|
return nil, fs.ErrorIsDir
|
||||||
}
|
}
|
||||||
return nil, fs.ErrorNotAFile
|
return nil, fs.ErrorNotAFile
|
||||||
@@ -788,15 +650,15 @@ func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (*files.FileM
|
|||||||
}
|
}
|
||||||
|
|
||||||
// getDirMetadata gets the metadata for a directory
|
// getDirMetadata gets the metadata for a directory
|
||||||
func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (*files.FolderMetadata, error) {
|
func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (dirInfo *files.FolderMetadata, err error) {
|
||||||
res := f.getMetadata(ctx, dirPath)
|
entry, notFound, err := f.getMetadata(ctx, dirPath)
|
||||||
if res.err != nil {
|
if err != nil {
|
||||||
return nil, res.err
|
return nil, err
|
||||||
}
|
}
|
||||||
if res.notFound {
|
if notFound {
|
||||||
return nil, fs.ErrorDirNotFound
|
return nil, fs.ErrorDirNotFound
|
||||||
}
|
}
|
||||||
dirInfo, ok := res.entry.(*files.FolderMetadata)
|
dirInfo, ok := entry.(*files.FolderMetadata)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fs.ErrorIsFile
|
return nil, fs.ErrorIsFile
|
||||||
}
|
}
|
||||||
@@ -832,7 +694,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
|||||||
return f.newObjectWithInfo(ctx, remote, nil)
|
return f.newObjectWithInfo(ctx, remote, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// listSharedFolders lists all available shared folders mounted and not mounted
|
// listSharedFoldersApi lists all available shared folders mounted and not mounted
|
||||||
// we'll need the id later so we have to return them in original format
|
// we'll need the id later so we have to return them in original format
|
||||||
func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err error) {
|
func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err error) {
|
||||||
started := false
|
started := false
|
||||||
@@ -996,15 +858,16 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
var res *files.ListFolderResult
|
var res *files.ListFolderResult
|
||||||
for {
|
for {
|
||||||
if !started {
|
if !started {
|
||||||
arg := files.NewListFolderArg(f.opt.Enc.FromStandardPath(root))
|
arg := files.ListFolderArg{
|
||||||
arg.Recursive = false
|
Path: f.opt.Enc.FromStandardPath(root),
|
||||||
arg.Limit = 1000
|
Recursive: false,
|
||||||
|
Limit: 1000,
|
||||||
|
}
|
||||||
if root == "/" {
|
if root == "/" {
|
||||||
arg.Path = "" // Specify root folder as empty string
|
arg.Path = "" // Specify root folder as empty string
|
||||||
}
|
}
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
res, err = f.srv.ListFolder(arg)
|
res, err = f.srv.ListFolder(&arg)
|
||||||
return shouldRetry(ctx, err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -1057,9 +920,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if o.(*Object).exportType.listable() {
|
entries = append(entries, o)
|
||||||
entries = append(entries, o)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !res.HasMore {
|
if !res.HasMore {
|
||||||
@@ -1135,7 +996,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
|
|||||||
if root == "/" {
|
if root == "/" {
|
||||||
return errors.New("can't remove root directory")
|
return errors.New("can't remove root directory")
|
||||||
}
|
}
|
||||||
encRoot := f.opt.Enc.FromStandardPath(root)
|
|
||||||
|
|
||||||
if check {
|
if check {
|
||||||
// check directory exists
|
// check directory exists
|
||||||
@@ -1144,15 +1004,18 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
|
|||||||
return fmt.Errorf("Rmdir: %w", err)
|
return fmt.Errorf("Rmdir: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
root = f.opt.Enc.FromStandardPath(root)
|
||||||
// check directory empty
|
// check directory empty
|
||||||
arg := files.NewListFolderArg(encRoot)
|
arg := files.ListFolderArg{
|
||||||
arg.Recursive = false
|
Path: root,
|
||||||
|
Recursive: false,
|
||||||
|
}
|
||||||
if root == "/" {
|
if root == "/" {
|
||||||
arg.Path = "" // Specify root folder as empty string
|
arg.Path = "" // Specify root folder as empty string
|
||||||
}
|
}
|
||||||
var res *files.ListFolderResult
|
var res *files.ListFolderResult
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
res, err = f.srv.ListFolder(arg)
|
res, err = f.srv.ListFolder(&arg)
|
||||||
return shouldRetry(ctx, err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -1165,7 +1028,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
 
 	// remove it
 	err = f.pacer.Call(func() (bool, error) {
-		_, err = f.srv.DeleteV2(&files.DeleteArg{Path: encRoot})
+		_, err = f.srv.DeleteV2(&files.DeleteArg{Path: root})
 		return shouldRetry(ctx, err)
 	})
 	return err
@@ -1195,20 +1058,13 @@ func (f *Fs) Precision() time.Duration {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
 
-	// Find and remove existing object
-	cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
-	if err != nil {
-		return nil, err
-	}
-	defer cleanup(&err)
-
 	// Temporary Object under construction
 	dstObj := &Object{
 		fs: f,
@@ -1222,6 +1078,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
 			ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()),
 		},
 	}
+	var err error
 	var result *files.RelocationResult
 	err = f.pacer.Call(func() (bool, error) {
 		result, err = f.srv.CopyV2(&arg)
@@ -1333,16 +1190,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 		return shouldRetry(ctx, err)
 	})
 
-	if err != nil && createArg.Settings.Expires != nil && strings.Contains(err.Error(), sharing.SharedLinkSettingsErrorNotAuthorized) {
-		// Some plans can't create links with expiry
-		fs.Debugf(absPath, "can't create link with expiry, trying without")
-		createArg.Settings.Expires = nil
-		err = f.pacer.Call(func() (bool, error) {
-			linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
-			return shouldRetry(ctx, err)
-		})
-	}
-
 	if err != nil && strings.Contains(err.Error(),
 		sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
 		fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
@@ -1434,21 +1281,18 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 		return nil, err
 	}
 	var total uint64
-	used := q.Used
 	if q.Allocation != nil {
 		if q.Allocation.Individual != nil {
 			total += q.Allocation.Individual.Allocated
 		}
 		if q.Allocation.Team != nil {
 			total += q.Allocation.Team.Allocated
-			// Override used with Team.Used as this includes q.Used already
-			used = q.Allocation.Team.Used
 		}
 	}
 	usage = &fs.Usage{
 		Total: fs.NewUsageValue(int64(total)), // quota of bytes that can be used
-		Used:  fs.NewUsageValue(int64(used)),  // bytes in use
-		Free:  fs.NewUsageValue(int64(total - used)), // bytes which can be uploaded before reaching the quota
+		Used:  fs.NewUsageValue(int64(q.Used)), // bytes in use
+		Free:  fs.NewUsageValue(int64(total - q.Used)), // bytes which can be uploaded before reaching the quota
 	}
 	return usage, nil
 }
@@ -1507,14 +1351,16 @@ func (f *Fs) changeNotifyCursor(ctx context.Context) (cursor string, err error)
 	var startCursor *files.ListFolderGetLatestCursorResult
 
 	err = f.pacer.Call(func() (bool, error) {
-		arg := files.NewListFolderArg(f.opt.Enc.FromStandardPath(f.slashRoot))
-		arg.Recursive = true
+		arg := files.ListFolderArg{
+			Path:      f.opt.Enc.FromStandardPath(f.slashRoot),
+			Recursive: true,
+		}
 
 		if arg.Path == "/" {
 			arg.Path = ""
 		}
 
-		startCursor, err = f.srv.ListFolderGetLatestCursor(arg)
+		startCursor, err = f.srv.ListFolderGetLatestCursor(&arg)
 
 		return shouldRetry(ctx, err)
 	})
@@ -1618,50 +1464,8 @@ func (f *Fs) Shutdown(ctx context.Context) error {
 	return nil
 }
 
-func (f *Fs) chooseExportFormat(info *files.FileMetadata) (exportAPIFormat, exportExtension) {
-	// Find API export formats Dropbox supports for this file
-	// Sometimes Dropbox lists a format in ExportAs but not ExportOptions, so check both
-	ei := info.ExportInfo
-	dropboxFormatStrings := append([]string{ei.ExportAs}, ei.ExportOptions...)
-
-	// Find which extensions these correspond to
-	exportExtensions := map[exportExtension]exportAPIFormat{}
-	var dropboxPreferredAPIFormat exportAPIFormat
-	var dropboxPreferredExtension exportExtension
-	for _, format := range dropboxFormatStrings {
-		apiFormat := exportAPIFormat(format)
-		// Only consider formats we know about
-		if ext, ok := exportKnownAPIFormats[apiFormat]; ok {
-			if dropboxPreferredAPIFormat == "" {
-				dropboxPreferredAPIFormat = apiFormat
-				dropboxPreferredExtension = ext
-			}
-			exportExtensions[ext] = apiFormat
-		}
-	}
-
-	// See if the user picked a valid extension
-	for _, ext := range f.exportExts {
-		if apiFormat, ok := exportExtensions[ext]; ok {
-			return apiFormat, ext
-		}
-	}
-
-	// If no matches, prefer the first valid format Dropbox lists
-	return dropboxPreferredAPIFormat, dropboxPreferredExtension
-}
-
 // ------------------------------------------------------------
 
-func (et exportType) listable() bool {
-	return et != exportHide
-}
-
-// something we should _try_ to export
-func (et exportType) exportable() bool {
-	return et == exportExportable || et == exportListOnly
-}
-
 // Fs returns the parent Fs
 func (o *Object) Fs() fs.Info {
 	return o.fs
@@ -1705,32 +1509,6 @@ func (o *Object) Size() int64 {
 	return o.bytes
 }
 
-func (o *Object) setMetadataForExport(info *files.FileMetadata) {
-	o.bytes = -1
-	o.hash = ""
-
-	if o.fs.opt.SkipExports {
-		o.exportType = exportHide
-		return
-	}
-	if o.fs.opt.ShowAllExports {
-		o.exportType = exportListOnly
-		return
-	}
-
-	var exportExt exportExtension
-	o.exportAPIFormat, exportExt = o.fs.chooseExportFormat(info)
-	if o.exportAPIFormat == "" {
-		o.exportType = exportHide
-	} else {
-		o.exportType = exportExportable
-		// get rid of any paper extension, if present
-		o.remote = strings.TrimSuffix(o.remote, paperExtension)
-		// add the export extension
-		o.remote += "." + string(exportExt)
-	}
-}
-
 // setMetadataFromEntry sets the fs data from a files.FileMetadata
 //
 // This isn't a complete set of metadata and has an inaccurate date
@@ -1739,10 +1517,6 @@ func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
 	o.bytes = int64(info.Size)
 	o.modTime = info.ClientModified
 	o.hash = info.ContentHash
-
-	if !info.IsDownloadable {
-		o.setMetadataForExport(info)
-	}
 	return nil
 }
 
@@ -1806,27 +1580,6 @@ func (o *Object) Storable() bool {
 	return true
 }
 
-func (o *Object) export(ctx context.Context) (in io.ReadCloser, err error) {
-	if o.exportType == exportListOnly || o.exportAPIFormat == "" {
-		fs.Debugf(o.remote, "No export format found")
-		return nil, fs.ErrorObjectNotFound
-	}
-
-	arg := files.ExportArg{Path: o.id, ExportFormat: string(o.exportAPIFormat)}
-	var exportResult *files.ExportResult
-	err = o.fs.pacer.Call(func() (bool, error) {
-		exportResult, in, err = o.fs.srv.Export(&arg)
-		return shouldRetry(ctx, err)
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	o.bytes = int64(exportResult.ExportMetadata.Size)
-	o.hash = exportResult.ExportMetadata.ExportHash
-	return
-}
-
 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	if o.fs.opt.SharedFiles {
@@ -1846,10 +1599,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		return
 	}
 
-	if o.exportType.exportable() {
-		return o.export(ctx)
-	}
-
 	fs.FixRangeOption(options, o.bytes)
 	headers := fs.OpenOptionHeaders(options)
 	arg := files.DownloadArg{
@@ -1973,15 +1722,19 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
 	// If we are batching then we should have written all the data now
 	// store the commit info now for a batch commit
 	if o.fs.batcher.Batching() {
-		return o.fs.batcher.Commit(ctx, o.remote, args)
+		return o.fs.batcher.Commit(ctx, args)
 	}
 
 	err = o.fs.pacer.Call(func() (bool, error) {
 		entry, err = o.fs.srv.UploadSessionFinish(args, nil)
-		if retry, err := shouldRetryExclude(ctx, err); !retry {
-			return retry, err
+		// If error is insufficient space then don't retry
+		if e, ok := err.(files.UploadSessionFinishAPIError); ok {
+			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
+				err = fserrors.NoRetryError(err)
+				return false, err
+			}
 		}
-		// after the first chunk is uploaded, we retry everything except the excluded errors
+		// after the first chunk is uploaded, we retry everything
 		return err != nil, err
 	})
 	if err != nil {
@@ -1,16 +1,9 @@
 package dropbox
 
 import (
-	"context"
-	"io"
-	"strings"
 	"testing"
 
-	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
-	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
-	"github.com/rclone/rclone/fstest/fstests"
 	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 )
 
 func TestInternalCheckPathLength(t *testing.T) {
@@ -49,54 +42,3 @@ func TestInternalCheckPathLength(t *testing.T) {
 		assert.Equal(t, test.ok, err == nil, test.in)
 	}
 }
-
-func (f *Fs) importPaperForTest(t *testing.T) {
-	content := `# test doc
-
-Lorem ipsum __dolor__ sit amet
-[link](http://google.com)
-`
-
-	arg := files.PaperCreateArg{
-		Path:         f.slashRootSlash + "export.paper",
-		ImportFormat: &files.ImportFormat{Tagged: dropbox.Tagged{Tag: files.ImportFormatMarkdown}},
-	}
-	var err error
-	err = f.pacer.Call(func() (bool, error) {
-		reader := strings.NewReader(content)
-		_, err = f.srv.PaperCreate(&arg, reader)
-		return shouldRetry(context.Background(), err)
-	})
-	require.NoError(t, err)
-}
-
-func (f *Fs) InternalTestPaperExport(t *testing.T) {
-	ctx := context.Background()
-	f.importPaperForTest(t)
-
-	f.exportExts = []exportExtension{"html"}
-
-	obj, err := f.NewObject(ctx, "export.html")
-	require.NoError(t, err)
-
-	rc, err := obj.Open(ctx)
-	require.NoError(t, err)
-	defer func() { require.NoError(t, rc.Close()) }()
-
-	buf, err := io.ReadAll(rc)
-	require.NoError(t, err)
-	text := string(buf)
-
-	for _, excerpt := range []string{
-		"Lorem ipsum",
-		"<b>dolor</b>",
-		`href="http://google.com"`,
-	} {
-		require.Contains(t, text, excerpt)
-	}
-}
-func (f *Fs) InternalTest(t *testing.T) {
-	t.Run("PaperExport", f.InternalTestPaperExport)
-}
-
-var _ fstests.InternalTester = (*Fs)(nil)
@@ -61,7 +61,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
 		return false, err // No such user
 	case 186:
 		return false, err // IP blocked?
-	case 374, 412: // Flood detected seems to be #412 now
+	case 374:
 		fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
 		time.Sleep(30 * time.Second)
 	default:
@@ -441,28 +441,23 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		fs.Debugf(src, "Can't move - not same remote type")
 		return nil, fs.ErrorCantMove
 	}
-	srcFs := srcObj.fs
 
 	// Find current directory ID
-	srcLeaf, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcObj.remote, false)
+	_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
 	if err != nil {
 		return nil, err
 	}
 
 	// Create temporary object
-	dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote)
+	dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
 	if err != nil {
 		return nil, err
 	}
 
 	// If it is in the correct directory, just rename it
 	var url string
-	if srcDirectoryID == dstDirectoryID {
-		// No rename needed
-		if srcLeaf == dstLeaf {
-			return src, nil
-		}
-		resp, err := f.renameFile(ctx, srcObj.file.URL, dstLeaf)
+	if currentDirectoryID == directoryID {
+		resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
 		if err != nil {
 			return nil, fmt.Errorf("couldn't rename file: %w", err)
 		}
@@ -471,16 +466,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		}
 		url = resp.URLs[0].URL
 	} else {
-		dstFolderID, err := strconv.Atoi(dstDirectoryID)
+		folderID, err := strconv.Atoi(directoryID)
 		if err != nil {
 			return nil, err
 		}
-		rename := dstLeaf
-		// No rename needed
-		if srcLeaf == dstLeaf {
-			rename = ""
-		}
-		resp, err := f.moveFile(ctx, srcObj.file.URL, dstFolderID, rename)
+		resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
 		if err != nil {
 			return nil, fmt.Errorf("couldn't move file: %w", err)
 		}
@@ -216,11 +216,11 @@ var ItemFields = mustFields(Item{})
 
 // fields returns the JSON fields in use by opt as a | separated
 // string.
-func fields(opt any) (pipeTags string, err error) {
+func fields(opt interface{}) (pipeTags string, err error) {
 	var tags []string
 	def := reflect.ValueOf(opt)
 	defType := def.Type()
-	for i := range def.NumField() {
+	for i := 0; i < def.NumField(); i++ {
 		field := defType.Field(i)
 		tag, ok := field.Tag.Lookup("json")
 		if !ok {
@@ -239,7 +239,7 @@ func fields(opt any) (pipeTags string, err error) {
 
 // mustFields returns the JSON fields in use by opt as a | separated
 // string. It panics on failure.
-func mustFields(opt any) string {
+func mustFields(opt interface{}) string {
 	tags, err := fields(opt)
 	if err != nil {
 		panic(err)
@@ -351,12 +351,12 @@ type SpaceInfo struct {
 // DeleteResponse is returned from doDeleteFile
 type DeleteResponse struct {
 	Status
 	Deleted []string `json:"deleted"`
-	Errors []any `json:"errors"`
+	Errors []interface{} `json:"errors"`
 	ID string `json:"fi_id"`
 	BackgroundTask int `json:"backgroundtask"`
 	UsSize string `json:"us_size"`
 	PaSize string `json:"pa_size"`
 	//SpaceInfo SpaceInfo `json:"spaceinfo"`
 }
 
@@ -371,7 +371,7 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
 }
 
 // params for rpc
-type params map[string]any
+type params map[string]interface{}
 
 // rpc calls the rpc.php method of the SME file fabric
 //
@@ -1,81 +0,0 @@
-// Package api defines types for interacting with the FileLu API.
-package api
-
-import "encoding/json"
-
-// CreateFolderResponse represents the response for creating a folder.
-type CreateFolderResponse struct {
-	Status int    `json:"status"`
-	Msg    string `json:"msg"`
-	Result struct {
-		FldID interface{} `json:"fld_id"`
-	} `json:"result"`
-}
-
-// DeleteFolderResponse represents the response for deleting a folder.
-type DeleteFolderResponse struct {
-	Status int    `json:"status"`
-	Msg    string `json:"msg"`
-}
-
-// FolderListResponse represents the response for listing folders.
-type FolderListResponse struct {
-	Status int    `json:"status"`
-	Msg    string `json:"msg"`
-	Result struct {
-		Files []struct {
-			Name     string      `json:"name"`
-			FldID    json.Number `json:"fld_id"`
-			Path     string      `json:"path"`
-			FileCode string      `json:"file_code"`
-			Size     int64       `json:"size"`
-		} `json:"files"`
-		Folders []struct {
-			Name  string      `json:"name"`
-			FldID json.Number `json:"fld_id"`
-			Path  string      `json:"path"`
-		} `json:"folders"`
-	} `json:"result"`
-}
-
-// FileDirectLinkResponse represents the response for a direct link to a file.
-type FileDirectLinkResponse struct {
-	Status int    `json:"status"`
-	Msg    string `json:"msg"`
-	Result struct {
-		URL  string `json:"url"`
-		Size int64  `json:"size"`
-	} `json:"result"`
-}
-
-// FileInfoResponse represents the response for file information.
-type FileInfoResponse struct {
-	Status int    `json:"status"`
-	Msg    string `json:"msg"`
-	Result []struct {
-		Size     string `json:"size"`
-		Name     string `json:"name"`
-		FileCode string `json:"filecode"`
-		Hash     string `json:"hash"`
-		Status   int    `json:"status"`
-	} `json:"result"`
-}
-
-// DeleteFileResponse represents the response for deleting a file.
-type DeleteFileResponse struct {
-	Status int    `json:"status"`
-	Msg    string `json:"msg"`
-}
-
-// AccountInfoResponse represents the response for account information.
-type AccountInfoResponse struct {
-	Status int    `json:"status"` // HTTP status code of the response.
-	Msg    string `json:"msg"`    // Message describing the response.
-	Result struct {
-		PremiumExpire string `json:"premium_expire"` // Expiration date of premium access.
-		Email         string `json:"email"`          // User's email address.
-		UType         string `json:"utype"`          // User type (e.g., premium or free).
-		Storage       string `json:"storage"`        // Total storage available to the user.
-		StorageUsed   string `json:"storage_used"`   // Amount of storage used.
-	} `json:"result"` // Nested result structure containing account details.
-}
@@ -1,366 +0,0 @@
|
|||||||
// Package filelu provides an interface to the FileLu storage system.
|
|
||||||
package filelu
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/config"
|
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
|
||||||
"github.com/rclone/rclone/fs/fshttp"
|
|
||||||
"github.com/rclone/rclone/lib/encoder"
|
|
||||||
"github.com/rclone/rclone/lib/pacer"
|
|
||||||
"github.com/rclone/rclone/lib/rest"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Register the backend with Rclone
|
|
||||||
func init() {
|
|
||||||
fs.Register(&fs.RegInfo{
|
|
||||||
Name: "filelu",
|
|
||||||
Description: "FileLu Cloud Storage",
|
|
||||||
NewFs: NewFs,
|
|
||||||
Options: []fs.Option{{
|
|
||||||
Name: "key",
|
|
||||||
Help: "Your FileLu Rclone key from My Account",
|
|
||||||
Required: true,
|
|
||||||
Sensitive: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: config.ConfigEncoding,
|
|
||||||
Help: config.ConfigEncodingHelp,
|
|
||||||
Advanced: true,
|
|
||||||
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
|
|
||||||
encoder.EncodeSlash |
|
|
||||||
encoder.EncodeLtGt |
|
|
||||||
encoder.EncodeExclamation |
|
|
||||||
encoder.EncodeDoubleQuote |
|
|
||||||
encoder.EncodeSingleQuote |
|
|
||||||
encoder.EncodeBackQuote |
|
|
||||||
encoder.EncodeQuestion |
|
|
||||||
encoder.EncodeDollar |
|
|
||||||
encoder.EncodeColon |
|
|
||||||
encoder.EncodeAsterisk |
|
|
||||||
encoder.EncodePipe |
|
|
||||||
encoder.EncodeHash |
|
|
||||||
encoder.EncodePercent |
|
|
||||||
encoder.EncodeBackSlash |
|
|
||||||
encoder.EncodeCrLf |
|
|
||||||
encoder.EncodeDel |
|
|
||||||
encoder.EncodeCtl |
|
|
||||||
encoder.EncodeLeftSpace |
|
|
||||||
encoder.EncodeLeftPeriod |
|
|
||||||
encoder.EncodeLeftTilde |
|
|
||||||
encoder.EncodeLeftCrLfHtVt |
|
|
||||||
encoder.EncodeRightPeriod |
|
|
||||||
encoder.EncodeRightCrLfHtVt |
|
|
||||||
encoder.EncodeSquareBracket |
|
|
||||||
encoder.EncodeSemicolon |
|
|
||||||
encoder.EncodeRightSpace |
|
|
||||||
encoder.EncodeInvalidUtf8 |
|
|
||||||
encoder.EncodeDot),
|
|
||||||
},
|
|
||||||
}})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Options defines the configuration for the FileLu backend
|
|
||||||
type Options struct {
|
|
||||||
Key string `config:"key"`
|
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs represents the FileLu file system
|
|
||||||
type Fs struct {
|
|
||||||
name string
|
|
||||||
root string
|
|
||||||
opt Options
|
|
||||||
features *fs.Features
|
|
||||||
endpoint string
|
|
||||||
pacer *pacer.Pacer
|
|
||||||
srv *rest.Client
|
|
||||||
client *http.Client
|
|
||||||
targetFile string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFs creates a new Fs object for FileLu
|
|
||||||
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
|
|
||||||
opt := new(Options)
|
|
||||||
err := configstruct.Set(m, opt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to parse config: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if opt.Key == "" {
|
|
||||||
return nil, fmt.Errorf("FileLu Rclone Key is required")
|
|
||||||
}
|
|
||||||
|
|
||||||
client := fshttp.NewClient(ctx)
|
|
||||||
|
|
||||||
if strings.TrimSpace(root) == "" {
|
|
||||||
root = ""
|
|
||||||
}
|
|
||||||
root = strings.Trim(root, "/")
|
|
||||||
|
|
||||||
filename := ""
|
|
||||||
|
|
||||||
f := &Fs{
|
|
||||||
name: name,
|
|
||||||
opt: *opt,
|
|
||||||
endpoint: "https://filelu.com/rclone",
|
|
||||||
client: client,
|
|
||||||
srv: rest.NewClient(client).SetRoot("https://filelu.com/rclone"),
|
|
||||||
pacer: pacer.New(),
|
|
||||||
targetFile: filename,
|
|
||||||
root: root,
|
|
||||||
}
|
|
||||||
|
|
||||||
f.features = (&fs.Features{
|
|
||||||
CanHaveEmptyDirectories: true,
|
|
||||||
WriteMetadata: false,
|
|
||||||
SlowHash: true,
|
|
||||||
}).Fill(ctx, f)
|
|
||||||
|
|
||||||
rootContainer, rootDirectory := rootSplit(f.root)
|
|
||||||
if rootContainer != "" && rootDirectory != "" {
|
|
||||||
// Check to see if the (container,directory) is actually an existing file
|
|
||||||
oldRoot := f.root
|
|
||||||
newRoot, leaf := path.Split(oldRoot)
|
|
||||||
f.root = strings.Trim(newRoot, "/")
|
|
||||||
_, err := f.NewObject(ctx, leaf)
|
|
||||||
if err != nil {
|
|
||||||
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
|
|
||||||
// File doesn't exist or is a directory so return old f
|
|
||||||
f.root = strings.Trim(oldRoot, "/")
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// return an error with an fs which points to the parent
|
|
||||||
return f, fs.ErrorIsFile
|
|
||||||
}
|
|
||||||
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mkdir to create directory on remote server.
|
|
||||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
|
||||||
fullPath := path.Clean(f.root + "/" + dir)
|
|
||||||
_, err := f.createFolder(ctx, fullPath)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// About provides usage statistics for the remote
|
|
||||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
|
||||||
accountInfo, err := f.getAccountInfo(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
totalStorage, err := parseStorageToBytes(accountInfo.Result.Storage)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to parse total storage: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
usedStorage, err := parseStorageToBytes(accountInfo.Result.StorageUsed)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to parse used storage: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &fs.Usage{
|
|
||||||
Total: fs.NewUsageValue(totalStorage), // Total bytes available
|
|
||||||
Used: fs.NewUsageValue(usedStorage), // Total bytes used
|
|
||||||
Free: fs.NewUsageValue(totalStorage - usedStorage),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Purge deletes the directory and all its contents
|
|
||||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
|
||||||
fullPath := path.Join(f.root, dir)
|
|
||||||
if fullPath != "" {
|
|
||||||
fullPath = "/" + strings.Trim(fullPath, "/")
|
|
||||||
}
|
|
||||||
return f.deleteFolder(ctx, fullPath)
|
|
||||||
}
|
|
||||||
|
|
||||||
// List returns a list of files and folders
|
|
||||||
// List returns a list of files and folders for the given directory
|
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
|
|
||||||
// Compose full path for API call
|
|
||||||
fullPath := path.Join(f.root, dir)
|
|
||||||
fullPath = "/" + strings.Trim(fullPath, "/")
|
|
||||||
if fullPath == "/" {
|
|
||||||
fullPath = ""
|
|
||||||
}
|
|
||||||
|
|
||||||
var entries fs.DirEntries
|
|
||||||
result, err := f.getFolderList(ctx, fullPath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
fldMap := map[string]bool{}
|
|
||||||
for _, folder := range result.Result.Folders {
|
|
||||||
fldMap[folder.FldID.String()] = true
|
|
||||||
if f.root == "" && dir == "" && strings.Contains(folder.Path, "/") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
paths := strings.Split(folder.Path, fullPath+"/")
|
|
||||||
remote := paths[0]
|
|
||||||
if len(paths) > 1 {
|
|
||||||
remote = paths[1]
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.Contains(remote, "/") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
pathsWithoutRoot := strings.Split(folder.Path, "/"+f.root+"/")
|
|
||||||
remotePathWithoutRoot := pathsWithoutRoot[0]
|
|
||||||
if len(pathsWithoutRoot) > 1 {
|
|
||||||
remotePathWithoutRoot = pathsWithoutRoot[1]
|
|
||||||
}
|
|
||||||
remotePathWithoutRoot = strings.TrimPrefix(remotePathWithoutRoot, "/")
|
|
||||||
entries = append(entries, fs.NewDir(remotePathWithoutRoot, time.Now()))
|
|
||||||
}
|
|
||||||
for _, file := range result.Result.Files {
|
|
||||||
if _, ok := fldMap[file.FldID.String()]; ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
remote := path.Join(dir, file.Name)
|
|
||||||
// trim leading slashes
|
|
||||||
remote = strings.TrimPrefix(remote, "/")
|
|
||||||
obj := &Object{
|
|
||||||
fs: f,
|
|
||||||
remote: remote,
|
|
||||||
size: file.Size,
|
|
||||||
modTime: time.Now(),
|
|
||||||
}
|
|
||||||
entries = append(entries, obj)
|
|
||||||
}
|
|
||||||
return entries, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put uploads a file directly to the destination folder in the FileLu storage system.
|
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
|
||||||
if src.Size() == 0 {
|
|
||||||
return nil, fs.ErrorCantUploadEmptyFiles
|
|
||||||
}
|
|
||||||
|
|
||||||
err := f.uploadFile(ctx, in, src.Remote())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
newObject := &Object{
|
|
||||||
fs: f,
|
|
||||||
remote: src.Remote(),
|
|
||||||
size: src.Size(),
|
|
||||||
modTime: src.ModTime(ctx),
|
|
||||||
}
|
|
||||||
fs.Infof(f, "Put: Successfully uploaded new file %q", src.Remote())
|
|
||||||
return newObject, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Move moves the file to the specified location
|
|
||||||
func (f *Fs) Move(ctx context.Context, src fs.Object, destinationPath string) (fs.Object, error) {
|
|
||||||
|
|
||||||
if strings.HasPrefix(destinationPath, "/") || strings.Contains(destinationPath, ":\\") {
|
|
||||||
dir := path.Dir(destinationPath)
|
|
||||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create destination directory: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
reader, err := src.Open(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to open source file: %w", err)
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if err := reader.Close(); err != nil {
|
|
||||||
fs.Logf(nil, "Failed to close file body: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
dest, err := os.Create(destinationPath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create destination file: %w", err)
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if err := dest.Close(); err != nil {
|
|
||||||
fs.Logf(nil, "Failed to close file body: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
if _, err := io.Copy(dest, reader); err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to copy file content: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := src.Remove(ctx); err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to remove source file: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
reader, err := src.Open(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to open source object: %w", err)
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if err := reader.Close(); err != nil {
|
|
||||||
fs.Logf(nil, "Failed to close file body: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
err = f.uploadFile(ctx, reader, destinationPath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to upload file to destination: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := src.Remove(ctx); err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to delete source file: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &Object{
|
|
||||||
fs: f,
|
|
||||||
remote: destinationPath,
|
|
||||||
size: src.Size(),
|
|
||||||
modTime: src.ModTime(ctx),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rmdir removes a directory
|
|
||||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|
||||||
fullPath := path.Join(f.root, dir)
|
|
||||||
if fullPath != "" {
|
|
||||||
fullPath = "/" + strings.Trim(fullPath, "/")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Step 1: Check if folder is empty
|
|
||||||
listResp, err := f.getFolderList(ctx, fullPath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if len(listResp.Result.Files) > 0 || len(listResp.Result.Folders) > 0 {
|
|
||||||
return fmt.Errorf("Rmdir: directory %q is not empty", fullPath)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Step 2: Delete the folder
|
|
||||||
return f.deleteFolder(ctx, fullPath)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the interfaces are satisfied
|
|
||||||
var (
|
|
||||||
_ fs.Fs = (*Fs)(nil)
|
|
||||||
_ fs.Purger = (*Fs)(nil)
|
|
||||||
_ fs.Abouter = (*Fs)(nil)
|
|
||||||
_ fs.Mover = (*Fs)(nil)
|
|
||||||
_ fs.Object = (*Object)(nil)
|
|
||||||
)
|
|
||||||
@@ -1,324 +0,0 @@
|
|||||||
package filelu
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/backend/filelu/api"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/fserrors"
|
|
||||||
"github.com/rclone/rclone/lib/rest"
|
|
||||||
)
|
|
||||||
|
|
||||||
// createFolder creates a folder at the specified path.
|
|
||||||
func (f *Fs) createFolder(ctx context.Context, dirPath string) (*api.CreateFolderResponse, error) {
|
|
||||||
encodedDir := f.fromStandardPath(dirPath)
|
|
||||||
apiURL := fmt.Sprintf("%s/folder/create?folder_path=%s&key=%s",
|
|
||||||
f.endpoint,
|
|
||||||
url.QueryEscape(encodedDir),
|
|
||||||
url.QueryEscape(f.opt.Key), // assuming f.opt.Key is the correct field
|
|
||||||
)
|
|
||||||
|
|
||||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create request: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var resp *http.Response
|
|
||||||
result := api.CreateFolderResponse{}
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
var innerErr error
|
|
||||||
resp, innerErr = f.client.Do(req)
|
|
||||||
return fserrors.ShouldRetry(innerErr), innerErr
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("request failed: %w", err)
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if err := resp.Body.Close(); err != nil {
|
|
||||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
err = json.NewDecoder(resp.Body).Decode(&result)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error decoding response: %w", err)
|
|
||||||
}
|
|
||||||
if result.Status != 200 {
|
|
||||||
return nil, fmt.Errorf("error: %s", result.Msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
fs.Infof(f, "Successfully created folder %q with ID %v", dirPath, result.Result.FldID)
|
|
||||||
return &result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getFolderList List both files and folders in a directory.
|
|
||||||
func (f *Fs) getFolderList(ctx context.Context, path string) (*api.FolderListResponse, error) {
|
|
||||||
encodedDir := f.fromStandardPath(path)
|
|
||||||
apiURL := fmt.Sprintf("%s/folder/list?folder_path=%s&key=%s",
|
|
||||||
f.endpoint,
|
|
||||||
url.QueryEscape(encodedDir),
|
|
||||||
url.QueryEscape(f.opt.Key),
|
|
||||||
)
|
|
||||||
|
|
||||||
var body []byte
|
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
|
||||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
|
||||||
if err != nil {
|
|
||||||
return false, fmt.Errorf("failed to create request: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := f.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return shouldRetry(err), fmt.Errorf("failed to list directory: %w", err)
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if err := resp.Body.Close(); err != nil {
|
|
||||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
body, err = io.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return false, fmt.Errorf("error reading response body: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return shouldRetryHTTP(resp.StatusCode), nil
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var response api.FolderListResponse
|
|
||||||
if err := json.NewDecoder(bytes.NewReader(body)).Decode(&response); err != nil {
|
|
||||||
return nil, fmt.Errorf("error decoding response: %w", err)
|
|
||||||
}
|
|
||||||
if response.Status != 200 {
|
|
||||||
if strings.Contains(response.Msg, "Folder not found") {
|
|
||||||
return nil, fs.ErrorDirNotFound
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("API error: %s", response.Msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
for index := range response.Result.Folders {
|
|
||||||
response.Result.Folders[index].Path = f.toStandardPath(response.Result.Folders[index].Path)
|
|
||||||
}
|
|
||||||
|
|
||||||
for index := range response.Result.Files {
|
|
||||||
response.Result.Files[index].Name = f.toStandardPath(response.Result.Files[index].Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &response, nil
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// deleteFolder deletes a folder at the specified path.
|
|
||||||
func (f *Fs) deleteFolder(ctx context.Context, fullPath string) error {
|
|
||||||
fullPath = f.fromStandardPath(fullPath)
|
|
||||||
deleteURL := fmt.Sprintf("%s/folder/delete?folder_path=%s&key=%s",
|
|
||||||
f.endpoint,
|
|
||||||
url.QueryEscape(fullPath),
|
|
||||||
url.QueryEscape(f.opt.Key),
|
|
||||||
)
|
|
||||||
|
|
||||||
delResp := api.DeleteFolderResponse{}
|
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
|
||||||
req, err := http.NewRequestWithContext(ctx, "GET", deleteURL, nil)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
resp, err := f.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return fserrors.ShouldRetry(err), err
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if err := resp.Body.Close(); err != nil {
|
|
||||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
body, err := io.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := json.Unmarshal(body, &delResp); err != nil {
|
|
||||||
return false, fmt.Errorf("error decoding delete response: %w", err)
|
|
||||||
}
|
|
||||||
if delResp.Status != 200 {
|
|
||||||
return false, fmt.Errorf("delete error: %s", delResp.Msg)
|
|
||||||
}
|
|
||||||
return false, nil
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
fs.Infof(f, "Rmdir: successfully deleted %q", fullPath)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getDirectLink of file from FileLu to download.
|
|
||||||
func (f *Fs) getDirectLink(ctx context.Context, filePath string) (string, int64, error) {
|
|
||||||
filePath = f.fromStandardPath(filePath)
|
|
||||||
apiURL := fmt.Sprintf("%s/file/direct_link?file_path=%s&key=%s",
|
|
||||||
f.endpoint,
|
|
||||||
url.QueryEscape(filePath),
|
|
||||||
url.QueryEscape(f.opt.Key),
|
|
||||||
)
|
|
||||||
|
|
||||||
result := api.FileDirectLinkResponse{}
|
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
|
||||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
|
||||||
if err != nil {
|
|
||||||
return false, fmt.Errorf("failed to create request: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := f.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return shouldRetry(err), fmt.Errorf("failed to fetch direct link: %w", err)
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if err := resp.Body.Close(); err != nil {
|
|
||||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
|
||||||
return false, fmt.Errorf("error decoding response: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if result.Status != 200 {
|
|
||||||
return false, fmt.Errorf("API error: %s", result.Msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
return shouldRetryHTTP(resp.StatusCode), nil
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return "", 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return result.Result.URL, result.Result.Size, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// deleteFile deletes a file based on filePath
|
|
||||||
func (f *Fs) deleteFile(ctx context.Context, filePath string) error {
|
|
||||||
filePath = f.fromStandardPath(filePath)
|
|
||||||
apiURL := fmt.Sprintf("%s/file/remove?file_path=%s&key=%s",
|
|
||||||
f.endpoint,
|
|
||||||
url.QueryEscape(filePath),
|
|
||||||
url.QueryEscape(f.opt.Key),
|
|
||||||
)
|
|
||||||
|
|
||||||
result := api.DeleteFileResponse{}
|
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
|
||||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
|
||||||
if err != nil {
|
|
||||||
return false, fmt.Errorf("failed to create request: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := f.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return shouldRetry(err), fmt.Errorf("failed to fetch direct link: %w", err)
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if err := resp.Body.Close(); err != nil {
|
|
||||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
|
||||||
return false, fmt.Errorf("error decoding response: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if result.Status != 200 {
|
|
||||||
return false, fmt.Errorf("API error: %s", result.Msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
return shouldRetryHTTP(resp.StatusCode), nil
|
|
||||||
})
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// getAccountInfo retrieves account information
|
|
||||||
func (f *Fs) getAccountInfo(ctx context.Context) (*api.AccountInfoResponse, error) {
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "GET",
|
|
||||||
Path: "/account/info",
|
|
||||||
Parameters: url.Values{
|
|
||||||
"key": {f.opt.Key},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
var result api.AccountInfoResponse
|
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
|
||||||
_, callErr := f.srv.CallJSON(ctx, &opts, nil, &result)
|
|
||||||
return fserrors.ShouldRetry(callErr), callErr
|
|
||||||
})
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if result.Status != 200 {
|
|
||||||
return nil, fmt.Errorf("error: %s", result.Msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getFileInfo retrieves file information based on file code
|
|
||||||
func (f *Fs) getFileInfo(ctx context.Context, fileCode string) (*api.FileInfoResponse, error) {
|
|
||||||
u, _ := url.Parse(f.endpoint + "/file/info2")
|
|
||||||
q := u.Query()
|
|
||||||
q.Set("file_code", fileCode) // raw path — Go handles escaping properly here
|
|
||||||
q.Set("key", f.opt.Key)
|
|
||||||
u.RawQuery = q.Encode()
|
|
||||||
|
|
||||||
apiURL := f.endpoint + "/file/info2?" + u.RawQuery
|
|
||||||
|
|
||||||
var body []byte
|
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
|
||||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
|
||||||
if err != nil {
|
|
||||||
return false, fmt.Errorf("failed to create request: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := f.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return shouldRetry(err), fmt.Errorf("failed to fetch file info: %w", err)
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if err := resp.Body.Close(); err != nil {
|
|
||||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
body, err = io.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return false, fmt.Errorf("error reading response body: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return shouldRetryHTTP(resp.StatusCode), nil
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
result := api.FileInfoResponse{}
|
|
||||||
|
|
||||||
if err := json.NewDecoder(bytes.NewReader(body)).Decode(&result); err != nil {
|
|
||||||
return nil, fmt.Errorf("error decoding response: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if result.Status != 200 || len(result.Result) == 0 {
|
|
||||||
return nil, fs.ErrorObjectNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
return &result, nil
|
|
||||||
}
|
|
||||||
@@ -1,193 +0,0 @@
|
|||||||
package filelu
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"mime/multipart"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
)
|
|
||||||
|
|
||||||
// uploadFile uploads a file to FileLu
|
|
||||||
func (f *Fs) uploadFile(ctx context.Context, fileContent io.Reader, fileFullPath string) error {
|
|
||||||
directory := path.Dir(fileFullPath)
|
|
||||||
fileName := path.Base(fileFullPath)
|
|
||||||
if directory == "." {
|
|
||||||
directory = ""
|
|
||||||
}
|
|
||||||
destinationFolderPath := path.Join(f.root, directory)
|
|
||||||
if destinationFolderPath != "" {
|
|
||||||
destinationFolderPath = "/" + strings.Trim(destinationFolderPath, "/")
|
|
||||||
}
|
|
||||||
|
|
||||||
existingEntries, err := f.List(ctx, path.Dir(fileFullPath))
|
|
||||||
if err != nil {
|
|
||||||
if errors.Is(err, fs.ErrorDirNotFound) {
|
|
||||||
err = f.Mkdir(ctx, path.Dir(fileFullPath))
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create directory: %w", err)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return fmt.Errorf("failed to list existing files: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, entry := range existingEntries {
|
|
||||||
if entry.Remote() == fileFullPath {
|
|
||||||
_, ok := entry.(fs.Object)
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the file exists but is different, remove it
|
|
||||||
filePath := "/" + strings.Trim(destinationFolderPath+"/"+fileName, "/")
|
|
||||||
err = f.deleteFile(ctx, filePath)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to delete existing file: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
uploadURL, sessID, err := f.getUploadServer(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to retrieve upload server: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Since the fileCode isn't used, just handle the error
|
|
||||||
if _, err := f.uploadFileWithDestination(ctx, uploadURL, sessID, fileName, fileContent, destinationFolderPath); err != nil {
|
|
||||||
return fmt.Errorf("failed to upload file: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getUploadServer gets the upload server URL with proper key authentication
|
|
||||||
func (f *Fs) getUploadServer(ctx context.Context) (string, string, error) {
|
|
||||||
apiURL := fmt.Sprintf("%s/upload/server?key=%s", f.endpoint, url.QueryEscape(f.opt.Key))
|
|
||||||
|
|
||||||
var result struct {
|
|
||||||
Status int `json:"status"`
|
|
||||||
SessID string `json:"sess_id"`
|
|
||||||
Result string `json:"result"`
|
|
||||||
Msg string `json:"msg"`
|
|
||||||
}
|
|
||||||
|
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
|
||||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
|
||||||
if err != nil {
|
|
||||||
return false, fmt.Errorf("failed to create request: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := f.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return shouldRetry(err), fmt.Errorf("failed to get upload server: %w", err)
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if err := resp.Body.Close(); err != nil {
|
|
||||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
|
||||||
return false, fmt.Errorf("error decoding response: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if result.Status != 200 {
|
|
||||||
return false, fmt.Errorf("API error: %s", result.Msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
return shouldRetryHTTP(resp.StatusCode), nil
|
|
||||||
})
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return result.Result, result.SessID, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// uploadFileWithDestination uploads a file directly to a specified folder using file content reader.
|
|
||||||
func (f *Fs) uploadFileWithDestination(ctx context.Context, uploadURL, sessID, fileName string, fileContent io.Reader, dirPath string) (string, error) {
|
|
||||||
destinationPath := f.fromStandardPath(dirPath)
|
|
||||||
encodedFileName := f.fromStandardPath(fileName)
|
|
||||||
pr, pw := io.Pipe()
|
|
||||||
writer := multipart.NewWriter(pw)
|
|
||||||
isDeletionRequired := false
|
|
||||||
go func() {
|
|
||||||
defer func() {
|
|
||||||
if err := pw.Close(); err != nil {
|
|
||||||
fs.Logf(nil, "Failed to close: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
_ = writer.WriteField("sess_id", sessID)
|
|
||||||
_ = writer.WriteField("utype", "prem")
|
|
||||||
_ = writer.WriteField("fld_path", destinationPath)
|
|
||||||
|
|
||||||
part, err := writer.CreateFormFile("file_0", encodedFileName)
|
|
||||||
if err != nil {
|
|
||||||
pw.CloseWithError(fmt.Errorf("failed to create form file: %w", err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := io.Copy(part, fileContent); err != nil {
|
|
||||||
isDeletionRequired = true
|
|
||||||
pw.CloseWithError(fmt.Errorf("failed to copy file content: %w", err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := writer.Close(); err != nil {
|
|
||||||
pw.CloseWithError(fmt.Errorf("failed to close writer: %w", err))
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
var fileCode string
|
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
|
||||||
req, err := http.NewRequestWithContext(ctx, "POST", uploadURL, pr)
|
|
||||||
if err != nil {
|
|
||||||
return false, fmt.Errorf("failed to create upload request: %w", err)
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
|
||||||
|
|
||||||
resp, err := f.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return shouldRetry(err), fmt.Errorf("failed to send upload request: %w", err)
|
|
||||||
}
|
|
||||||
defer respBodyClose(resp.Body)
|
|
||||||
|
|
||||||
var result []struct {
|
|
||||||
FileCode string `json:"file_code"`
|
|
||||||
FileStatus string `json:"file_status"`
|
|
||||||
}
|
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
|
||||||
return false, fmt.Errorf("failed to parse upload response: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(result) == 0 || result[0].FileStatus != "OK" {
|
|
||||||
return false, fmt.Errorf("upload failed with status: %s", result[0].FileStatus)
|
|
||||||
}
|
|
||||||
|
|
||||||
fileCode = result[0].FileCode
|
|
||||||
return shouldRetryHTTP(resp.StatusCode), nil
|
|
||||||
})
|
|
||||||
|
|
||||||
if err != nil && isDeletionRequired {
|
|
||||||
// Attempt to delete the file if upload fails
|
|
||||||
_ = f.deleteFile(ctx, destinationPath+"/"+fileName)
|
|
||||||
}
|
|
||||||
|
|
||||||
return fileCode, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// respBodyClose to check body response.
|
|
||||||
func respBodyClose(responseBody io.Closer) {
|
|
||||||
if cerr := responseBody.Close(); cerr != nil {
|
|
||||||
fmt.Printf("Error closing response body: %v\n", cerr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,112 +0,0 @@
package filelu

import (
    "context"
    "errors"
    "fmt"
    "path"
    "strings"
    "time"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/hash"
)

// errFileNotFound represents a file not found error
var errFileNotFound error = errors.New("file not found")

// getFileCode retrieves the file code for a given file path
func (f *Fs) getFileCode(ctx context.Context, filePath string) (string, error) {
    // Prepare parent directory
    parentDir := path.Dir(filePath)

    // Call List to get all the files
    result, err := f.getFolderList(ctx, parentDir)
    if err != nil {
        return "", err
    }

    for _, file := range result.Result.Files {
        filePathFromServer := parentDir + "/" + file.Name
        if parentDir == "/" {
            filePathFromServer = "/" + file.Name
        }
        if filePath == filePathFromServer {
            return file.FileCode, nil
        }
    }

    return "", errFileNotFound
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
    return f.features
}

func (f *Fs) fromStandardPath(remote string) string {
    return f.opt.Enc.FromStandardPath(remote)
}

func (f *Fs) toStandardPath(remote string) string {
    return f.opt.Enc.ToStandardPath(remote)
}

// Hashes returns an empty hash set, indicating no hash support
func (f *Fs) Hashes() hash.Set {
    return hash.NewHashSet() // Properly creates an empty hash set
}

// Name returns the remote name
func (f *Fs) Name() string {
    return f.name
}

// Root returns the root path
func (f *Fs) Root() string {
    return f.root
}

// Precision returns the precision of the remote
func (f *Fs) Precision() time.Duration {
    return fs.ModTimeNotSupported
}

func (f *Fs) String() string {
    return fmt.Sprintf("FileLu root '%s'", f.root)
}

// isFileCode checks if a string looks like a file code
func isFileCode(s string) bool {
    if len(s) != 12 {
        return false
    }
    for _, c := range s {
        if !((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9')) {
            return false
        }
    }
    return true
}

func shouldRetry(err error) bool {
    return fserrors.ShouldRetry(err)
}

func shouldRetryHTTP(code int) bool {
    return code == 429 || code >= 500
}

func rootSplit(absPath string) (bucket, bucketPath string) {
    // No bucket
    if absPath == "" {
        return "", ""
    }
    slash := strings.IndexRune(absPath, '/')
    // Bucket but no path
    if slash < 0 {
        return absPath, ""
    }
    return absPath[:slash], absPath[slash+1:]
}
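For illustration, a short sketch of how the helpers above behave. The values are hypothetical, and the snippet assumes it sits in the filelu package alongside rootSplit and isFileCode.

// exampleHelpers demonstrates rootSplit (splits off the first path segment)
// and isFileCode (accepts exactly twelve lowercase letters or digits).
func exampleHelpers() {
    bucket, rest := rootSplit("photos/2024/img.jpg")
    fmt.Println(bucket, rest) // photos 2024/img.jpg

    bucket, rest = rootSplit("photos")
    fmt.Println(bucket, rest) // photos, with an empty rest

    fmt.Println(isFileCode("abc123def456")) // true: 12 lowercase letters/digits
    fmt.Println(isFileCode("ABC123"))       // false: wrong length and uppercase
}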
@@ -1,259 +0,0 @@
package filelu

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "path"
    "regexp"
    "strconv"
    "strings"
    "time"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/hash"
)

// Object describes a FileLu object
type Object struct {
    fs      *Fs
    remote  string
    size    int64
    modTime time.Time
}

// NewObject creates a new Object for the given remote path
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
    var filePath string
    filePath = path.Join(f.root, remote)
    filePath = "/" + strings.Trim(filePath, "/")

    // Get File code
    fileCode, err := f.getFileCode(ctx, filePath)
    if err != nil {
        return nil, fs.ErrorObjectNotFound
    }

    // Get File info
    fileInfos, err := f.getFileInfo(ctx, fileCode)
    if err != nil {
        return nil, fmt.Errorf("failed to get file info: %w", err)
    }

    fileInfo := fileInfos.Result[0]
    size, _ := strconv.ParseInt(fileInfo.Size, 10, 64)

    returnedRemote := remote
    return &Object{
        fs:      f,
        remote:  returnedRemote,
        size:    size,
        modTime: time.Now(),
    }, nil
}

// Open opens the object for reading
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
    filePath := path.Join(o.fs.root, o.remote)
    // Get direct link
    directLink, size, err := o.fs.getDirectLink(ctx, filePath)
    if err != nil {
        return nil, fmt.Errorf("failed to get direct link: %w", err)
    }

    o.size = size

    // Offset and Count for range download
    var offset int64
    var count int64
    fs.FixRangeOption(options, o.size)
    for _, option := range options {
        switch x := option.(type) {
        case *fs.RangeOption:
            offset, count = x.Decode(o.size)
            if count < 0 {
                count = o.size - offset
            }
        case *fs.SeekOption:
            offset = x.Offset
            count = o.size
        default:
            if option.Mandatory() {
                fs.Logf(o, "Unsupported mandatory option: %v", option)
            }
        }
    }

    var reader io.ReadCloser
    err = o.fs.pacer.Call(func() (bool, error) {
        req, err := http.NewRequestWithContext(ctx, "GET", directLink, nil)
        if err != nil {
            return false, fmt.Errorf("failed to create download request: %w", err)
        }

        resp, err := o.fs.client.Do(req)
        if err != nil {
            return shouldRetry(err), fmt.Errorf("failed to download file: %w", err)
        }

        if resp.StatusCode != http.StatusOK {
            defer func() {
                if err := resp.Body.Close(); err != nil {
                    fs.Logf(nil, "Failed to close response body: %v", err)
                }
            }()
            return false, fmt.Errorf("failed to download file: HTTP %d", resp.StatusCode)
        }

        // Wrap the response body to handle offset and count
        currentContents, err := io.ReadAll(resp.Body)
        if err != nil {
            return false, fmt.Errorf("failed to read response body: %w", err)
        }

        if offset > 0 {
            if offset > int64(len(currentContents)) {
                return false, fmt.Errorf("offset %d exceeds file size %d", offset, len(currentContents))
            }
            currentContents = currentContents[offset:]
        }
        if count > 0 && count < int64(len(currentContents)) {
            currentContents = currentContents[:count]
        }
        reader = io.NopCloser(bytes.NewReader(currentContents))

        return false, nil
    })
    if err != nil {
        return nil, err
    }

    return reader, nil
}

// Update updates the object with new data
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
    if src.Size() <= 0 {
        return fs.ErrorCantUploadEmptyFiles
    }

    err := o.fs.uploadFile(ctx, in, o.remote)
    if err != nil {
        return fmt.Errorf("failed to upload file: %w", err)
    }
    o.size = src.Size()
    return nil
}

// Remove deletes the object from FileLu
func (o *Object) Remove(ctx context.Context) error {
    fullPath := "/" + strings.Trim(path.Join(o.fs.root, o.remote), "/")

    err := o.fs.deleteFile(ctx, fullPath)
    if err != nil {
        return err
    }
    fs.Infof(o.fs, "Successfully deleted file: %s", fullPath)
    return nil
}

// Hash returns the MD5 hash of an object
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
    if t != hash.MD5 {
        return "", hash.ErrUnsupported
    }

    var fileCode string
    if isFileCode(o.fs.root) {
        fileCode = o.fs.root
    } else {
        matches := regexp.MustCompile(`\((.*?)\)`).FindAllStringSubmatch(o.remote, -1)
        for _, match := range matches {
            if len(match) > 1 && len(match[1]) == 12 {
                fileCode = match[1]
                break
            }
        }
    }
    if fileCode == "" {
        return "", fmt.Errorf("no valid file code found in the remote path")
    }

    apiURL := fmt.Sprintf("%s/file/info?file_code=%s&key=%s",
        o.fs.endpoint, url.QueryEscape(fileCode), url.QueryEscape(o.fs.opt.Key))

    var result struct {
        Status int    `json:"status"`
        Msg    string `json:"msg"`
        Result []struct {
            Hash string `json:"hash"`
        } `json:"result"`
    }
    err := o.fs.pacer.Call(func() (bool, error) {
        req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
        if err != nil {
            return false, err
        }
        resp, err := o.fs.client.Do(req)
        if err != nil {
            return shouldRetry(err), err
        }
        defer func() {
            if err := resp.Body.Close(); err != nil {
                fs.Logf(nil, "Failed to close response body: %v", err)
            }
        }()

        if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
            return false, err
        }
        return shouldRetryHTTP(resp.StatusCode), nil
    })
    if err != nil {
        return "", err
    }
    if result.Status != 200 || len(result.Result) == 0 {
        return "", fmt.Errorf("error: unable to fetch hash: %s", result.Msg)
    }

    return result.Result[0].Hash, nil
}

// String returns a string representation of the object
func (o *Object) String() string {
    return o.remote
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
    return o.fs
}

// Remote returns the remote path
func (o *Object) Remote() string {
    return o.remote
}

// Size returns the size of the object
func (o *Object) Size() int64 {
    return o.size
}

// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
    return o.modTime
}

// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
    return fs.ErrorCantSetModTime
}

// Storable indicates whether the object is storable
func (o *Object) Storable() bool {
    return true
}
@@ -1,16 +0,0 @@
package filelu_test

import (
    "testing"

    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests for the FileLu backend
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName:      "TestFileLu:",
        NilObject:       nil,
        SkipInvalidUTF8: true,
    })
}
@@ -1,15 +0,0 @@
package filelu

import (
    "fmt"
)

// parseStorageToBytes converts a storage value expressed in gigabytes (e.g. "10") to bytes
func parseStorageToBytes(storage string) (int64, error) {
    var gb float64
    _, err := fmt.Sscanf(storage, "%f", &gb)
    if err != nil {
        return 0, fmt.Errorf("failed to parse storage: %w", err)
    }
    return int64(gb * 1024 * 1024 * 1024), nil
}
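As a quick illustration of the conversion above: the input is treated as gigabytes and multiplied by 1024^3. The snippet below uses hypothetical values and assumes it lives in the same filelu package.

// exampleParseStorage shows the GB-to-bytes conversion: "2.5" -> 2.5 * 1024^3 = 2684354560.
func exampleParseStorage() {
    n, err := parseStorageToBytes("2.5")
    if err != nil {
        fmt.Println("parse error:", err)
        return
    }
    fmt.Println(n) // 2684354560
}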
@@ -1,900 +0,0 @@
// Package filescom provides an interface to the Files.com
// object storage system.
package filescom

import (
    "context"
    "errors"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "path"
    "slices"
    "strings"
    "time"

    files_sdk "github.com/Files-com/files-sdk-go/v3"
    "github.com/Files-com/files-sdk-go/v3/bundle"
    "github.com/Files-com/files-sdk-go/v3/file"
    file_migration "github.com/Files-com/files-sdk-go/v3/filemigration"
    "github.com/Files-com/files-sdk-go/v3/folder"
    "github.com/Files-com/files-sdk-go/v3/session"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/config/obscure"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/encoder"
    "github.com/rclone/rclone/lib/pacer"
)

/*
Run of rclone info
stringNeedsEscaping = []rune{
    '/', '\x00'
}
maxFileLength = 512 // for 1 byte unicode characters
maxFileLength = 512 // for 2 byte unicode characters
maxFileLength = 512 // for 3 byte unicode characters
maxFileLength = 512 // for 4 byte unicode characters
canWriteUnnormalized = true
canReadUnnormalized = true
canReadRenormalized = true
canStream = true
*/

const (
    minSleep      = 10 * time.Millisecond
    maxSleep      = 2 * time.Second
    decayConstant = 2 // bigger for slower decay, exponential

    folderNotEmpty = "processing-failure/folder-not-empty"
)

// Register with Fs
func init() {
    fs.Register(&fs.RegInfo{
        Name:        "filescom",
        Description: "Files.com",
        NewFs:       NewFs,
        Options: []fs.Option{
            {
                Name: "site",
                Help: "Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com).",
            }, {
                Name: "username",
                Help: "The username used to authenticate with Files.com.",
            }, {
                Name:       "password",
                Help:       "The password used to authenticate with Files.com.",
                IsPassword: true,
            }, {
                Name:      "api_key",
                Help:      "The API key used to authenticate with Files.com.",
                Advanced:  true,
                Sensitive: true,
            }, {
                Name:     config.ConfigEncoding,
                Help:     config.ConfigEncodingHelp,
                Advanced: true,
                Default: (encoder.Display |
                    encoder.EncodeBackSlash |
                    encoder.EncodeRightSpace |
                    encoder.EncodeRightCrLfHtVt |
                    encoder.EncodeInvalidUtf8),
            }},
    })
}

// Options defines the configuration for this backend
type Options struct {
    Site     string               `config:"site"`
    Username string               `config:"username"`
    Password string               `config:"password"`
    APIKey   string               `config:"api_key"`
    Enc      encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote files.com server
type Fs struct {
    name            string                 // name of this remote
    root            string                 // the path we are working on
    opt             Options                // parsed options
    features        *fs.Features           // optional features
    fileClient      *file.Client           // the connection to the file API
    folderClient    *folder.Client         // the connection to the folder API
    migrationClient *file_migration.Client // the connection to the file migration API
    bundleClient    *bundle.Client         // the connection to the bundle API
    pacer           *fs.Pacer              // pacer for API calls
}

// Object describes a files object
//
// Will definitely have info but maybe not meta
type Object struct {
    fs       *Fs       // what this object is part of
    remote   string    // The remote path
    size     int64     // size of the object
    crc32    string    // CRC32 of the object content
    md5      string    // MD5 of the object content
    mimeType string    // Content-Type of the object
    modTime  time.Time // modification time of the object
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
    return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
    return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
    return fmt.Sprintf("files root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
    return f.features
}

// Encode remote and turn it into an absolute path in the share
func (f *Fs) absPath(remote string) string {
    return f.opt.Enc.FromStandardPath(path.Join(f.root, remote))
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
    429, // Too Many Requests.
    500, // Internal Server Error
    502, // Bad Gateway
    503, // Service Unavailable
    504, // Gateway Timeout
    509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
    if fserrors.ContextError(ctx, &err) {
        return false, err
    }

    if apiErr, ok := err.(files_sdk.ResponseError); ok {
        if slices.Contains(retryErrorCodes, apiErr.HttpCode) {
            fs.Debugf(nil, "Retrying API error %v", err)
            return true, err
        }
    }

    return fserrors.ShouldRetry(err), err
}

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *files_sdk.File, err error) {
    params := files_sdk.FileFindParams{
        Path: f.absPath(path),
    }

    var file files_sdk.File
    err = f.pacer.Call(func() (bool, error) {
        file, err = f.fileClient.Find(params, files_sdk.WithContext(ctx))
        return shouldRetry(ctx, err)
    })
    if err != nil {
        return nil, err
    }

    return &file, nil
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    }

    root = strings.Trim(root, "/")

    config, err := newClientConfig(ctx, opt)
    if err != nil {
        return nil, err
    }

    f := &Fs{
        name:            name,
        root:            root,
        opt:             *opt,
        fileClient:      &file.Client{Config: config},
        folderClient:    &folder.Client{Config: config},
        migrationClient: &file_migration.Client{Config: config},
        bundleClient:    &bundle.Client{Config: config},
        pacer:           fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
    }
    f.features = (&fs.Features{
        CaseInsensitive:          true,
        CanHaveEmptyDirectories:  true,
        ReadMimeType:             true,
        DirModTimeUpdatesOnWrite: true,
    }).Fill(ctx, f)

    if f.root != "" {
        info, err := f.readMetaDataForPath(ctx, "")
        if err == nil && !info.IsDir() {
            f.root = path.Dir(f.root)
            if f.root == "." {
                f.root = ""
            }
            return f, fs.ErrorIsFile
        }
    }

    return f, err
}

func newClientConfig(ctx context.Context, opt *Options) (config files_sdk.Config, err error) {
    if opt.Site != "" {
        if strings.Contains(opt.Site, ".") {
            config.EndpointOverride = opt.Site
        } else {
            config.Subdomain = opt.Site
        }

        _, err = url.ParseRequestURI(config.Endpoint())
        if err != nil {
            err = fmt.Errorf("invalid domain or subdomain: %v", opt.Site)
            return
        }
    }

    config = config.Init().SetCustomClient(fshttp.NewClient(ctx))

    if opt.APIKey != "" {
        config.APIKey = opt.APIKey
        return
    }

    if opt.Username == "" {
        err = errors.New("username not found")
        return
    }
    if opt.Password == "" {
        err = errors.New("password not found")
        return
    }
    opt.Password, err = obscure.Reveal(opt.Password)
    if err != nil {
        return
    }

    sessionClient := session.Client{Config: config}
    params := files_sdk.SessionCreateParams{
        Username: opt.Username,
        Password: opt.Password,
    }

    thisSession, err := sessionClient.Create(params, files_sdk.WithContext(ctx))
    if err != nil {
        err = fmt.Errorf("couldn't create session: %w", err)
        return
    }

    config.SessionId = thisSession.Id
    return
}

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *files_sdk.File) (fs.Object, error) {
    o := &Object{
        fs:     f,
        remote: remote,
    }
    var err error
    if file != nil {
        err = o.setMetaData(file)
    } else {
        err = o.readMetaData(ctx) // reads info and meta, returning an error
    }
    if err != nil {
        return nil, err
    }
    return o, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
    return f.newObjectWithInfo(ctx, remote, nil)
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    var it *folder.Iter
    params := files_sdk.FolderListForParams{
        Path: f.absPath(dir),
    }

    err = f.pacer.Call(func() (bool, error) {
        it, err = f.folderClient.ListFor(params, files_sdk.WithContext(ctx))
        return shouldRetry(ctx, err)
    })
    if err != nil {
        return nil, fmt.Errorf("couldn't list files: %w", err)
    }

    for it.Next() {
        item := ptr(it.File())
        remote := f.opt.Enc.ToStandardPath(item.DisplayName)
        remote = path.Join(dir, remote)
        if remote == dir {
            continue
        }

        if item.IsDir() {
            d := fs.NewDir(remote, item.ModTime())
            entries = append(entries, d)
        } else {
            o, err := f.newObjectWithInfo(ctx, remote, item)
            if err != nil {
                return nil, err
            }
            entries = append(entries, o)
        }
    }
    err = it.Err()
    if files_sdk.IsNotExist(err) {
        return nil, fs.ErrorDirNotFound
    }
    return
}

// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, err error) {
    // Create the directory for the object if it doesn't exist
    err = f.mkParentDir(ctx, remote)
    if err != nil {
        return
    }
    // Temporary Object under construction
    o = &Object{
        fs:     f,
        remote: remote,
    }
    return o, nil
}

// Put the object
//
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    // Temporary Object under construction
    fs := &Object{
        fs:     f,
        remote: src.Remote(),
    }
    return fs, fs.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    return f.Put(ctx, in, src, options...)
}

func (f *Fs) mkdir(ctx context.Context, path string) error {
    if path == "" || path == "." {
        return nil
    }

    params := files_sdk.FolderCreateParams{
        Path:         path,
        MkdirParents: ptr(true),
    }

    err := f.pacer.Call(func() (bool, error) {
        _, err := f.folderClient.Create(params, files_sdk.WithContext(ctx))
        return shouldRetry(ctx, err)
    })
    if files_sdk.IsExist(err) {
        return nil
    }
    return err
}

// Make the parent directory of remote
func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
    return f.mkdir(ctx, path.Dir(f.absPath(remote)))
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    return f.mkdir(ctx, f.absPath(dir))
}

// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
    o := Object{
        fs:     f,
        remote: dir,
    }
    return o.SetModTime(ctx, modTime)
}

// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
    path := f.absPath(dir)
    if path == "" || path == "." {
        return errors.New("can't purge root directory")
    }

    params := files_sdk.FileDeleteParams{
        Path:      path,
        Recursive: ptr(!check),
    }

    err := f.pacer.Call(func() (bool, error) {
        err := f.fileClient.Delete(params, files_sdk.WithContext(ctx))
        // Allow for eventual consistency deletion of child objects.
        if isFolderNotEmpty(err) {
            return true, err
        }
        return shouldRetry(ctx, err)
    })
    if err != nil {
        if files_sdk.IsNotExist(err) {
            return fs.ErrorDirNotFound
        } else if isFolderNotEmpty(err) {
            return fs.ErrorDirectoryNotEmpty
        }

        return fmt.Errorf("rmdir failed: %w", err)
    }
    return nil
}

// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    return f.purgeCheck(ctx, dir, true)
}

// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
    return time.Second
}

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dstObj fs.Object, err error) {
    srcObj, ok := src.(*Object)
    if !ok {
        fs.Debugf(src, "Can't copy - not same remote type")
        return nil, fs.ErrorCantCopy
    }
    err = srcObj.readMetaData(ctx)
    if err != nil {
        return
    }

    srcPath := srcObj.fs.absPath(srcObj.remote)
    dstPath := f.absPath(remote)
    if strings.EqualFold(srcPath, dstPath) {
        return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
    }

    // Create temporary object
    dstObj, err = f.createObject(ctx, remote)
    if err != nil {
        return
    }

    // Copy the object
    params := files_sdk.FileCopyParams{
        Path:        srcPath,
        Destination: dstPath,
        Overwrite:   ptr(true),
    }

    var action files_sdk.FileAction
    err = f.pacer.Call(func() (bool, error) {
        action, err = f.fileClient.Copy(params, files_sdk.WithContext(ctx))
        return shouldRetry(ctx, err)
    })
    if err != nil {
        return
    }

    err = f.waitForAction(ctx, action, "copy")
    if err != nil {
        return
    }

    err = dstObj.SetModTime(ctx, srcObj.modTime)
    return
}

// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
    return f.purgeCheck(ctx, dir, false)
}

// move a file or folder
func (f *Fs) move(ctx context.Context, src *Fs, srcRemote string, dstRemote string) (info *files_sdk.File, err error) {
    // Move the object
    params := files_sdk.FileMoveParams{
        Path:        src.absPath(srcRemote),
        Destination: f.absPath(dstRemote),
    }

    var action files_sdk.FileAction
    err = f.pacer.Call(func() (bool, error) {
        action, err = f.fileClient.Move(params, files_sdk.WithContext(ctx))
        return shouldRetry(ctx, err)
    })
    if err != nil {
        return nil, err
    }

    err = f.waitForAction(ctx, action, "move")
    if err != nil {
        return nil, err
    }

    info, err = f.readMetaDataForPath(ctx, dstRemote)
    return
}

func (f *Fs) waitForAction(ctx context.Context, action files_sdk.FileAction, operation string) (err error) {
    var migration files_sdk.FileMigration
    err = f.pacer.Call(func() (bool, error) {
        migration, err = f.migrationClient.Wait(action, func(migration files_sdk.FileMigration) {
            // noop
        }, files_sdk.WithContext(ctx))
        return shouldRetry(ctx, err)
    })
    if err == nil && migration.Status != "completed" {
        return fmt.Errorf("%v did not complete successfully: %v", operation, migration.Status)
    }
    return
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*Object)
    if !ok {
        fs.Debugf(src, "Can't move - not same remote type")
        return nil, fs.ErrorCantMove
    }

    // Create temporary object
    dstObj, err := f.createObject(ctx, remote)
    if err != nil {
        return nil, err
    }

    // Do the move
    info, err := f.move(ctx, srcObj.fs, srcObj.remote, dstObj.remote)
    if err != nil {
        return nil, err
    }

    err = dstObj.setMetaData(info)
    if err != nil {
        return nil, err
    }
    return dstObj, nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
    srcFs, ok := src.(*Fs)
    if !ok {
        fs.Debugf(srcFs, "Can't move directory - not same remote type")
        return fs.ErrorCantDirMove
    }

    // Check if destination exists
    _, err = f.readMetaDataForPath(ctx, dstRemote)
    if err == nil {
        return fs.ErrorDirExists
    }

    // Create temporary object
    dstObj, err := f.createObject(ctx, dstRemote)
    if err != nil {
        return
    }

    // Do the move
    _, err = f.move(ctx, srcFs, srcRemote, dstObj.remote)
    return
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (url string, err error) {
    params := files_sdk.BundleCreateParams{
        Paths: []string{f.absPath(remote)},
    }
    if expire < fs.DurationOff {
        params.ExpiresAt = ptr(time.Now().Add(time.Duration(expire)))
    }

    var bundle files_sdk.Bundle
    err = f.pacer.Call(func() (bool, error) {
        bundle, err = f.bundleClient.Create(params, files_sdk.WithContext(ctx))
        return shouldRetry(ctx, err)
    })

    url = bundle.Url
    return
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
    return hash.NewHashSet(hash.CRC32, hash.MD5)
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
    return o.fs
}

// Return a string version
func (o *Object) String() string {
    if o == nil {
        return "<nil>"
    }
    return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
    return o.remote
}

// Hash returns the MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
    switch t {
    case hash.CRC32:
        if o.crc32 == "" {
            return "", nil
        }
        return fmt.Sprintf("%08s", o.crc32), nil
    case hash.MD5:
        return o.md5, nil
    }
    return "", hash.ErrUnsupported
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
    return o.size
}

// setMetaData sets the metadata from info
func (o *Object) setMetaData(file *files_sdk.File) error {
    o.modTime = file.ModTime()

    if !file.IsDir() {
        o.size = file.Size
        o.crc32 = file.Crc32
        o.md5 = file.Md5
        o.mimeType = file.MimeType
    }

    return nil
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
    file, err := o.fs.readMetaDataForPath(ctx, o.remote)
    if err != nil {
        if files_sdk.IsNotExist(err) {
            return fs.ErrorObjectNotFound
        }
        return err
    }
    if file.IsDir() {
        return fs.ErrorIsDir
    }
    return o.setMetaData(file)
}

// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
    return o.modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
    params := files_sdk.FileUpdateParams{
        Path:          o.fs.absPath(o.remote),
        ProvidedMtime: &modTime,
    }

    var file files_sdk.File
    err = o.fs.pacer.Call(func() (bool, error) {
        file, err = o.fs.fileClient.Update(params, files_sdk.WithContext(ctx))
        return shouldRetry(ctx, err)
    })
    if err != nil {
        return err
    }
    return o.setMetaData(&file)
}

// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
    return true
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
    // Offset and Count for range download
    var offset, count int64
    fs.FixRangeOption(options, o.size)
    for _, option := range options {
        switch x := option.(type) {
        case *fs.RangeOption:
            offset, count = x.Decode(o.size)
            if count < 0 {
                count = o.size - offset
            }
        case *fs.SeekOption:
            offset = x.Offset
            count = o.size - offset
        default:
            if option.Mandatory() {
                fs.Logf(o, "Unsupported mandatory option: %v", option)
            }
        }
    }

    params := files_sdk.FileDownloadParams{
        Path: o.fs.absPath(o.remote),
    }

    headers := &http.Header{}
    headers.Set("Range", fmt.Sprintf("bytes=%v-%v", offset, offset+count-1))
    err = o.fs.pacer.Call(func() (bool, error) {
        _, err = o.fs.fileClient.Download(
            params,
            files_sdk.WithContext(ctx),
            files_sdk.RequestHeadersOption(headers),
            files_sdk.ResponseBodyOption(func(closer io.ReadCloser) error {
                in = closer
                return err
            }),
        )
        return shouldRetry(ctx, err)
    })
    return
}

// Returns a pointer to t - useful for returning pointers to constants
func ptr[T any](t T) *T {
    return &t
}

func isFolderNotEmpty(err error) bool {
    var re files_sdk.ResponseError
    ok := errors.As(err, &re)
    return ok && re.Type == folderNotEmpty
}

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
    uploadOpts := []file.UploadOption{
        file.UploadWithContext(ctx),
        file.UploadWithReader(in),
        file.UploadWithDestinationPath(o.fs.absPath(o.remote)),
        file.UploadWithProvidedMtime(src.ModTime(ctx)),
    }

    err := o.fs.pacer.Call(func() (bool, error) {
        err := o.fs.fileClient.Upload(uploadOpts...)
        return shouldRetry(ctx, err)
    })
    if err != nil {
        return err
    }

    return o.readMetaData(ctx)
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
    params := files_sdk.FileDeleteParams{
        Path: o.fs.absPath(o.remote),
    }

    return o.fs.pacer.Call(func() (bool, error) {
        err := o.fs.fileClient.Delete(params, files_sdk.WithContext(ctx))
        return shouldRetry(ctx, err)
    })
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
    return o.mimeType
}

// Check the interfaces are satisfied
var (
    _ fs.Fs           = (*Fs)(nil)
    _ fs.Purger       = (*Fs)(nil)
    _ fs.PutStreamer  = (*Fs)(nil)
    _ fs.Copier       = (*Fs)(nil)
    _ fs.Mover        = (*Fs)(nil)
    _ fs.DirMover     = (*Fs)(nil)
    _ fs.PublicLinker = (*Fs)(nil)
    _ fs.Object       = (*Object)(nil)
    _ fs.MimeTyper    = (*Object)(nil)
)
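Aside, for illustration only: every pacer.Call closure in the backend above follows the same contract, returning (retry, err) so the pacer can re-invoke it while retry is true, as with the HTTP codes in retryErrorCodes. Below is a minimal standalone sketch of that retry idiom in plain Go; it is not the rclone pacer, and the names and policy are illustrative.

package main

import (
    "errors"
    "fmt"
    "time"
)

// callWithRetry re-invokes fn while it reports the error as retryable,
// sleeping briefly between attempts, up to maxTries attempts.
func callWithRetry(maxTries int, fn func() (retry bool, err error)) error {
    var err error
    for try := 1; try <= maxTries; try++ {
        var retry bool
        retry, err = fn()
        if err == nil || !retry {
            return err
        }
        time.Sleep(10 * time.Millisecond)
    }
    return err
}

func main() {
    attempts := 0
    err := callWithRetry(5, func() (bool, error) {
        attempts++
        if attempts < 3 {
            // Pretend the server answered 429 Too Many Requests.
            return true, errors.New("HTTP 429")
        }
        return false, nil
    })
    fmt.Println("attempts:", attempts, "err:", err)
}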
Some files were not shown because too many files have changed in this diff.