Mirror of https://github.com/rclone/rclone.git (synced 2025-12-06 00:03:32 +00:00)

Compare commits: fix-5835-c...fix-b2-err (1 commit)
| Author | SHA1 | Date |
|---|---|---|
|  | ee87bf19c8 |  |
.github/ISSUE_TEMPLATE/Bug.md (vendored): 11 lines changed
@@ -9,7 +9,7 @@ We understand you are having a problem with rclone; we want to help you with that

**STOP and READ**
**YOUR POST WILL BE REMOVED IF IT IS LOW QUALITY**:
-Please show the effort you've put into solving the problem and please be specific.
+Please show the effort you've put in to solving the problem and please be specific.
People are volunteering their time to help! Low effort posts are not likely to get good answers!

If you think you might have found a bug, try to replicate it with the latest beta (or stable).

@@ -37,6 +37,7 @@ The Rclone Developers

-->

+#### The associated forum post URL from `https://forum.rclone.org`


@@ -64,11 +65,3 @@ The Rclone Developers

#### A log from the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)


-
-<!--- Please keep the note below for others who read your bug report. -->
-
-#### How to use GitHub
-
-* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
-* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
-* Subscribe to receive notifications on status change and new comments.
.github/ISSUE_TEMPLATE/Feature.md (vendored): 9 lines changed
@@ -26,6 +26,7 @@ The Rclone Developers

-->

+#### The associated forum post URL from `https://forum.rclone.org`


@@ -41,11 +42,3 @@ The Rclone Developers

#### How do you think rclone should be changed to solve that?


-
-<!--- Please keep the note below for others who read your feature request. -->
-
-#### How to use GitHub
-
-* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
-* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
-* Subscribe to receive notifications on status change and new comments.
.github/PULL_REQUEST_TEMPLATE.md (vendored): 2 lines changed
@@ -22,7 +22,7 @@ Link issues and relevant forum posts here.

#### Checklist

-- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-new-feature-or-bug-fix).
+- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
- [ ] I have added tests for all changes in this PR if appropriate.
- [ ] I have added documentation for the changes if appropriate.
- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).
.github/workflows/build.yml (vendored): 233 lines changed
@@ -12,36 +12,29 @@ on:
    tags:
      - '*'
  pull_request:
-  workflow_dispatch:
-    inputs:
-      manual:
-        required: true
-        default: true

jobs:
  build:
-    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
    timeout-minutes: 60
    strategy:
      fail-fast: false
      matrix:
-        job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.15', 'go1.16']
+        job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.13', 'go1.14', 'go1.15']

        include:
          - job_name: linux
            os: ubuntu-latest
-            go: '1.17.x'
+            go: '1.16.x'
            gotags: cmount
            build_flags: '-include "^linux/"'
            check: true
            quicktest: true
            racequicktest: true
-            librclonetest: true
            deploy: true

          - job_name: mac_amd64
            os: macOS-latest
-            go: '1.17.x'
+            go: '1.16.x'
            gotags: 'cmount'
            build_flags: '-include "^darwin/amd64" -cgo'
            quicktest: true

@@ -50,14 +43,14 @@ jobs:

          - job_name: mac_arm64
            os: macOS-latest
-            go: '1.17.x'
+            go: '1.16.x'
            gotags: 'cmount'
            build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
            deploy: true

          - job_name: windows_amd64
            os: windows-latest
-            go: '1.17.x'
+            go: '1.16.x'
            gotags: cmount
            build_flags: '-include "^windows/amd64" -cgo'
            build_args: '-buildmode exe'

@@ -67,7 +60,7 @@ jobs:

          - job_name: windows_386
            os: windows-latest
-            go: '1.17.x'
+            go: '1.16.x'
            gotags: cmount
            goarch: '386'
            cgo: '1'

@@ -78,20 +71,25 @@ jobs:

          - job_name: other_os
            os: ubuntu-latest
-            go: '1.17.x'
+            go: '1.16.x'
            build_flags: '-exclude "^(windows/|darwin/|linux/)"'
            compile_all: true
            deploy: true

-          - job_name: go1.15
+          - job_name: go1.13
            os: ubuntu-latest
-            go: '1.15.x'
+            go: '1.13.x'
            quicktest: true

+          - job_name: go1.14
+            os: ubuntu-latest
+            go: '1.14.x'
+            quicktest: true
+            racequicktest: true

-          - job_name: go1.16
+          - job_name: go1.15
            os: ubuntu-latest
-            go: '1.16.x'
+            go: '1.15.x'
            quicktest: true
            racequicktest: true
@@ -189,13 +187,12 @@ jobs:
        make racequicktest
      if: matrix.racequicktest

-    - name: Run librclone tests
+    - name: Code quality test
      shell: bash
      run: |
-        make -C librclone/ctest test
-        make -C librclone/ctest clean
-        librclone/python/test_rclone.py
-      if: matrix.librclonetest
+        make build_dep
+        make check
+      if: matrix.check

    - name: Compile all architectures test
      shell: bash
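The removed step above exercised librclone's C test suite and Python binding. For orientation only: rclone also ships a Go wrapper around the same RPC surface, and a minimal sketch of calling it (assuming the `Initialize`/`RPC`/`Finalize` API of `github.com/rclone/rclone/librclone/librclone`) would look like:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/librclone/librclone"
)

func main() {
	// Start the embedded rclone engine once per process.
	librclone.Initialize()
	defer librclone.Finalize()

	// Methods and bodies mirror the rclone rc API; input and output are JSON.
	out, status := librclone.RPC("core/version", "{}")
	fmt.Println(status, out)
}
```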
@@ -216,126 +213,98 @@ jobs:
        # Deploy binaries if enabled in config && not a PR && not a fork
        if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'

-  lint:
-    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
-    timeout-minutes: 30
-    name: "lint"
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-
-      - name: Code quality test
-        uses: golangci/golangci-lint-action@v2
-        with:
-          # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
-          version: latest
-
  android:
-    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
    timeout-minutes: 30
    name: "android-all"
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
      # Upgrade together with NDK version
-      - name: Set up Go 1.14
+      - name: Set up Go 1.16
        uses: actions/setup-go@v1
        with:
-          go-version: 1.14
+          go-version: 1.16
      # Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
      - name: Force NDK version
-        run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;21.4.7075529" | grep -v = || true
+        run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;22.1.7171670" | grep -v = || true
      - name: Go module cache
        uses: actions/cache@v2
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-
      - name: Set global environment variables
        shell: bash
        run: |
          echo "VERSION=$(make version)" >> $GITHUB_ENV
      - name: build native rclone
        run: |
          make
      - name: arm-v7a Set environment variables
        shell: bash
        run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=arm' >> $GITHUB_ENV
          echo 'GOARM=7' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
      - name: arm-v7a build
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
-      - name: install gomobile
-        run: |
-          go get golang.org/x/mobile/cmd/gobind
-          go get golang.org/x/mobile/cmd/gomobile
-          env PATH=$PATH:~/go/bin gomobile init
-      - name: arm-v7a gomobile build
-        run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
      - name: arm64-v8a Set environment variables
        shell: bash
        run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=arm64' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
      - name: arm64-v8a build
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
      - name: x86 Set environment variables
        shell: bash
        run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=386' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
      - name: x86 build
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
      - name: x64 Set environment variables
        shell: bash
        run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=amd64' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
      - name: x64 build
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .

      - name: Upload artifacts
        run: |
          make ci_upload
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
        # Upload artifacts if not a PR && not a fork
        if: github.head_ref == '' && github.repository == 'rclone/rclone'
@@ -7,7 +7,6 @@ on:

jobs:
  build:
-    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:

@@ -6,7 +6,6 @@ on:

jobs:
  build:
-    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:
@@ -32,28 +31,3 @@ jobs:
          publish: true
          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
-
-  build_docker_volume_plugin:
-    if: github.repository == 'rclone/rclone'
-    needs: build
-    runs-on: ubuntu-latest
-    name: Build docker plugin job
-    steps:
-      - name: Checkout master
-        uses: actions/checkout@v2
-        with:
-          fetch-depth: 0
-      - name: Build and publish docker plugin
-        shell: bash
-        run: |
-          VER=${GITHUB_REF#refs/tags/}
-          PLUGIN_USER=rclone
-          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
-            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
-          for PLUGIN_ARCH in amd64 arm64 arm/v7 ;do
-            export PLUGIN_USER PLUGIN_ARCH
-            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
-            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
-          done
-          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
-          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
.gitignore (vendored): 4 lines changed
@@ -11,7 +11,3 @@ rclone.iml
*.log
*.iml
fuzz-build.zip
-*.orig
-*.rej
-Thumbs.db
-__pycache__
@@ -5,7 +5,7 @@ linters:
  - deadcode
  - errcheck
  - goimports
-  - revive
+  - golint
  - ineffassign
  - structcheck
  - varcheck

@@ -24,7 +24,3 @@ issues:

  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
  max-same-issues: 0
-
-run:
-  # timeout for analysis, e.g. 30s, 5m, default is 1m
-  timeout: 10m
CONTRIBUTING.md: 198 lines changed
@@ -12,164 +12,95 @@ When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](https://beta.rclone.org/):

-* Rclone version (e.g. output from `rclone version`)
-* Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
+* Rclone version (e.g. output from `rclone -V`)
+* Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
  * if the log contains secrets then edit the file with a text editor first to obscure them

-## Submitting a new feature or bug fix ##
+## Submitting a pull request ##

If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub.

-If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.
+If it is a big feature then make an issue first so it can be discussed.

-To prepare your pull request first press the fork button on [rclone's GitHub
+You'll need a Go environment set up with GOPATH set. See [the Go
+getting started docs](https://golang.org/doc/install) for more info.
+
+First in your web browser press the fork button on [rclone's GitHub
page](https://github.com/rclone/rclone).

-Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
-
-Next open your terminal, change directory to your preferred folder and initialise your local rclone project:
+Now in your terminal

    git clone https://github.com/rclone/rclone.git
    cd rclone
-    git remote rename origin upstream
-    # if you have SSH keys setup in your GitHub account:
-    git remote add origin git@github.com:YOURUSER/rclone.git
-    # otherwise:
-    git remote add origin https://github.com/YOURUSER/rclone.git
-
-Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.
-
-Now [install Go](https://golang.org/doc/install) and verify your installation:
-
-    go version
-
-Great, you can now compile and execute your own version of rclone:
-
-    go build
-    ./rclone version
-
-(Note that you can also replace `go build` with `make`, which will include a
-more accurate version number in the executable as well as enable you to specify
-more build options.) Finally make a branch to add your new feature
+Make a branch to add your new feature

    git checkout -b my-new-feature

And get hacking.

-You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).
-
-When ready - run the unit tests for the code you changed
+When ready - test the affected functionality and run the unit tests for the code you changed

    cd folder/with/changed/files
    go test -v

Note that you may need to make a test remote, e.g. `TestSwift` for some
of the unit tests.

-This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.
+Note the top level Makefile targets
+
+* make check
+* make test
+
+Both of these will be run by Travis when you make a pull request but
+you can do this yourself locally too. These require some extra go
+packages which you can install with
+
+* make build_dep

Make sure you

-* Add [unit tests](#testing) for a new feature.
-* Add [documentation](#writing-documentation) for a new feature.
-* [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
+* Follow the [commit message guidelines](#commit-messages).
+* Add [unit tests](#testing) for a new feature
+* squash commits down to one per feature
+* rebase to master with `git rebase master`

-When you are done with that push your changes to Github:
+When you are done with that

    git push -u origin my-new-feature

-and open the GitHub website to [create your pull
+Go to the GitHub website and click [Create pull
request](https://help.github.com/articles/creating-a-pull-request/).

-Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.
+You patch will get reviewed and you might get asked to fix some stuff.

-You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
+If so, then make the changes in the same branch, squash the commits (make multiple commits one commit) by running:
+```
+git log # See how many commits you want to squash
+git reset --soft HEAD~2 # This squashes the 2 latest commits together.
+git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
+git commit # Add a new commit message.
+git push --force # Push the squashed commit to your GitHub repo.
+# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
+```

-## Using Git and Github ##
-
-### Committing your changes ###
-
-Follow the guideline for [commit messages](#commit-messages) and then:
-
-    git checkout my-new-feature # To switch to your branch
-    git status # To see the new and changed files
-    git add FILENAME # To select FILENAME for the commit
-    git status # To verify the changes to be committed
-    git commit # To do the commit
-    git log # To verify the commit. Use q to quit the log
-
-You can modify the message or changes in the latest commit using:
-
-    git commit --amend
-
-If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
-
-### Replacing your previously pushed commits ###
-
-Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
-
-Your previously pushed commits are replaced by:
-
-    git push --force origin my-new-feature
-
-### Basing your changes on the latest master ###
-
-To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
-
-    git checkout master
-    git fetch upstream
-    git merge --ff-only
-    git push origin --follow-tags # optional update of your fork in GitHub
-    git checkout my-new-feature
-    git rebase master
-
-If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
-
-### Squashing your commits ###
-
-To combine your commits into one commit:
-
-    git log # To count the commits to squash, e.g. the last 2
-    git reset --soft HEAD~2 # To undo the 2 latest commits
-    git status # To check everything is as expected
-
-If everything is fine, then make the new combined commit:
-
-    git commit # To commit the undone commits as one
-
-otherwise, you may roll back using:
-
-    git reflog # To check that HEAD{1} is your previous state
-    git reset --soft 'HEAD@{1}' # To roll back to your previous state
-
-If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
-
-Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
-
-### GitHub Continuous Integration ###
+## CI for your fork ##

rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.

## Testing ##

### Quick testing ###

rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.

    go test -v ./...

You can also use `make`, if supported by your platform

    make quicktest

The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.

### Backend testing ###

rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud
storage systems by mocking all their interfaces, rclone unit tests can
@@ -203,19 +134,12 @@ project root:

    go install github.com/rclone/rclone/fstest/test_all
    test_all -backend drive

-### Full integration testing ###
-
If you want to run all the integration tests against all the remotes,
then change into the project root and run

-    make check
    make test

-The commands may require some extra go packages which you can install with
-
-    make build_dep
-
-The full integration tests are run daily on the integration test server. You can
+This command is run daily on the integration test server. You can
find the results at https://pub.rclone.org/integration-tests/

## Code Organisation ##
@@ -223,17 +147,16 @@ find the results at https://pub.rclone.org/integration-tests/

Rclone code is organised into a small number of top level directories
with modules beneath.

-* backend - the rclone backends for interfacing to cloud providers -
+* backend - the rclone backends for interfacing to cloud providers -
  * all - import this to load all the cloud providers
  * ...providers
* bin - scripts for use while building or maintaining rclone
* cmd - the rclone commands
  * all - import this to load all the commands
  * ...commands
-* cmdtest - end-to-end tests of commands, flags, environment variables,...
* docs - the documentation and website
  * content - adjust these docs only - everything else is autogenerated
-  * command - these are auto-generated - edit the corresponding .go file
+  * command - these are auto generated - edit the corresponding .go file
* fs - main rclone definitions - minimal amount of code
  * accounting - bandwidth limiting and statistics
  * asyncreader - an io.Reader which reads ahead
@@ -275,39 +198,18 @@ If you add a new general flag (not for a backend), then document it in
alphabetical order.

If you add a new backend option/flag, then it should be documented in
-the source file in the `Help:` field.
-
-* Start with the most important information about the option,
-  as a single sentence on a single line.
-  * This text will be used for the command-line flag help.
-  * It will be combined with other information, such as any default value,
-    and the result will look odd if not written as a single sentence.
-  * It should end with a period/full stop character, which will be shown
-    in docs but automatically removed when producing the flag help.
-  * Try to keep it below 80 characters, to reduce text wrapping in the terminal.
-* More details can be added in a new paragraph, after an empty line (`"\n\n"`).
-  * Like with docs generated from Markdown, a single line break is ignored
-    and two line breaks creates a new paragraph.
-  * This text will be shown to the user in `rclone config`
-    and in the docs (where it will be added by `make backenddocs`,
-    normally run some time before next release).
-* To create options of enumeration type use the `Examples:` field.
-  * Each example value have their own `Help:` field, but they are treated
-    a bit different than the main option help text. They will be shown
-    as an unordered list, therefore a single line break is enough to
-    create a new list item. Also, for enumeration texts like name of
-    countries, it looks better without an ending period/full stop character.
+the source file in the `Help:` field. The first line of this is used
+for the flag help, the remainder is shown to the user in `rclone
+config` and is added to the docs with `make backenddocs`.

The only documentation you need to edit are the `docs/content/*.md`
-files. The `MANUAL.*`, `rclone.1`, website, etc. are all auto-generated
+files. The `MANUAL.*`, `rclone.1`, web site, etc. are all auto generated
from those during the release process. See the `make doc` and `make
website` targets in the Makefile if you are interested in how. You
don't need to run these when adding a feature.

Documentation for rclone sub commands is with their code, e.g.
-`cmd/ls/ls.go`. Write flag help strings as a single sentence on a single
-line, without a period/full stop character at the end, as it will be
-combined unmodified with other information (such as any default value).
+`cmd/ls/ls.go`.

Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy.
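To make the removed guidance above concrete, a hypothetical backend option written to those rules might look like the sketch below (the option name, help text, and default are invented for illustration):

```go
// Illustrative only: an invented option whose Help follows the rules above -
// one summary sentence ending in a full stop, then details after a blank line.
fs.Option{
	Name: "list_retries",
	Help: `Number of times to retry a directory listing.

Listings can fail on transient errors, and raising this value makes
rclone retry the listing before giving up.`,
	Default:  3,
	Advanced: true,
}
```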
@@ -350,7 +252,7 @@ And here is an example of a longer one:

```
mount: fix hang on errored upload

-In certain circumstances, if an upload failed then the mount could hang
+In certain circumstances if an upload failed then the mount could hang
indefinitely. This was fixed by closing the read pipe after the Put
completed. This will cause the write side to return a pipe closed
error fixing the hang.
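For contrast with the longer example above, a minimal single-line message in the same `area: description` house style could be (illustrative):

```
docs: fix typo in the contributing guide
```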
@@ -382,7 +284,7 @@ and `go.sum` in the same commit as your other changes.

If you need to update a dependency then run

-    GO111MODULE=on go get -u golang.org/x/crypto
+    GO111MODULE=on go get -u github.com/pkg/errors

Check in a single commit as above.
@@ -425,8 +327,8 @@ Research

Getting going

* Create `backend/remote/remote.go` (copy this from a similar remote)
-  * box is a good one to start from if you have a directory-based remote
-  * b2 is a good one to start from if you have a bucket-based remote
+  * box is a good one to start from if you have a directory based remote
+  * b2 is a good one to start from if you have a bucket based remote
* Add your remote to the imports in `backend/all/all.go`
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable.
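As a rough sketch of what the first step above involves, a new backend registers itself with the fs registry from its `init` function, mirroring the `fs.Register`/`fs.RegInfo` pattern visible in the alias and azureblob hunks later in this diff (all names below are placeholders and `NewFs` is stubbed):

```go
// Package remote is a placeholder skeleton for a new backend.
package remote

import (
	"context"
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "remote",         // the type users select in `rclone config`
		Description: "Example remote", // shown in the config wizard
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: "endpoint",
			Help: "Endpoint for the service.\n\nLeave blank normally.",
		}},
	})
}

// NewFs constructs the backend from its config - stubbed out here.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	return nil, errors.New("not implemented")
}
```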
@@ -19,7 +19,7 @@ Current active maintainers of rclone are:

**This is a work in progress Draft**

-This is a guide for how to be an rclone maintainer. This is mostly a write-up of what I (@ncw) attempt to do.
+This is a guide for how to be an rclone maintainer. This is mostly a writeup of what I (@ncw) attempt to do.

## Triaging Tickets ##

@@ -27,15 +27,15 @@ When a ticket comes in it should be triaged. This means it should be classified

Rclone uses the labels like this:

-* `bug` - a definitely verified bug
+* `bug` - a definite verified bug
* `can't reproduce` - a problem which we can't reproduce
* `doc fix` - a bug in the documentation - if users need help understanding the docs add this label
* `duplicate` - normally close these and ask the user to subscribe to the original
* `enhancement: new remote` - a new rclone backend
* `enhancement` - a new feature
* `FUSE` - to do with `rclone mount` command
-* `good first issue` - mark these if you find a small self-contained issue - these get shown to new visitors to the project
-* `help` wanted - mark these if you find a self-contained issue - these get shown to new visitors to the project
+* `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
+* `help` wanted - mark these if you find a self contained issue - these get shown to new visitors to the project
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
* `maintenance` - internal enhancement, code re-organisation, etc.
* `Needs Go 1.XX` - waiting for that version of Go to be released

@@ -51,7 +51,7 @@ The milestones have these meanings:

* v1.XX - stuff we would like to fit into this release
* v1.XX+1 - stuff we are leaving until the next release
-* Soon - stuff we think is a good idea - waiting to be scheduled for a release
+* Soon - stuff we think is a good idea - waiting to be scheduled to a release
* Help wanted - blue sky stuff that might get moved up, or someone could help with
* Known bugs - bugs waiting on external factors or we aren't going to fix for the moment

@@ -65,7 +65,7 @@ Close tickets as soon as you can - make sure they are tagged with a release. Po

Try to process pull requests promptly!

-Merging pull requests on GitHub itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
+Merging pull requests on GitHub itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.

After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.

@@ -81,15 +81,15 @@ Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer

High impact regressions should be fixed before the next release.

-Near the start of the release cycle, the dependencies should be updated with `make update` to give time for bugs to surface.
+Near the start of the release cycle the dependencies should be updated with `make update` to give time for bugs to surface.

Towards the end of the release cycle try not to merge anything too big so let things settle down.

-Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time-consuming often needing several rounds of test and fix depending on exactly how many new features rclone has gained.
+Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time consuming often needing several rounds of test and fix depending on exactly how many new features rclone has gained.

## Mailing list ##

-There is now an invite-only mailing list for rclone developers `rclone-dev` on google groups.
+There is now an invite only mailing list for rclone developers `rclone-dev` on google groups.

## TODO ##
MANUAL.html (generated): 7466 lines changed (file diff suppressed because it is too large)
MANUAL.txt (generated): 9293 lines changed (file diff suppressed because it is too large)
Makefile: 38 lines changed
@@ -104,14 +104,10 @@ showupdates:
	@echo "*** Direct dependencies that could be updated ***"
	@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null

-# Update direct dependencies only
-updatedirect:
-	GO111MODULE=on go get -d $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
-	GO111MODULE=on go mod tidy
-
# Update direct and indirect dependencies and test dependencies
update:
-	GO111MODULE=on go get -d -u -t ./...
+	GO111MODULE=on go get -u -t ./...
	-#GO111MODULE=on go get -d $(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
	GO111MODULE=on go mod tidy

# Tidy the module dependencies

@@ -260,33 +256,3 @@ startstable:

winzip:
	zip -9 rclone-$(TAG).zip rclone.exe
-
-# docker volume plugin
-PLUGIN_USER ?= rclone
-PLUGIN_TAG ?= latest
-PLUGIN_BASE_TAG ?= latest
-PLUGIN_ARCH ?= amd64
-PLUGIN_IMAGE := $(PLUGIN_USER)/docker-volume-rclone:$(PLUGIN_TAG)
-PLUGIN_BASE := $(PLUGIN_USER)/rclone:$(PLUGIN_BASE_TAG)
-PLUGIN_BUILD_DIR := ./build/docker-plugin
-PLUGIN_CONTRIB_DIR := ./contrib/docker-plugin/managed
-
-docker-plugin-create:
-	docker buildx inspect |grep -q /${PLUGIN_ARCH} || \
-	docker run --rm --privileged tonistiigi/binfmt --install all
-	rm -rf ${PLUGIN_BUILD_DIR}
-	docker buildx build \
-		--no-cache --pull \
-		--build-arg BASE_IMAGE=${PLUGIN_BASE} \
-		--platform linux/${PLUGIN_ARCH} \
-		--output ${PLUGIN_BUILD_DIR}/rootfs \
-		${PLUGIN_CONTRIB_DIR}
-	cp ${PLUGIN_CONTRIB_DIR}/config.json ${PLUGIN_BUILD_DIR}
-	docker plugin rm --force ${PLUGIN_IMAGE} 2>/dev/null || true
-	docker plugin create ${PLUGIN_IMAGE} ${PLUGIN_BUILD_DIR}
-
-docker-plugin-push:
-	docker plugin push ${PLUGIN_IMAGE}
-	docker plugin rm ${PLUGIN_IMAGE}
-
-docker-plugin: docker-plugin-create docker-plugin-push
README.md: 12 lines changed
@@ -2,7 +2,7 @@

[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
-[Download](https://rclone.org/downloads/) |
+[Download](https://rclone.org/downloads/) |
[Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |

@@ -10,12 +10,12 @@

[](https://github.com/rclone/rclone/actions?query=workflow%3Abuild)
[](https://goreportcard.com/report/github.com/rclone/rclone)
-[](https://godoc.org/github.com/rclone/rclone)
+[](https://godoc.org/github.com/rclone/rclone)
[](https://hub.docker.com/r/rclone/rclone)

# Rclone

-Rclone *("rsync for cloud storage")* is a command-line program to sync files and directories to and from different cloud storage providers.
+Rclone *("rsync for cloud storage")* is a command line program to sync files and directories to and from different cloud storage providers.

@@ -32,6 +32,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
+* GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)

@@ -59,10 +60,8 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* put.io [:page_facing_up:](https://rclone.org/putio/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
-* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
-* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)

@@ -73,7 +72,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
* Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)

Please see [the full list of all storage providers and their features](https://rclone.org/overview/)

## Features

@@ -88,6 +87,7 @@ Please see [the full list of all storage providers and their features](https://r
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
* Optional transparent compression ([Compress](https://rclone.org/compress/))
* Optional encryption ([Crypt](https://rclone.org/crypt/))
+* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Multi-threaded downloads to local disk
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
RELEASE.md: 17 lines changed
@@ -34,24 +34,13 @@ This file describes how to make the various kinds of releases

* make startdev # make startstable for stable branch
* # announce with forum post, twitter post, patreon post

## Update dependencies

Early in the next release cycle update the dependencies

-* Review any pinned packages in go.mod and remove if possible
-* make updatedirect
-* make
-* git commit -a -v
* make update
* make
-* roll back any updates which didn't compile
-* git commit -a -v --amend
-
-Note that `make update` updates all direct and indirect dependencies
-and there can occasionally be forwards compatibility problems with
-doing that so it may be necessary to roll back dependencies to the
-version specified by `make updatedirect` in order to get rclone to
-build.
+* git status
+* git add new files
+* git commit -a -v

## Making a point release
@@ -20,7 +20,7 @@ func init() {
		NewFs: NewFs,
		Options: []fs.Option{{
			Name:     "remote",
-			Help:     "Remote or path to alias.\n\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
+			Help:     "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
			Required: true,
		}},
	}
@@ -20,7 +20,7 @@ var (
)

func prepare(t *testing.T, root string) {
-	configfile.Install()
+	configfile.LoadConfig(context.Background())

	// Configure the remote
	config.FileSet(remoteName, "type", "alias")
@@ -18,7 +18,6 @@ import (
	_ "github.com/rclone/rclone/backend/ftp"
	_ "github.com/rclone/rclone/backend/googlecloudstorage"
	_ "github.com/rclone/rclone/backend/googlephotos"
-	_ "github.com/rclone/rclone/backend/hasher"
	_ "github.com/rclone/rclone/backend/hdfs"
	_ "github.com/rclone/rclone/backend/http"
	_ "github.com/rclone/rclone/backend/hubic"

@@ -38,12 +37,10 @@ import (
	_ "github.com/rclone/rclone/backend/seafile"
	_ "github.com/rclone/rclone/backend/sftp"
	_ "github.com/rclone/rclone/backend/sharefile"
-	_ "github.com/rclone/rclone/backend/sia"
	_ "github.com/rclone/rclone/backend/sugarsync"
	_ "github.com/rclone/rclone/backend/swift"
	_ "github.com/rclone/rclone/backend/tardigrade"
	_ "github.com/rclone/rclone/backend/union"
-	_ "github.com/rclone/rclone/backend/uptobox"
	_ "github.com/rclone/rclone/backend/webdav"
	_ "github.com/rclone/rclone/backend/yandex"
	_ "github.com/rclone/rclone/backend/zoho"
@@ -14,15 +14,16 @@ we ignore assets completely!
import (
	"context"
	"encoding/json"
-	"errors"
	"fmt"
	"io"
+	"log"
	"net/http"
	"path"
	"strings"
	"time"

	acd "github.com/ncw/go-acd"
+	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"

@@ -69,10 +70,11 @@ func init() {
		Prefix:      "acd",
		Description: "Amazon Drive",
		NewFs:       NewFs,
-		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
-			return oauthutil.ConfigOut("", &oauthutil.Options{
-				OAuth2Config: acdConfig,
-			})
+		Config: func(ctx context.Context, name string, m configmap.Mapper) {
+			err := oauthutil.Config(ctx, "amazon cloud drive", name, m, acdConfig, nil)
+			if err != nil {
+				log.Fatalf("Failed to configure token: %v", err)
+			}
		},
		Options: append(oauthutil.SharedOptions, []fs.Option{{
			Name: "checkpoint",

@@ -81,16 +83,16 @@ func init() {
			Advanced: true,
		}, {
			Name: "upload_wait_per_gb",
-			Help: `Additional time per GiB to wait after a failed complete upload to see if it appears.
+			Help: `Additional time per GB to wait after a failed complete upload to see if it appears.

Sometimes Amazon Drive gives an error when a file has been fully
uploaded but the file appears anyway after a little while. This
-happens sometimes for files over 1 GiB in size and nearly every time for
-files bigger than 10 GiB. This parameter controls the time rclone waits
+happens sometimes for files over 1GB in size and nearly every time for
+files bigger than 10GB. This parameter controls the time rclone waits
for the file to appear.

-The default value for this parameter is 3 minutes per GiB, so by
-default it will wait 3 minutes for every GiB uploaded to see if the
+The default value for this parameter is 3 minutes per GB, so by
+default it will wait 3 minutes for every GB uploaded to see if the
file appears.

You can disable this feature by setting it to 0. This may cause
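As a back-of-the-envelope sketch of the behaviour described in that help text (3 minutes of waiting per GB by default), the wait rclone would apply after a failed complete upload scales linearly with file size. Illustrative arithmetic only, not the backend's actual code:

```go
package main

import (
	"fmt"
	"time"
)

// uploadWait sketches the rule above: wait waitPerGB for each GB uploaded.
func uploadWait(sizeBytes int64, waitPerGB time.Duration) time.Duration {
	const gb = 1 << 30
	return time.Duration(float64(sizeBytes) / gb * float64(waitPerGB))
}

func main() {
	fmt.Println(uploadWait(10<<30, 3*time.Minute)) // 30m0s for a 10 GB file
}
```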
@@ -110,7 +112,7 @@ in this situation.`,

Files this size or more will be downloaded via their "tempLink". This
is to work around a problem with Amazon Drive which blocks downloads
-of files bigger than about 10 GiB. The default for this is 9 GiB which
+of files bigger than about 10GB. The default for this is 9GB which
shouldn't need to be changed.

To download files above this threshold, rclone requests a "tempLink"

@@ -259,7 +261,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	}
	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, acdConfig, baseClient)
	if err != nil {
-		return nil, fmt.Errorf("failed to configure Amazon Drive: %w", err)
+		return nil, errors.Wrap(err, "failed to configure Amazon Drive")
	}

	c := acd.NewClient(oAuthClient)

@@ -292,13 +294,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
-		return nil, fmt.Errorf("failed to get endpoints: %w", err)
+		return nil, errors.Wrap(err, "failed to get endpoints")
	}

	// Get rootID
	rootInfo, err := f.getRootInfo(ctx)
	if err != nil || rootInfo.Id == nil {
-		return nil, fmt.Errorf("failed to get root: %w", err)
+		return nil, errors.Wrap(err, "failed to get root")
	}
	f.trueRootID = *rootInfo.Id
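The `fmt.Errorf`/`errors.Wrap` swaps in this hunk (and in azureblob below) are the same mechanical change everywhere: moving between `github.com/pkg/errors` wrapping and Go 1.13 `%w` wrapping. A minimal stdlib-only illustration of the `%w` side, showing that the wrapped error stays inspectable:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

func main() {
	// Go 1.13 style wrapping, as on one side of this diff.
	wrapped := fmt.Errorf("failed to get root: %w", errNotFound)

	fmt.Println(errors.Is(wrapped, errNotFound)) // true - the cause survives
	fmt.Println(wrapped)                         // failed to get root: not found
}
```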
@@ -1,6 +1,5 @@
// Test AmazonCloudDrive filesystem interface

-//go:build acd
// +build acd

package amazonclouddrive_test
@@ -1,7 +1,6 @@
// Package azureblob provides an interface to the Microsoft Azure blob object storage system

-//go:build !plan9 && !solaris && !js
-// +build !plan9,!solaris,!js
+// +build !plan9,!solaris,!js,go1.14

package azureblob

@@ -10,14 +9,12 @@ import (
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
-	"errors"
	"fmt"
	"io"
+	"io/ioutil"
	"net/http"
	"net/url"
	"path"
	"strconv"
	"strings"
	"sync"
	"time"
@@ -25,6 +22,7 @@ import (
	"github.com/Azure/azure-pipeline-go/pipeline"
	"github.com/Azure/azure-storage-blob-go/azblob"
	"github.com/Azure/go-autorest/autorest/adal"
+	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
@@ -49,7 +47,9 @@ const (
	timeFormatIn          = time.RFC3339
	timeFormatOut         = "2006-01-02T15:04:05.000000000Z07:00"
	storageDefaultBaseURL = "blob.core.windows.net"
-	defaultChunkSize      = 4 * fs.Mebi
+	defaultChunkSize      = 4 * fs.MebiByte
+	maxChunkSize          = 100 * fs.MebiByte
+	uploadConcurrency     = 4
	defaultAccessTier     = azblob.AccessTierNone
	maxTryTimeout         = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
	// Default storage account, key and blob endpoint for emulator support,
@@ -73,29 +73,30 @@ func init() {
		NewFs: NewFs,
		Options: []fs.Option{{
			Name: "account",
-			Help: "Storage Account Name.\n\nLeave blank to use SAS URL or Emulator.",
+			Help: "Storage Account Name (leave blank to use SAS URL or Emulator)",
		}, {
			Name: "service_principal_file",
			Help: `Path to file containing credentials for use with a service principal.

Leave blank normally. Needed only if you want to use a service principal instead of interactive login.

-    $ az ad sp create-for-rbac --name "<name>" \
+    $ az sp create-for-rbac --name "<name>" \
      --role "Storage Blob Data Owner" \
      --scopes "/subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>/blobServices/default/containers/<container>" \
      > azure-principal.json

-See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli) and ["Assign an Azure role for access to blob data"](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details.
+See [Use Azure CLI to assign an Azure role for access to blob and queue data](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli)
+for more details.
`,
		}, {
			Name: "key",
-			Help: "Storage Account Key.\n\nLeave blank to use SAS URL or Emulator.",
+			Help: "Storage Account Key (leave blank to use SAS URL or Emulator)",
		}, {
			Name: "sas_url",
-			Help: "SAS URL for container level access only.\n\nLeave blank if using account/key or Emulator.",
+			Help: "SAS URL for container level access only\n(leave blank if using account/key or Emulator)",
		}, {
			Name: "use_msi",
-			Help: `Use a managed service identity to authenticate (only works in Azure).
+			Help: `Use a managed service identity to authenticate (only works in Azure)

When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/)
to authenticate to Azure Storage instead of a SAS token or account key.
@@ -108,57 +109,36 @@ msi_client_id, or msi_mi_res_id parameters.`,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "msi_object_id",
|
||||
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
|
||||
Help: "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_mi_res_id specified.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "msi_client_id",
|
||||
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
|
||||
Help: "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_object_id or msi_mi_res_id specified.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "msi_mi_res_id",
|
||||
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
|
||||
Help: "Azure resource ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_object_id specified.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_emulator",
|
||||
Help: "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
|
||||
Help: "Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)",
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for the service.\n\nLeave blank normally.",
|
||||
Help: "Endpoint for the service\nLeave blank normally.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: "Cutoff for switching to chunked upload (<= 256 MiB) (deprecated).",
|
||||
Help: "Cutoff for switching to chunked upload (<= 256MB). (Deprecated)",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: `Upload chunk size.
|
||||
Help: `Upload chunk size (<= 100MB).
|
||||
|
||||
Note that this is stored in memory and there may be up to
|
||||
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
|
||||
in memory.`,
|
||||
"--transfers" chunks stored at once in memory.`,
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_concurrency",
|
||||
Help: `Concurrency for multipart uploads.
|
||||
|
||||
This is the number of chunks of the same file that are uploaded
|
||||
concurrently.
|
||||
|
||||
If you are uploading small numbers of large files over high-speed
|
||||
links and these uploads do not fully utilize your bandwidth, then
|
||||
increasing this may help to speed up the transfers.
|
||||
|
||||
In tests, upload speed increases almost linearly with upload
|
||||
concurrency. For example to fill a gigabit pipe it may be necessary to
|
||||
raise this to 64. Note that this will use more memory.
|
||||
|
||||
Note that chunks are stored in memory and there may be up to
|
||||
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
|
||||
in memory.`,
|
||||
Default: 16,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "list_chunk",
|
||||
Help: `Size of blob list.
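
To make the chunk-buffering note above concrete, here is a minimal sketch of the worst-case memory arithmetic. The concurrency of 16 matches the default above; the other numbers are hypothetical, not rclone defaults.

package main

import "fmt"

func main() {
	// Worst case per the help text: "--transfers" * "--azureblob-upload-concurrency"
	// chunks can be buffered at once.
	const transfers = 4               // assumed --transfers
	const uploadConcurrency = 16      // --azureblob-upload-concurrency (default above)
	const chunkSize = 4 * 1024 * 1024 // an assumed --azureblob-chunk-size of 4 MiB
	fmt.Printf("peak buffer memory: %d MiB\n", transfers*uploadConcurrency*chunkSize/(1024*1024))
}
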
@@ -220,7 +200,6 @@ to start uploading.`,
Default: memoryPoolFlushTime,
Advanced: true,
Help: `How often internal memory buffer pools will be flushed.

Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
}, {
@@ -240,12 +219,12 @@ This option controls how often unused buffers will be removed from the pool.`,
encoder.EncodeRightPeriod),
}, {
Name: "public_access",
Help: "Public access level of a container: blob or container.",
Help: "Public access level of a container: blob, container.",
Default: string(azblob.PublicAccessNone),
Examples: []fs.OptionExample{
{
Value: string(azblob.PublicAccessNone),
Help: "The container and its blobs can be accessed only with an authorized request.\nIt's a default value.",
Help: "The container and its blobs can be accessed only with an authorized request. It's a default value",
}, {
Value: string(azblob.PublicAccessBlob),
Help: "Blob data within this container can be read via anonymous request.",
@@ -255,11 +234,6 @@ This option controls how often unused buffers will be removed from the pool.`,
},
},
Advanced: true,
}, {
Name: "no_head_object",
Help: `If set, do not do HEAD before GET when getting objects.`,
Default: false,
Advanced: true,
}},
})
}
@@ -276,7 +250,6 @@ type Options struct {
Endpoint string `config:"endpoint"`
SASURL string `config:"sas_url"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"`
ArchiveTierDelete bool `config:"archive_tier_delete"`
@@ -286,7 +259,6 @@ type Options struct {
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
PublicAccess string `config:"public_access"`
NoHeadObject bool `config:"no_head_object"`
}

// Fs represents a remote azure server
@@ -432,9 +404,12 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
}

func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.SizeSuffixBase
const minChunkSize = fs.Byte
if cs < minChunkSize {
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
if cs > maxChunkSize {
return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
}
return nil
}
@@ -476,11 +451,11 @@ const azureStorageEndpoint = "https://storage.azure.com/"
func newServicePrincipalTokenRefresher(ctx context.Context, credentialsData []byte) (azblob.TokenRefresher, error) {
var spCredentials servicePrincipalCredentials
if err := json.Unmarshal(credentialsData, &spCredentials); err != nil {
return nil, fmt.Errorf("error parsing credentials from JSON file: %w", err)
return nil, errors.Wrap(err, "error parsing credentials from JSON file")
}
oauthConfig, err := adal.NewOAuthConfig(azureActiveDirectoryEndpoint, spCredentials.Tenant)
if err != nil {
return nil, fmt.Errorf("error creating oauth config: %w", err)
return nil, errors.Wrap(err, "error creating oauth config")
}

// Create service principal token for Azure Storage.
@@ -490,7 +465,7 @@ func newServicePrincipalTokenRefresher(ctx context.Context, credentialsData []by
spCredentials.Password,
azureStorageEndpoint)
if err != nil {
return nil, fmt.Errorf("error creating service principal token: %w", err)
return nil, errors.Wrap(err, "error creating service principal token")
}

// Wrap token inside a refresher closure.
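
As context for the code above, here is a standalone sketch of decoding the azure-principal.json written by the az command near the top of this section. The appId/password/tenant field names follow the az CLI output and are an assumption here, not taken from this diff.

package main

import (
	"encoding/json"
	"fmt"
)

// Assumed shape of the az CLI output; the refresher above needs the
// tenant and password plus the application ID.
type servicePrincipal struct {
	AppID    string `json:"appId"`
	Password string `json:"password"`
	Tenant   string `json:"tenant"`
}

func main() {
	data := []byte(`{"appId":"00000000-0000-0000-0000-000000000000","password":"secret","tenant":"11111111-1111-1111-1111-111111111111"}`)
	var sp servicePrincipal
	if err := json.Unmarshal(data, &sp); err != nil {
		panic(err)
	}
	fmt.Println("tenant:", sp.Tenant)
}
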
@@ -543,10 +518,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, fmt.Errorf("azure: chunk size: %w", err)
return nil, errors.Wrap(err, "azure: chunk size")
}
if opt.ListChunkSize > maxListChunkSize {
return nil, fmt.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
}
if opt.Endpoint == "" {
opt.Endpoint = storageDefaultBaseURL
@@ -555,12 +530,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.AccessTier == "" {
opt.AccessTier = string(defaultAccessTier)
} else if !validateAccessTier(opt.AccessTier) {
return nil, fmt.Errorf("Azure Blob: Supported access tiers are %s, %s and %s",
return nil, errors.Errorf("Azure Blob: Supported access tiers are %s, %s and %s",
string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
}

if !validatePublicAccess((opt.PublicAccess)) {
return nil, fmt.Errorf("Azure Blob: Supported public access level are %s and %s",
return nil, errors.Errorf("Azure Blob: Supported public access level are %s and %s",
string(azblob.PublicAccessBlob), string(azblob.PublicAccessContainer))
}

@@ -602,11 +577,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
case opt.UseEmulator:
credential, err := azblob.NewSharedKeyCredential(emulatorAccount, emulatorAccountKey)
if err != nil {
return nil, fmt.Errorf("Failed to parse credentials: %w", err)
return nil, errors.Wrapf(err, "Failed to parse credentials")
}
u, err = url.Parse(emulatorBlobEndpoint)
if err != nil {
return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
}
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline)
@@ -648,12 +623,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
})

if err != nil {
return nil, fmt.Errorf("Failed to acquire MSI token: %w", err)
return nil, errors.Wrapf(err, "Failed to acquire MSI token")
}

u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
if err != nil {
return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
}
credential := azblob.NewTokenCredential(token.AccessToken, func(credential azblob.TokenCredential) time.Duration {
fs.Debugf(f, "Token refresher called.")
@@ -683,19 +658,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
case opt.Account != "" && opt.Key != "":
credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
if err != nil {
return nil, fmt.Errorf("Failed to parse credentials: %w", err)
return nil, errors.Wrapf(err, "Failed to parse credentials")
}

u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
if err != nil {
return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
}
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline)
case opt.SASURL != "":
u, err = url.Parse(opt.SASURL)
if err != nil {
return nil, fmt.Errorf("failed to parse SAS URL: %w", err)
return nil, errors.Wrapf(err, "failed to parse SAS URL")
}
// use anonymous credentials in case of sas url
pipeline := f.newPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
@@ -715,17 +690,17 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Create a standard URL.
u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
if err != nil {
return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
}
// Try loading service principal credentials from file.
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServicePrincipalFile))
if err != nil {
return nil, fmt.Errorf("error opening service principal credentials file: %w", err)
return nil, errors.Wrap(err, "error opening service principal credentials file")
}
// Create a token refresher from service principal credentials.
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, loadedCreds)
if err != nil {
return nil, fmt.Errorf("failed to create a service principal token: %w", err)
return nil, errors.Wrap(err, "failed to create a service principal token")
}
options := azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}}
pipe := f.newPipeline(azblob.NewTokenCredential("", tokenRefresher), options)
@@ -782,7 +757,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItemInternal) (fs
if err != nil {
return nil, err
}
} else if !o.fs.opt.NoHeadObject {
} else {
err := o.readMetaData() // reads info and headers, returning an error
if err != nil {
return nil, err
@@ -1341,7 +1316,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
}
data, err := base64.StdEncoding.DecodeString(o.md5)
if err != nil {
return "", fmt.Errorf("Failed to decode Content-MD5: %q: %w", o.md5, err)
return "", errors.Wrapf(err, "Failed to decode Content-MD5: %q", o.md5)
}
return hex.EncodeToString(data), nil
}
@@ -1392,39 +1367,6 @@ func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetProper
return nil
}

func (o *Object) decodeMetaDataFromDownloadResponse(info *azblob.DownloadResponse) (err error) {
metadata := info.NewMetadata()
size := info.ContentLength()
if isDirectoryMarker(size, metadata, o.remote) {
return fs.ErrorNotAFile
}
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
// this as base64 encoded string.
o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
o.mimeType = info.ContentType()
o.size = size
o.modTime = info.LastModified()
o.accessTier = o.AccessTier()
o.setMetadata(metadata)

// If it was a Range request, the size is wrong, so correct it
if contentRange := info.ContentRange(); contentRange != "" {
slash := strings.IndexRune(contentRange, '/')
if slash >= 0 {
i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
if err == nil {
o.size = i
} else {
fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err)
}
} else {
fs.Debugf(o, "Failed to find length in %q", contentRange)
}
}

return nil
}
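
The Content-Range correction above is easy to exercise in isolation. A minimal sketch, with an invented header value:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// For a ranged GET the total after the '/' is the real object size.
	contentRange := "bytes 0-1023/4096" // hypothetical response header
	if slash := strings.IndexRune(contentRange, '/'); slash >= 0 {
		size, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
		fmt.Println(size, err) // 4096 <nil>
	}
}
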

func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItemInternal) (err error) {
metadata := info.Metadata
size := *info.Properties.ContentLength
@@ -1527,7 +1469,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var offset int64
var count int64
if o.AccessTier() == azblob.AccessTierArchive {
return nil, fmt.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
}
fs.FixRangeOption(options, o.size)
for _, option := range options {
@@ -1553,11 +1495,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return o.fs.shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("failed to open for download: %w", err)
}
err = o.decodeMetaDataFromDownloadResponse(downloadResponse)
if err != nil {
return nil, fmt.Errorf("failed to decode metadata for download: %w", err)
return nil, errors.Wrap(err, "failed to open for download")
}
in = downloadResponse.Body(azblob.RetryReaderOptions{})
return in, nil
@@ -1647,7 +1585,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "deleting archive tier blob before updating")
err = o.Remove(ctx)
if err != nil {
return fmt.Errorf("failed to delete archive blob before updating: %w", err)
return errors.Wrap(err, "failed to delete archive blob before updating")
}
} else {
return errCantUpdateArchiveTierBlobs
@@ -1684,10 +1622,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
BufferSize: int(o.fs.opt.ChunkSize),
MaxBuffers: o.fs.opt.UploadConcurrency,
MaxBuffers: uploadConcurrency,
Metadata: o.meta,
BlobHTTPHeaders: httpHeaders,
TransferManager: o.fs.newPoolWrapper(o.fs.opt.UploadConcurrency),
TransferManager: o.fs.newPoolWrapper(uploadConcurrency),
}

// Don't retry, return a retry error instead
@@ -1740,7 +1678,7 @@ func (o *Object) AccessTier() azblob.AccessTierType {
// SetTier performs changing object tier
func (o *Object) SetTier(tier string) error {
if !validateAccessTier(tier) {
return fmt.Errorf("Tier %s not supported by Azure Blob Storage", tier)
return errors.Errorf("Tier %s not supported by Azure Blob Storage", tier)
}

// Check if current tier already matches with desired tier
@@ -1756,7 +1694,7 @@ func (o *Object) SetTier(tier string) error {
})

if err != nil {
return fmt.Errorf("Failed to set Blob Tier: %w", err)
return errors.Wrap(err, "Failed to set Blob Tier")
}

// Set access tier on local object also, this typically

@@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// +build !plan9,!solaris,!js,go1.14

package azureblob

@@ -1,7 +1,6 @@
// Test AzureBlob filesystem interface

//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// +build !plan9,!solaris,!js,go1.14

package azureblob

@@ -17,10 +16,12 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{},
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MaxChunkSize: maxChunkSize,
},
})
}

@@ -1,7 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "

//go:build plan9 || solaris || js
// +build plan9 solaris js
// +build plan9 solaris js !go1.14

package azureblob

@@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// +build !plan9,!solaris,!js,go1.14

package azureblob

@@ -13,6 +12,7 @@ import (
"net/http"

"github.com/Azure/go-autorest/autorest/adal"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
)
@@ -94,7 +94,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
httpClient := fshttp.NewClient(ctx)
resp, err := httpClient.Do(req)
if err != nil {
return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
return result, errors.Wrap(err, "MSI is not enabled on this VM")
}
defer func() { // resp and Body should not be nil
_, err = io.Copy(ioutil.Discard, resp.Body)
@@ -119,7 +119,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {

b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, fmt.Errorf("Couldn't read IMDS response: %w", err)
return result, errors.Wrap(err, "Couldn't read IMDS response")
}
// Remove BOM, if any. azcopy does this so I'm following along.
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
@@ -130,7 +130,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
// storage API call.
err = json.Unmarshal(b, &result)
if err != nil {
return result, fmt.Errorf("Couldn't unmarshal IMDS response: %w", err)
return result, errors.Wrap(err, "Couldn't unmarshal IMDS response")
}

return result, nil
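
The BOM strip above matters because encoding/json rejects a leading byte-order mark. A self-contained sketch with an invented payload; the access_token field name is an assumption about the IMDS response:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical IMDS body prefixed with a UTF-8 BOM.
	b := []byte("\xef\xbb\xbf{\"access_token\":\"xyz\"}")
	b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
	var result struct {
		AccessToken string `json:"access_token"`
	}
	err := json.Unmarshal(b, &result)
	fmt.Println(err, result.AccessToken) // <nil> xyz
}
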

@@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// +build !plan9,!solaris,!js,go1.14

package azureblob

@@ -2,11 +2,12 @@ package api

import (
"fmt"
"path"
"strconv"
"strings"
"time"

"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/version"
)

// Error describes a B2 error response
@@ -62,17 +63,16 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
return nil
}

// HasVersion returns true if it looks like the passed filename has a timestamp on it.
//
// Note that the passed filename's timestamp may still be invalid even if this
// function returns true.
func HasVersion(remote string) bool {
return version.Match(remote)
}
const versionFormat = "-v2006-01-02-150405.000"

// AddVersion adds the timestamp as a version string into the filename passed in.
func (t Timestamp) AddVersion(remote string) string {
return version.Add(remote, time.Time(t))
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
s := time.Time(t).Format(versionFormat)
// Replace the '.' with a '-'
s = strings.Replace(s, ".", "-", -1)
return base + s + ext
}

// RemoveVersion removes the timestamp from a filename as a version string.
@@ -80,9 +80,24 @@ func (t Timestamp) AddVersion(remote string) string {
// It returns the new file name and a timestamp, or the old filename
// and a zero timestamp.
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
time, newRemote := version.Remove(remote)
t = Timestamp(time)
return
newRemote = remote
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
if len(base) < len(versionFormat) {
return
}
versionStart := len(base) - len(versionFormat)
// Check it ends in -xxx
if base[len(base)-4] != '-' {
return
}
// Replace with .xxx for parsing
base = base[:len(base)-4] + "." + base[len(base)-3:]
newT, err := time.Parse(versionFormat, base[versionStart:])
if err != nil {
return
}
return Timestamp(newT), base[:versionStart] + ext
}

// IsZero returns true if the timestamp is uninitialized
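
Both the helper-based version and the inlined implementation above produce the same name shape. A minimal standalone sketch of the suffix scheme, using a timestamp taken from the tests that follow:

package main

import (
	"fmt"
	"path"
	"strings"
	"time"
)

const versionFormat = "-v2006-01-02-150405.000"

// addVersion splices the formatted timestamp in before the extension,
// with the '.' of the milliseconds swapped for a '-'.
func addVersion(remote string, t time.Time) string {
	ext := path.Ext(remote)
	base := remote[:len(remote)-len(ext)]
	s := strings.Replace(t.Format(versionFormat), ".", "-", -1)
	return base + s + ext
}

func main() {
	t := time.Date(2001, 2, 3, 4, 5, 6, 123e6, time.UTC)
	fmt.Println(addVersion("potato.txt", t)) // potato-v2001-02-03-040506-123.txt
}
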

@@ -13,6 +13,7 @@ import (
var (
emptyT api.Timestamp
t0 = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
t0r = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
t1 = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
)

@@ -35,6 +36,40 @@ func TestTimestampUnmarshalJSON(t *testing.T) {
assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
}

func TestTimestampAddVersion(t *testing.T) {
for _, test := range []struct {
t api.Timestamp
in string
expected string
}{
{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
{t1, "potato", "potato-v2001-02-03-040506-123"},
{t1, "", "-v2001-02-03-040506-123"},
} {
actual := test.t.AddVersion(test.in)
assert.Equal(t, test.expected, actual, test.in)
}
}

func TestTimestampRemoveVersion(t *testing.T) {
for _, test := range []struct {
in string
expectedT api.Timestamp
expectedRemote string
}{
{"potato.txt", emptyT, "potato.txt"},
{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
{"potato-v2001-02-03-040506-123", t1, "potato"},
{"-v2001-02-03-040506-123", t1, ""},
{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
} {
actualT, actualRemote := api.RemoveVersion(test.in)
assert.Equal(t, test.expectedT, actualT, test.in)
assert.Equal(t, test.expectedRemote, actualRemote, test.in)
}
}

func TestTimestampIsZero(t *testing.T) {
assert.True(t, emptyT.IsZero())
assert.False(t, t0.IsZero())

@@ -9,7 +9,7 @@ import (
"bytes"
"context"
"crypto/sha1"
"errors"
"encoding/json"
"fmt"
gohash "hash"
"io"
@@ -20,6 +20,7 @@ import (
"sync"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -54,10 +55,10 @@ const (
decayConstant = 1 // bigger for slower decay, exponential
maxParts = 10000
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
minChunkSize = 5 * fs.Mebi
defaultChunkSize = 96 * fs.Mebi
defaultUploadCutoff = 200 * fs.Mebi
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
minChunkSize = 5 * fs.MebiByte
defaultChunkSize = 96 * fs.MebiByte
defaultUploadCutoff = 200 * fs.MebiByte
largeFileCopyCutoff = 4 * fs.GibiByte // 5E9 is the max
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
)
@@ -75,15 +76,15 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "account",
Help: "Account ID or Application Key ID.",
Help: "Account ID or Application Key ID",
Required: true,
}, {
Name: "key",
Help: "Application Key.",
Help: "Application Key",
Required: true,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
Help: "Endpoint for the service.\nLeave blank normally.",
Advanced: true,
}, {
Name: "test_mode",
@@ -103,7 +104,7 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
Advanced: true,
}, {
Name: "versions",
Help: "Include old versions in directory listings.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Help: "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Default: false,
Advanced: true,
}, {
@@ -116,34 +117,32 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration

Files above this size will be uploaded in chunks of "--b2-chunk-size".

This value should be set no larger than 4.657 GiB (== 5 GB).`,
This value should be set no larger than 4.657GiB (== 5GB).`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy.
Help: `Cutoff for switching to multipart copy

Any files larger than this that need to be server-side copied will be
copied in chunks of this size.

The minimum is 0 and the maximum is 4.6 GiB.`,
The minimum is 0 and the maximum is 4.6GB.`,
Default: largeFileCopyCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Upload chunk size.
Help: `Upload chunk size. Must fit in memory.

When uploading large files, chunk the file into this size.

Must fit in memory. These chunks are buffered in memory and there
might a maximum of "--transfers" chunks in progress at once.

5,000,000 Bytes is the minimum size.`,
When uploading large files, chunk the file into this size. Note that
these chunks are buffered in memory and there might a maximum of
"--transfers" chunks in progress at once. 5,000,000 Bytes is the
minimum size.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files.
Help: `Disable checksums for large (> upload cutoff) files

Normally rclone will calculate the SHA1 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
@@ -346,11 +345,17 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b

// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
body, err := rest.ReadBody(resp)
if err != nil {
return errors.Wrap(err, "error reading error out of body")
}
// Decode error response
errResponse := new(api.Error)
err := rest.DecodeJSON(resp, &errResponse)
if err != nil {
fs.Debugf(nil, "Couldn't decode error response: %v", err)
if len(body) > 0 {
err := json.Unmarshal(body, &errResponse)
if err != nil {
fs.Debugf(nil, "Couldn't decode error response: %v", err)
}
}
if errResponse.Code == "" {
errResponse.Code = "unknown"
@@ -359,14 +364,14 @@ func errorHandler(resp *http.Response) error {
errResponse.Status = resp.StatusCode
}
if errResponse.Message == "" {
errResponse.Message = "Unknown " + resp.Status
errResponse.Message = fmt.Sprintf("Unknown: %s: %s", resp.Status, body)
}
return errResponse
}
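
As a self-contained illustration of the fallback decode above: the struct mirrors the api.Error fields this handler fills in (Status, Code, Message), and the JSON body is invented for the example.

package main

import (
	"encoding/json"
	"fmt"
)

type b2Error struct {
	Status  int    `json:"status"`
	Code    string `json:"code"`
	Message string `json:"message"`
}

func main() {
	body := []byte(`{"status":401,"code":"bad_auth_token","message":"Invalid authorization token"}`)
	var e b2Error
	if err := json.Unmarshal(body, &e); err != nil {
		// Mirror the handler's fallback when the body isn't decodable.
		e.Code, e.Message = "unknown", fmt.Sprintf("Unknown: %s", body)
	}
	fmt.Printf("%d %s: %s\n", e.Status, e.Code, e.Message)
}
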

func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
@@ -381,7 +386,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)

func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
if cs < opt.ChunkSize {
return fmt.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
return errors.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
}
return nil
}
@@ -414,11 +419,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = checkUploadCutoff(opt, opt.UploadCutoff)
if err != nil {
return nil, fmt.Errorf("b2: upload cutoff: %w", err)
return nil, errors.Wrap(err, "b2: upload cutoff")
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, fmt.Errorf("b2: chunk size: %w", err)
return nil, errors.Wrap(err, "b2: chunk size")
}
if opt.Account == "" {
return nil, errors.New("account not found")
@@ -463,7 +468,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = f.authorizeAccount(ctx)
if err != nil {
return nil, fmt.Errorf("failed to authorize account: %w", err)
return nil, errors.Wrap(err, "failed to authorize account")
}
// If this is a key limited to a single bucket, it must exist already
if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
@@ -472,7 +477,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, errors.New("bucket that application key is restricted to no longer exists")
}
if allowedBucket != f.rootBucket {
return nil, fmt.Errorf("you must use bucket %q with this application key", allowedBucket)
return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
}
f.cache.MarkOK(f.rootBucket)
f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
@@ -512,7 +517,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
return f.shouldRetryNoReauth(ctx, resp, err)
})
if err != nil {
return fmt.Errorf("failed to authenticate: %w", err)
return errors.Wrap(err, "failed to authenticate")
}
f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
return nil
@@ -558,7 +563,7 @@ func (f *Fs) getUploadURL(ctx context.Context, bucket string) (upload *api.GetUp
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get upload URL: %w", err)
return nil, errors.Wrap(err, "failed to get upload URL")
}
return upload, nil
}
@@ -1048,7 +1053,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
}
}
}
return fmt.Errorf("failed to create bucket: %w", err)
return errors.Wrap(err, "failed to create bucket")
}
f.setBucketID(bucket, response.ID)
f.setBucketType(bucket, response.Type)
@@ -1083,7 +1088,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return fmt.Errorf("failed to delete bucket: %w", err)
return errors.Wrap(err, "failed to delete bucket")
}
f.clearBucketID(bucket)
f.clearBucketType(bucket)
@@ -1124,7 +1129,7 @@ func (f *Fs) hide(ctx context.Context, bucket, bucketPath string) error {
return nil
}
}
return fmt.Errorf("failed to hide %q: %w", bucketPath, err)
return errors.Wrapf(err, "failed to hide %q", bucketPath)
}
return nil
}
@@ -1145,7 +1150,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return fmt.Errorf("failed to delete %q: %w", Name, err)
return errors.Wrapf(err, "failed to delete %q", Name)
}
return nil
}
@@ -1355,7 +1360,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
}
var request = api.GetDownloadAuthorizationRequest{
BucketID: bucketID,
FileNamePrefix: f.opt.Enc.FromStandardPath(path.Join(f.rootDirectory, remote)),
FileNamePrefix: f.opt.Enc.FromStandardPath(path.Join(f.root, remote)),
ValidDurationInSeconds: validDurationInSeconds,
}
var response api.GetDownloadAuthorizationResponse
@@ -1364,7 +1369,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return "", fmt.Errorf("failed to get download authorization: %w", err)
return "", errors.Wrap(err, "failed to get download authorization")
}
return response.AuthorizationToken, nil
}
@@ -1669,14 +1674,14 @@ func (file *openFile) Close() (err error) {

// Check to see we read the correct number of bytes
if file.o.Size() != file.bytes {
return fmt.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
return errors.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
}

// Check the SHA1
receivedSHA1 := file.o.sha1
calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
return fmt.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
return errors.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
}

return nil
@@ -1716,7 +1721,7 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
if resp != nil && (resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusBadRequest) {
return nil, nil, fs.ErrorObjectNotFound
}
return nil, nil, fmt.Errorf("failed to %s for download: %w", method, err)
return nil, nil, errors.Wrapf(err, "failed to %s for download", method)
}

// NB resp may be Open here - don't return err != nil without closing

@@ -15,6 +15,7 @@ import (
"strings"
"sync"

"github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -101,7 +102,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
parts++
}
if parts > maxParts {
return nil, fmt.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
}
sha1SliceSize = parts
}
@@ -184,7 +185,7 @@ func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadP
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get upload URL: %w", err)
return nil, errors.Wrap(err, "failed to get upload URL")
}
} else {
upload, up.uploads = up.uploads[0], up.uploads[1:]
@@ -229,14 +230,14 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
//
// The number of bytes in the file being uploaded. Note that
// this header is required; you cannot leave it out and just
// use chunked encoding. The minimum size of every part but
// the last one is 100 MB (100,000,000 bytes)
// use chunked encoding. The minimum size of every part but
// the last one is 100MB.
//
// X-Bz-Content-Sha1
//
// The SHA1 checksum of the this part of the file. B2 will
// check this when the part is uploaded, to make sure that the
// data arrived correctly. The same SHA1 checksum must be
// data arrived correctly. The same SHA1 checksum must be
// passed to b2_finish_large_file.
opts := rest.Opts{
Method: "POST",
@@ -405,7 +406,7 @@ func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (e
up.size += int64(n)
if part > maxParts {
up.f.putBuf(buf, false)
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
return errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}

part := part // for the closure

@@ -36,13 +36,13 @@ func (t *Time) UnmarshalJSON(data []byte) error {

// Error is returned from box when things go wrong
type Error struct {
Type string `json:"type"`
Status int `json:"status"`
Code string `json:"code"`
ContextInfo json.RawMessage `json:"context_info"`
HelpURL string `json:"help_url"`
Message string `json:"message"`
RequestID string `json:"request_id"`
Type string `json:"type"`
Status int `json:"status"`
Code string `json:"code"`
ContextInfo json.RawMessage
HelpURL string `json:"help_url"`
Message string `json:"message"`
RequestID string `json:"request_id"`
}

// Error returns a string for the error and satisfies the error interface
@@ -61,7 +61,7 @@ func (e *Error) Error() string {
var _ error = (*Error)(nil)

// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"

// Types of things in Item
const (
@@ -90,12 +90,6 @@ type Item struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
} `json:"shared_link"`
OwnedBy struct {
Type string `json:"type"`
ID string `json:"id"`
Name string `json:"name"`
Login string `json:"login"`
} `json:"owned_by"`
}

// ModTime returns the modification time of the item
@@ -109,11 +103,10 @@ func (i *Item) ModTime() (t time.Time) {

// FolderItems is returned from the GetFolderItems call
type FolderItems struct {
TotalCount int `json:"total_count"`
Entries []Item `json:"entries"`
Offset int `json:"offset"`
Limit int `json:"limit"`
NextMarker *string `json:"next_marker,omitempty"`
TotalCount int `json:"total_count"`
Entries []Item `json:"entries"`
Offset int `json:"offset"`
Limit int `json:"limit"`
Order []struct {
By string `json:"by"`
Direction string `json:"direction"`
@@ -139,38 +132,6 @@ type UploadFile struct {
ContentModifiedAt Time `json:"content_modified_at"`
}

// PreUploadCheck is the request for upload preflight check
type PreUploadCheck struct {
Name string `json:"name"`
Parent Parent `json:"parent"`
Size *int64 `json:"size,omitempty"`
}

// PreUploadCheckResponse is the response from upload preflight check
// if successful
type PreUploadCheckResponse struct {
UploadToken string `json:"upload_token"`
UploadURL string `json:"upload_url"`
}

// PreUploadCheckConflict is returned in the ContextInfo error field
// from PreUploadCheck when the error code is "item_name_in_use"
type PreUploadCheckConflict struct {
Conflicts struct {
Type string `json:"type"`
ID string `json:"id"`
FileVersion struct {
Type string `json:"type"`
ID string `json:"id"`
Sha1 string `json:"sha1"`
} `json:"file_version"`
SequenceID string `json:"sequence_id"`
Etag string `json:"etag"`
Sha1 string `json:"sha1"`
Name string `json:"name"`
} `json:"conflicts"`
}

// UpdateFileModTime is used in Update File Info
type UpdateFileModTime struct {
ContentModifiedAt Time `json:"content_modified_at"`

@@ -14,19 +14,24 @@ import (
"crypto/rsa"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"

"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/jwtutil"

"github.com/youmark/pkcs8"

"github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -37,13 +42,9 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/jwtutil"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"github.com/youmark/pkcs8"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jws"
)
@@ -56,6 +57,7 @@ const (
decayConstant = 2 // bigger for slower decay, exponential
rootURL = "https://api.box.com/2.0"
uploadURL = "https://upload.box.com/api/2.0"
listChunks = 1000 // chunk size to read directory listings
minUploadCutoff = 50000000 // upload cutoff can be no lower than this
defaultUploadCutoff = 50 * 1024 * 1024
tokenURL = "https://api.box.com/oauth2/token"
@@ -82,7 +84,7 @@ func init() {
Name: "box",
Description: "Box",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
@@ -91,15 +93,15 @@ func init() {
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
if err != nil {
return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err)
log.Fatalf("Failed to configure token with jwt authentication: %v", err)
}
// Else, if not using an access token, use oauth2
} else if boxAccessToken == "" || !boxAccessTokenOk {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
err = oauthutil.Config(ctx, "box", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token with oauth authentication: %v", err)
}
}
return nil, nil
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "root_folder_id",
@@ -108,23 +110,23 @@ func init() {
Advanced: true,
}, {
Name: "box_config_file",
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
Help: "Box App config.json location\nLeave blank normally." + env.ShellExpandHelp,
}, {
Name: "access_token",
Help: "Box App Primary Access Token\n\nLeave blank normally.",
Help: "Box App Primary Access Token\nLeave blank normally.",
}, {
Name: "box_sub_type",
Default: "user",
Examples: []fs.OptionExample{{
Value: "user",
Help: "Rclone should act on behalf of a user.",
Help: "Rclone should act on behalf of a user",
}, {
Value: "enterprise",
Help: "Rclone should act on behalf of a service account.",
Help: "Rclone should act on behalf of a service account",
}},
}, {
Name: "upload_cutoff",
Help: "Cutoff for switching to multipart upload (>= 50 MiB).",
Help: "Cutoff for switching to multipart upload (>= 50MB).",
Default: fs.SizeSuffix(defaultUploadCutoff),
Advanced: true,
}, {
@@ -132,16 +134,6 @@ func init() {
Help: "Max number of times to try committing a multipart file.",
Default: 100,
Advanced: true,
}, {
Name: "list_chunk",
Default: 1000,
Help: "Size of listing chunk 1-1000.",
Advanced: true,
}, {
Name: "owned_by",
Default: "",
Help: "Only show items owned by the login (email address) passed in.",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -165,15 +157,15 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
jsonFile = env.ShellExpand(jsonFile)
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
return fmt.Errorf("get box config: %w", err)
log.Fatalf("Failed to configure token: %v", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
return fmt.Errorf("get decrypted private key: %w", err)
log.Fatalf("Failed to configure token: %v", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
return fmt.Errorf("get claims: %w", err)
log.Fatalf("Failed to configure token: %v", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
@@ -185,11 +177,11 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, fmt.Errorf("box: failed to read Box config: %w", err)
return nil, errors.Wrap(err, "box: failed to read Box config")
}
err = json.Unmarshal(file, &boxConfig)
if err != nil {
return nil, fmt.Errorf("box: failed to parse Box config: %w", err)
return nil, errors.Wrap(err, "box: failed to parse Box config")
}
return boxConfig, nil
}
@@ -197,7 +189,7 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
val, err := jwtutil.RandomHex(20)
if err != nil {
return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
return nil, errors.Wrap(err, "box: failed to generate random string for jti")
}

claims = &jws.ClaimSet{
@@ -238,12 +230,12 @@ func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err

block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
if len(rest) > 0 {
return nil, fmt.Errorf("box: extra data included in private key: %w", err)
return nil, errors.Wrap(err, "box: extra data included in private key")
}

rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
if err != nil {
return nil, fmt.Errorf("box: failed to decrypt private key: %w", err)
return nil, errors.Wrap(err, "box: failed to decrypt private key")
}

return rsaKey.(*rsa.PrivateKey), nil
@@ -256,8 +248,6 @@ type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
RootFolderID string `config:"root_folder_id"`
AccessToken string `config:"access_token"`
ListChunk int `config:"list_chunk"`
OwnedBy string `config:"owned_by"`
}

// Fs represents a remote box
@@ -337,13 +327,6 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
authRetry = true
fs.Debugf(nil, "Should retry: %v", err)
}

// Box API errors which should be retries
if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "operation_blocked_temporary" {
fs.Debugf(nil, "Retrying API error %v", err)
return true, err
}

return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

@@ -358,7 +341,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
return nil, err
}

found, err := f.listAll(ctx, directoryID, false, true, true, func(item *api.Item) bool {
found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
if strings.EqualFold(item.Name, leaf) {
info = item
return true
@@ -401,7 +384,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}

if opt.UploadCutoff < minUploadCutoff {
return nil, fmt.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
}

root = parsePath(root)
@@ -412,7 +395,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.AccessToken == "" {
client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, fmt.Errorf("failed to configure Box: %w", err)
return nil, errors.Wrap(err, "failed to configure Box")
}
}

@@ -533,7 +516,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, true, func(item *api.Item) bool {
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
if strings.EqualFold(item.Name, leaf) {
pathIDOut = item.ID
return true
@@ -589,20 +572,17 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, activeOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/folders/" + dirID + "/items",
Parameters: fieldsValue(),
}
opts.Parameters.Set("limit", strconv.Itoa(f.opt.ListChunk))
opts.Parameters.Set("usemarker", "true")
var marker *string
opts.Parameters.Set("limit", strconv.Itoa(listChunks))
offset := 0
OUTER:
for {
if marker != nil {
opts.Parameters.Set("marker", *marker)
}
opts.Parameters.Set("offset", strconv.Itoa(offset))

var result api.FolderItems
var resp *http.Response
@@ -611,7 +591,7 @@ OUTER:
return shouldRetry(ctx, resp, err)
})
if err != nil {
return found, fmt.Errorf("couldn't list files: %w", err)
return found, errors.Wrap(err, "couldn't list files")
}
for i := range result.Entries {
item := &result.Entries[i]
@@ -627,10 +607,7 @@ OUTER:
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
continue
}
if activeOnly && item.ItemStatus != api.ItemStatusActive {
continue
}
if f.opt.OwnedBy != "" && f.opt.OwnedBy != item.OwnedBy.Login {
if item.ItemStatus != api.ItemStatusActive {
continue
}
item.Name = f.opt.Enc.ToStandardName(item.Name)
@@ -639,8 +616,8 @@ OUTER:
break OUTER
}
}
marker = result.NextMarker
if marker == nil {
offset += result.Limit
if offset >= result.TotalCount {
break
}
}
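
The hunk above trades marker-based paging (removed lines) for the older offset-based loop (added lines). A minimal sketch of the offset scheme on its own; the counts are invented:

package main

import "fmt"

func main() {
	const totalCount, limit = 2500, 1000 // hypothetical listing size and page size
	for offset := 0; ; {
		fmt.Printf("GET /folders/<id>/items?limit=%d&offset=%d\n", limit, offset)
		offset += limit
		if offset >= totalCount {
			break
		}
	}
}
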
@@ -662,7 +639,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool {
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
remote := path.Join(dir, info.Name)
if info.Type == api.ItemTypeFolder {
// cache the directory ID for later lookups
@@ -709,80 +686,22 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
return o, leaf, directoryID, nil
}

// preUploadCheck checks to see if a file can be uploaded
//
// It returns "", nil if the file is good to go
// It returns "ID", nil if the file must be updated
func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (ID string, err error) {
check := api.PreUploadCheck{
Name: f.opt.Enc.FromStandardName(leaf),
Parent: api.Parent{
ID: directoryID,
},
}
if size >= 0 {
check.Size = &size
}
opts := rest.Opts{
Method: "OPTIONS",
Path: "/files/content/",
}
var result api.PreUploadCheckResponse
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &check, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "item_name_in_use" {
var conflict api.PreUploadCheckConflict
err = json.Unmarshal(apiErr.ContextInfo, &conflict)
if err != nil {
return "", fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
}
if conflict.Conflicts.Type != api.ItemTypeFile {
return "", fmt.Errorf("pre-upload check: can't overwrite non file with file: %w", err)
}
return conflict.Conflicts.ID, nil
}
return "", fmt.Errorf("pre-upload check: %w", err)
}
return "", nil
}
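
For reference, a sketch of the preflight body preUploadCheck above sends, shaped after the api.PreUploadCheck struct removed earlier in this diff; the name, parent ID and size are invented:

package main

import (
	"encoding/json"
	"fmt"
)

type parent struct {
	ID string `json:"id"`
}

type preUploadCheck struct {
	Name   string `json:"name"`
	Parent parent `json:"parent"`
	Size   *int64 `json:"size,omitempty"`
}

func main() {
	size := int64(1048576)
	b, _ := json.Marshal(preUploadCheck{Name: "report.pdf", Parent: parent{ID: "0"}, Size: &size})
	fmt.Println(string(b)) // {"name":"report.pdf","parent":{"id":"0"},"size":1048576}
}
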
|
||||
|
||||
// Put the object
|
||||
//
|
||||
// Copy the reader in to the new object which is returned
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
// If directory doesn't exist, file doesn't exist so can upload
|
||||
remote := src.Remote()
|
||||
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
|
||||
if err != nil {
|
||||
if err == fs.ErrorDirNotFound {
|
||||
return f.PutUnchecked(ctx, in, src, options...)
|
||||
}
|
||||
existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
|
||||
switch err {
|
||||
case nil:
|
||||
return existingObj, existingObj.Update(ctx, in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
// Not found so create it
|
||||
return f.PutUnchecked(ctx, in, src)
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Preflight check the upload, which returns the ID if the
|
||||
// object already exists
|
||||
ID, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ID == "" {
|
||||
return f.PutUnchecked(ctx, in, src, options...)
|
||||
}
|
||||
|
||||
// If object exists then create a skeleton one with just id
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
id: ID,
|
||||
}
|
||||
return o, o.Update(ctx, in, src, options...)
|
||||
}
|
||||
|
||||
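
Put above makes a three-way decision with the preflight result: bail out on error, upload fresh when the name is free, or update the existing object by ID. A hedged sketch of that branch (hypothetical helper, not the backend's API):

package main

import "fmt"

// decide mirrors the branch Put takes on the preUploadCheck result.
func decide(id string, err error) string {
	switch {
	case err != nil:
		return "fail: preflight error"
	case id == "":
		return "upload: name is free, use PutUnchecked"
	default:
		return "update: overwrite existing file " + id
	}
}

func main() {
	fmt.Println(decide("", nil))      // upload: name is free, use PutUnchecked
	fmt.Println(decide("12345", nil)) // update: overwrite existing file 12345
}
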
// PutStream uploads to the remote path with the modTime given of indeterminate size
@@ -854,7 +773,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return fmt.Errorf("rmdir failed: %w", err)
return errors.Wrap(err, "rmdir failed")
}
f.dirCache.FlushDir(dir)
if err != nil {
@@ -898,7 +817,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
srcPath := srcObj.fs.rootSlash() + srcObj.remote
dstPath := f.rootSlash() + remote
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}

// Create temporary object
@@ -982,7 +901,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to read user info: %w", err)
return nil, errors.Wrap(err, "failed to read user info")
}
// FIXME max upload size would be useful to use in Update
usage = &fs.Usage{
@@ -1116,36 +1035,45 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {

// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) (err error) {
var (
deleteErrors = int64(0)
concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
wg sync.WaitGroup
)
_, err = f.listAll(ctx, "trash", false, false, false, func(item *api.Item) bool {
if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
wg.Add(1)
concurrencyControl <- struct{}{}
go func() {
defer func() {
<-concurrencyControl
wg.Done()
}()
opts := rest.Opts{
Method: "GET",
Path: "/folders/trash/items",
Parameters: url.Values{
"fields": []string{"type", "id"},
},
}
opts.Parameters.Set("limit", strconv.Itoa(listChunks))
offset := 0
for {
opts.Parameters.Set("offset", strconv.Itoa(offset))

var result api.FolderItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't list trash")
}
for i := range result.Entries {
item := &result.Entries[i]
if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
err := f.deletePermanently(ctx, item.Type, item.ID)
if err != nil {
fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
atomic.AddInt64(&deleteErrors, 1)
return errors.Wrap(err, "failed to delete file")
}
}()
} else {
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
} else {
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
continue
}
}
offset += result.Limit
if offset >= result.TotalCount {
break
}
return false
})
wg.Wait()
if deleteErrors != 0 {
return fmt.Errorf("failed to delete %d trash items", deleteErrors)
}
return err
return
}
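
CleanUp above bounds its parallel deletions with a buffered channel used as a semaphore, a WaitGroup for completion, and an atomic error counter. The same pattern in a standalone sketch (illustrative values, not rclone code):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	const workers = 4 // stands in for fs.GetConfig(ctx).Checkers
	var (
		errCount int64
		sem      = make(chan struct{}, workers) // at most `workers` goroutines run at once
		wg       sync.WaitGroup
	)
	for i := 0; i < 20; i++ {
		i := i
		wg.Add(1)
		sem <- struct{}{} // blocks while the semaphore is full
		go func() {
			defer func() {
				<-sem // release the slot
				wg.Done()
			}()
			if i%7 == 0 { // pretend some deletions fail
				atomic.AddInt64(&errCount, 1)
			}
		}()
	}
	wg.Wait()
	if n := atomic.LoadInt64(&errCount); n != 0 {
		fmt.Printf("failed to delete %d items\n", n)
	}
}
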

// DirCacheFlush resets the directory cache - used in testing as an
@@ -1199,11 +1127,8 @@ func (o *Object) Size() int64 {

// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) {
if info.Type == api.ItemTypeFolder {
return fs.ErrorIsDir
}
if info.Type != api.ItemTypeFile {
return fmt.Errorf("%q is %q: %w", o.remote, info.Type, fs.ErrorNotAFile)
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
}
o.hasMetaData = true
o.size = int64(info.Size)
@@ -1303,7 +1228,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// upload does a single non-multipart upload
//
// This is recommended for less than 50 MiB of content
// This is recommended for less than 50 MB of content
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
upload := api.UploadFile{
Name: o.fs.opt.Enc.FromStandardName(leaf),
@@ -1339,7 +1264,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
return err
}
if result.TotalCount != 1 || len(result.Entries) != 1 {
return fmt.Errorf("failed to upload %v - not sure why", o)
return errors.Errorf("failed to upload %v - not sure why", o)
}
return o.setMetaData(&result.Entries[0])
}

@@ -8,7 +8,6 @@ import (
"crypto/sha1"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@@ -16,6 +15,7 @@ import (
"sync"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -140,7 +140,7 @@ outer:
}
}
default:
return nil, fmt.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
return nil, errors.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
}
}
fs.Debugf(o, "commit multipart upload failed %d/%d - trying again in %d seconds (%s)", tries+1, maxTries, delay, why)
@@ -151,7 +151,7 @@ outer:
}
err = json.Unmarshal(body, &result)
if err != nil {
return nil, fmt.Errorf("couldn't decode commit response: %q: %w", body, err)
return nil, errors.Wrapf(err, "couldn't decode commit response: %q", body)
}
return result, nil
}
@@ -177,7 +177,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
// Create upload session
session, err := o.createUploadSession(ctx, leaf, directoryID, size)
if err != nil {
return fmt.Errorf("multipart upload create session failed: %w", err)
return errors.Wrap(err, "multipart upload create session failed")
}
chunkSize := session.PartSize
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))
@@ -222,7 +222,7 @@ outer:
// Read the chunk
_, err = io.ReadFull(in, buf)
if err != nil {
err = fmt.Errorf("multipart upload failed to read source: %w", err)
err = errors.Wrap(err, "multipart upload failed to read source")
break outer
}

@@ -238,7 +238,7 @@ outer:
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...)
if err != nil {
err = fmt.Errorf("multipart upload failed to upload part: %w", err)
err = errors.Wrap(err, "multipart upload failed to upload part")
select {
case errs <- err:
default:
@@ -266,11 +266,11 @@ outer:
// Finalise the upload session
result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
if err != nil {
return fmt.Errorf("multipart upload failed to finalize: %w", err)
return errors.Wrap(err, "multipart upload failed to finalize")
}

if result.TotalCount != 1 || len(result.Entries) != 1 {
return fmt.Errorf("multipart upload failed %v - not sure why", o)
return errors.Errorf("multipart upload failed %v - not sure why", o)
}
return o.setMetaData(&result.Entries[0])
}

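
uploadMultipart above splits a known size into fixed-size parts and tracks a byte offset per part. A standalone sketch of that arithmetic (hypothetical sizes; the real part size comes from the upload session):

package main

import "fmt"

func main() {
	const size, chunkSize = int64(23), int64(8)
	totalParts := int((size + chunkSize - 1) / chunkSize) // ceiling division
	for part := 0; part < totalParts; part++ {
		position := int64(part) * chunkSize
		n := chunkSize
		if position+n > size {
			n = size - position // the last part is short
		}
		fmt.Printf("part %d/%d offset %d length %d\n", part+1, totalParts, position, n)
	}
}
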
100
backend/cache/cache.go
vendored
@@ -1,11 +1,9 @@
//go:build !plan9 && !js
// +build !plan9,!js

package cache

import (
"context"
"errors"
"fmt"
"io"
"math"
@@ -20,6 +18,7 @@ import (
"syscall"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
@@ -69,26 +68,26 @@ func init() {
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to cache.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Help: "Remote to cache.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "plex_url",
Help: "The URL of the Plex server.",
Help: "The URL of the Plex server",
}, {
Name: "plex_username",
Help: "The username of the Plex user.",
Help: "The username of the Plex user",
}, {
Name: "plex_password",
Help: "The password of the Plex user.",
Help: "The password of the Plex user",
IsPassword: true,
}, {
Name: "plex_token",
Help: "The plex token for authentication - auto set normally.",
Help: "The plex token for authentication - auto set normally",
Hide: fs.OptionHideBoth,
Advanced: true,
}, {
Name: "plex_insecure",
Help: "Skip all certificate verification when connecting to the Plex server.",
Help: "Skip all certificate verification when connecting to the Plex server",
Advanced: true,
}, {
Name: "chunk_size",
@@ -99,14 +98,14 @@ changed, any downloaded chunks will be invalid and cache-chunk-path
will need to be cleared or unexpected EOF errors will occur.`,
Default: DefCacheChunkSize,
Examples: []fs.OptionExample{{
Value: "1M",
Help: "1 MiB",
Value: "1m",
Help: "1MB",
}, {
Value: "5M",
Help: "5 MiB",
Help: "5 MB",
}, {
Value: "10M",
Help: "10 MiB",
Help: "10 MB",
}},
}, {
Name: "info_age",
@@ -133,22 +132,22 @@ oldest chunks until it goes under this value.`,
Default: DefCacheTotalChunkSize,
Examples: []fs.OptionExample{{
Value: "500M",
Help: "500 MiB",
Help: "500 MB",
}, {
Value: "1G",
Help: "1 GiB",
Help: "1 GB",
}, {
Value: "10G",
Help: "10 GiB",
Help: "10 GB",
}},
}, {
Name: "db_path",
Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
Help: "Directory to store file structure metadata DB.\n\nThe remote name is used as the DB file name.",
Default: filepath.Join(config.CacheDir, "cache-backend"),
Help: "Directory to store file structure metadata DB.\nThe remote name is used as the DB file name.",
Advanced: true,
}, {
Name: "chunk_path",
Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
Default: filepath.Join(config.CacheDir, "cache-backend"),
Help: `Directory to cache chunk files.

Path to where partial file data (chunks) are stored locally. The remote
@@ -168,7 +167,6 @@ then "--cache-chunk-path" will use the same path as "--cache-db-path".`,
Name: "chunk_clean_interval",
Default: DefCacheChunkCleanInterval,
Help: `How often should the cache perform cleanups of the chunk storage.

The default value should be ok for most people. If you find that the
cache goes over "cache-chunk-total-size" too often then try to lower
this value to force it to perform cleanups more often.`,
@@ -222,7 +220,7 @@ available on the local machine.`,
}, {
Name: "rps",
Default: int(DefCacheRps),
Help: `Limits the number of requests per second to the source FS (-1 to disable).
Help: `Limits the number of requests per second to the source FS (-1 to disable)

This setting places a hard limit on the number of requests per second
that cache will be doing to the cloud provider remote and try to
@@ -243,7 +241,7 @@ still pass.`,
}, {
Name: "writes",
Default: DefCacheWrites,
Help: `Cache file data on writes through the FS.
Help: `Cache file data on writes through the FS

If you need to read files immediately after you upload them through
cache you can enable this flag to have their data stored in the
@@ -264,7 +262,7 @@ provider`,
}, {
Name: "tmp_wait_time",
Default: DefCacheTmpWaitTime,
Help: `How long should files be stored in local cache before being uploaded.
Help: `How long should files be stored in local cache before being uploaded

This is the duration that a file must wait in the temporary location
_cache-tmp-upload-path_ before it is selected for upload.
@@ -275,7 +273,7 @@ to start the upload if a queue formed for this purpose.`,
}, {
Name: "db_wait_time",
Default: DefCacheDbWaitTime,
Help: `How long to wait for the DB to be available - 0 is unlimited.
Help: `How long to wait for the DB to be available - 0 is unlimited

Only one process can have the DB open at any one time, so rclone waits
for this duration for the DB to become available before it gives an
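
The option names above map directly to keys in an rclone config section. A minimal cache remote might look like this - the values are illustrative only, and "gdrive" is an assumed pre-existing remote:

[gcache]
type = cache
remote = gdrive:backup
chunk_size = 5M
info_age = 1d
chunk_total_size = 10G
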
@@ -341,14 +339,8 @@ func parseRootPath(path string) (string, error) {
return strings.Trim(path, "/"), nil
}

var warnDeprecated sync.Once

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
warnDeprecated.Do(func() {
fs.Logf(nil, "WARNING: Cache backend is deprecated and may be removed in future. Please use VFS instead.")
})

// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -356,7 +348,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
return nil, err
}
if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
return nil, fmt.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
return nil, errors.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
}

@@ -366,13 +358,13 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F

rpath, err := parseRootPath(rootPath)
if err != nil {
return nil, fmt.Errorf("failed to clean root path %q: %w", rootPath, err)
return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
}

remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
wrappedFs, wrapErr := cache.Get(ctx, remotePath)
if wrapErr != nil && wrapErr != fs.ErrorIsFile {
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remotePath, wrapErr)
return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
}
var fsErr error
fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
@@ -401,7 +393,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
if opt.PlexToken != "" {
f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken, opt.PlexInsecure)
if err != nil {
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
}
} else {
if opt.PlexPassword != "" && opt.PlexUsername != "" {
@@ -413,7 +405,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
m.Set("plex_token", token)
})
if err != nil {
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
}
}
}
@@ -422,8 +414,8 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
dbPath := f.opt.DbPath
chunkPath := f.opt.ChunkPath
// if the dbPath is non default but the chunk path is default, we overwrite the last to follow the same one as dbPath
if dbPath != filepath.Join(config.GetCacheDir(), "cache-backend") &&
chunkPath == filepath.Join(config.GetCacheDir(), "cache-backend") {
if dbPath != filepath.Join(config.CacheDir, "cache-backend") &&
chunkPath == filepath.Join(config.CacheDir, "cache-backend") {
chunkPath = dbPath
}
if filepath.Ext(dbPath) != "" {
@@ -434,11 +426,11 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
}
err = os.MkdirAll(dbPath, os.ModePerm)
if err != nil {
return nil, fmt.Errorf("failed to create cache directory %v: %w", dbPath, err)
return nil, errors.Wrapf(err, "failed to create cache directory %v", dbPath)
}
err = os.MkdirAll(chunkPath, os.ModePerm)
if err != nil {
return nil, fmt.Errorf("failed to create cache directory %v: %w", chunkPath, err)
return nil, errors.Wrapf(err, "failed to create cache directory %v", chunkPath)
}

dbPath = filepath.Join(dbPath, name+".db")
@@ -450,7 +442,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
DbWaitTime: time.Duration(opt.DbWaitTime),
})
if err != nil {
return nil, fmt.Errorf("failed to start cache db: %w", err)
return nil, errors.Wrapf(err, "failed to start cache db")
}
// Trap SIGINT and SIGTERM to close the DB handle gracefully
c := make(chan os.Signal, 1)
@@ -484,12 +476,12 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
if f.opt.TempWritePath != "" {
err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm)
if err != nil {
return nil, fmt.Errorf("failed to create cache directory %v: %w", f.opt.TempWritePath, err)
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
}
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
if err != nil {
return nil, fmt.Errorf("failed to create temp fs: %w", err)
return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
}
fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
@@ -606,7 +598,7 @@ func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err er
out = make(rc.Params)
m, err := f.Stats()
if err != nil {
return out, fmt.Errorf("error while getting cache stats")
return out, errors.Errorf("error while getting cache stats")
}
out["status"] = "ok"
out["stats"] = m
@@ -633,7 +625,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
out = make(rc.Params)
remoteInt, ok := in["remote"]
if !ok {
return out, fmt.Errorf("remote is needed")
return out, errors.Errorf("remote is needed")
}
remote := remoteInt.(string)
withData := false
@@ -644,7 +636,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,

remote = f.unwrapRemote(remote)
if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
return out, fmt.Errorf("%s doesn't exist in cache", remote)
return out, errors.Errorf("%s doesn't exist in cache", remote)
}

co := NewObject(f, remote)
@@ -653,7 +645,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
cd := NewDirectory(f, remote)
err := f.cache.ExpireDir(cd)
if err != nil {
return out, fmt.Errorf("error expiring directory: %w", err)
return out, errors.WithMessage(err, "error expiring directory")
}
// notify vfs too
f.notifyChangeUpstream(cd.Remote(), fs.EntryDirectory)
@@ -664,7 +656,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
// expire the entry
err = f.cache.ExpireObject(co, withData)
if err != nil {
return out, fmt.Errorf("error expiring file: %w", err)
return out, errors.WithMessage(err, "error expiring file")
}
// notify vfs too
f.notifyChangeUpstream(co.Remote(), fs.EntryObject)
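
httpExpireRemote above backs the cache/expire remote-control command. An illustrative invocation (the path is an example):

rclone rc cache/expire remote=path/to/sub/folder/ withData=true
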
@@ -685,24 +677,24 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
case 1:
start, err = strconv.ParseInt(ints[0], 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid range: %q", part)
return nil, errors.Errorf("invalid range: %q", part)
}
end = start + 1
case 2:
if ints[0] != "" {
start, err = strconv.ParseInt(ints[0], 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid range: %q", part)
return nil, errors.Errorf("invalid range: %q", part)
}
}
if ints[1] != "" {
end, err = strconv.ParseInt(ints[1], 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid range: %q", part)
return nil, errors.Errorf("invalid range: %q", part)
}
}
default:
return nil, fmt.Errorf("invalid range: %q", part)
return nil, errors.Errorf("invalid range: %q", part)
}
crs = append(crs, chunkRange{start: start, end: end})
}
@@ -757,18 +749,18 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
delete(in, "chunks")
crs, err := parseChunks(s)
if err != nil {
return nil, fmt.Errorf("invalid chunks parameter: %w", err)
return nil, errors.Wrap(err, "invalid chunks parameter")
}
var files [][2]string
for k, v := range in {
if !strings.HasPrefix(k, "file") {
return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
return nil, errors.Errorf("invalid parameter %s=%s", k, v)
}
switch v := v.(type) {
case string:
files = append(files, [2]string{v, f.unwrapRemote(v)})
default:
return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
return nil, errors.Errorf("invalid parameter %s=%s", k, v)
}
}
type fileStatus struct {
@@ -1124,7 +1116,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
case fs.Directory:
_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
default:
return fmt.Errorf("Unknown object type %T", entry)
return errors.Errorf("Unknown object type %T", entry)
}
}

55
backend/cache/cache_internal_test.go
vendored
@@ -1,5 +1,5 @@
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
// +build !plan9,!js
// +build !race

package cache_test

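
The hunk above trades the combined //go:build expression for the legacy split form. In the old syntax a comma within a // +build line means AND, and separate // +build lines also AND together, so the two legacy lines are equivalent to the single modern constraint:

// +build !plan9,!js
// +build !race

//go:build !plan9 && !js && !race
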
@@ -7,7 +7,6 @@ import (
"bytes"
"context"
"encoding/base64"
"errors"
goflag "flag"
"fmt"
"io"
@@ -17,12 +16,12 @@ import (
"os"
"path"
"path/filepath"
"runtime"
"runtime/debug"
"strings"
"testing"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/backend/cache"
"github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
@@ -294,9 +293,6 @@ func TestInternalCachedWrittenContentMatches(t *testing.T) {
}

func TestInternalDoubleWrittenContentMatches(t *testing.T) {
if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -446,7 +442,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
return err
}
if coSize != expectedSize {
return fmt.Errorf("%v <> %v", coSize, expectedSize)
return errors.Errorf("%v <> %v", coSize, expectedSize)
}
return nil
}, 12, time.Second*10)
@@ -502,7 +498,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
}
if len(li) != 2 {
log.Printf("not expected listing /test: %v", li)
return fmt.Errorf("not expected listing /test: %v", li)
return errors.Errorf("not expected listing /test: %v", li)
}

li, err = runInstance.list(t, rootFs, "test/one")
@@ -512,7 +508,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
}
if len(li) != 0 {
log.Printf("not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
return errors.Errorf("not expected listing /test/one: %v", li)
}

li, err = runInstance.list(t, rootFs, "test/second")
@@ -522,21 +518,21 @@ func TestInternalMoveWithNotify(t *testing.T) {
}
if len(li) != 1 {
log.Printf("not expected listing /test/second: %v", li)
return fmt.Errorf("not expected listing /test/second: %v", li)
return errors.Errorf("not expected listing /test/second: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "data.bin" {
log.Printf("not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
return errors.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/second/data.bin" {
log.Printf("not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
return errors.Errorf("not expected remote: %v", di.Remote())
}
} else {
log.Printf("unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
return errors.Errorf("unexpected listing: %v", li)
}

log.Printf("complete listing: %v", li)
@@ -591,17 +587,17 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
if !found {
log.Printf("not found /test")
return fmt.Errorf("not found /test")
return errors.Errorf("not found /test")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
if !found {
log.Printf("not found /test/one")
return fmt.Errorf("not found /test/one")
return errors.Errorf("not found /test/one")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
if !found {
log.Printf("not found /test/one/test2")
return fmt.Errorf("not found /test/one/test2")
return errors.Errorf("not found /test/one/test2")
}
li, err := runInstance.list(t, rootFs, "test/one")
if err != nil {
@@ -610,21 +606,21 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
}
if len(li) != 1 {
log.Printf("not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
return errors.Errorf("not expected listing /test/one: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "test2" {
log.Printf("not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
return errors.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/one/test2" {
log.Printf("not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
return errors.Errorf("not expected remote: %v", di.Remote())
}
} else {
log.Printf("unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
return errors.Errorf("unexpected listing: %v", li)
}
log.Printf("complete listing /test/one/test2")
return nil
@@ -685,9 +681,6 @@ func TestInternalCacheWrites(t *testing.T) {
}

func TestInternalMaxChunkSizeRespected(t *testing.T) {
if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -843,7 +836,7 @@ func newRun() *run {
if uploadDir == "" {
r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
if err != nil {
panic(fmt.Sprintf("Failed to create temp dir: %v", err))
log.Fatalf("Failed to create temp dir: %v", err)
}
} else {
r.tmpUploadDir = uploadDir
@@ -926,9 +919,9 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
}
runInstance.rootIsCrypt = rootIsCrypt
runInstance.dbPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote+".db")
runInstance.chunkPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote)
runInstance.vfsCachePath = filepath.Join(config.GetCacheDir(), "vfs", remote)
runInstance.dbPath = filepath.Join(config.CacheDir, "cache-backend", cacheRemote+".db")
runInstance.chunkPath = filepath.Join(config.CacheDir, "cache-backend", cacheRemote)
runInstance.vfsCachePath = filepath.Join(config.CacheDir, "vfs", remote)
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
require.NoError(t, err)

@@ -1062,7 +1055,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)

if !noLengthCheck && size != int64(len(checkSample)) {
return checkSample, fmt.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
}
return checkSample, nil
}
@@ -1257,7 +1250,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
case state = <-buCh:
// continue
case <-time.After(maxDuration):
waitCh <- fmt.Errorf("Timed out waiting for background upload: %v", remote)
waitCh <- errors.Errorf("Timed out waiting for background upload: %v", remote)
return
}
checkRemote := state.Remote
@@ -1274,7 +1267,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
return
}
}
waitCh <- fmt.Errorf("Too many attempts to wait for the background upload: %v", remote)
waitCh <- errors.Errorf("Too many attempts to wait for the background upload: %v", remote)
}()
return waitCh
}
4
backend/cache/cache_test.go
vendored
@@ -1,7 +1,7 @@
// Test Cache filesystem interface

//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
// +build !plan9,!js
// +build !race

package cache_test

1
backend/cache/cache_unsupported.go
vendored
@@ -1,7 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "

//go:build plan9 || js
// +build plan9 js

package cache

4
backend/cache/cache_upload_test.go
vendored
@@ -1,5 +1,5 @@
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
// +build !plan9,!js
// +build !race

package cache_test

1
backend/cache/directory.go
vendored
@@ -1,4 +1,3 @@
//go:build !plan9 && !js
// +build !plan9,!js

package cache

7
backend/cache/handle.go
vendored
@@ -1,11 +1,9 @@
//go:build !plan9 && !js
// +build !plan9,!js

package cache

import (
"context"
"errors"
"fmt"
"io"
"path"
@@ -14,6 +12,7 @@ import (
"sync"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
)
@@ -243,7 +242,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
return nil, io.ErrUnexpectedEOF
}

return nil, fmt.Errorf("chunk not found %v", chunkStart)
return nil, errors.Errorf("chunk not found %v", chunkStart)
}

// first chunk will be aligned with the start
@@ -323,7 +322,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
r.offset = r.cachedObject.Size() + offset
default:
err = fmt.Errorf("cache: unimplemented seek whence %v", whence)
err = errors.Errorf("cache: unimplemented seek whence %v", whence)
}

chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
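
Handle.Seek above implements the standard io.Seeker contract, then rounds the resulting offset down to a chunk boundary with r.offset - (r.offset % chunkSize). The three whence modes, demonstrated against the standard library in a self-contained example:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	r := strings.NewReader("0123456789") // 10 bytes

	pos, _ := r.Seek(2, io.SeekStart) // absolute position
	fmt.Println(pos)                  // 2

	pos, _ = r.Seek(3, io.SeekCurrent) // relative to the current offset
	fmt.Println(pos)                   // 5

	pos, _ = r.Seek(-1, io.SeekEnd) // relative to the size, as in the default branch above
	fmt.Println(pos)                // 9
}
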
15
backend/cache/object.go
vendored
@@ -1,16 +1,15 @@
//go:build !plan9 && !js
// +build !plan9,!js

package cache

import (
"context"
"fmt"
"io"
"path"
"sync"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers"
@@ -178,14 +177,10 @@ func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
}
if o.isTempFile() {
liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
if err != nil {
err = fmt.Errorf("in parent fs %v: %w", o.ParentFs, err)
}
err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
} else {
liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
if err != nil {
err = fmt.Errorf("in cache fs %v: %w", o.CacheFs.Fs, err)
}
err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
}
if err != nil {
fs.Errorf(o, "error refreshing object in : %v", err)
@@ -257,7 +252,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
if o.isTempFile() && o.tempFileStartedUpload() {
return fmt.Errorf("%v is currently uploading, can't update", o)
return errors.Errorf("%v is currently uploading, can't update", o)
}
}
fs.Debugf(o, "updating object contents with size %v", src.Size())
@@ -296,7 +291,7 @@ func (o *Object) Remove(ctx context.Context) error {
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
if o.isTempFile() && o.tempFileStartedUpload() {
return fmt.Errorf("%v is currently uploading, can't delete", o)
return errors.Errorf("%v is currently uploading, can't delete", o)
}
}
err := o.Object.Remove(ctx)
1
backend/cache/plex.go
vendored
@@ -1,4 +1,3 @@
//go:build !plan9 && !js
// +build !plan9,!js

package cache

5
backend/cache/storage_memory.go
vendored
@@ -1,15 +1,14 @@
//go:build !plan9 && !js
// +build !plan9,!js

package cache

import (
"fmt"
"strconv"
"strings"
"time"

cache "github.com/patrickmn/go-cache"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)

@@ -53,7 +52,7 @@ func (m *Memory) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
return data, nil
}

return nil, fmt.Errorf("couldn't get cached object data at offset %v", offset)
return nil, errors.Errorf("couldn't get cached object data at offset %v", offset)
}

// AddChunk adds a new chunk of a cached object
84
backend/cache/storage_persistent.go
vendored
@@ -1,4 +1,3 @@
//go:build !plan9 && !js
// +build !plan9,!js

package cache
@@ -17,6 +16,7 @@ import (
"sync"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
bolt "go.etcd.io/bbolt"
@@ -119,11 +119,11 @@ func (b *Persistent) connect() error {

err = os.MkdirAll(b.dataPath, os.ModePerm)
if err != nil {
return fmt.Errorf("failed to create a data directory %q: %w", b.dataPath, err)
return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
}
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
if err != nil {
return fmt.Errorf("failed to open a cache connection to %q: %w", b.dbPath, err)
return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
}
if b.features.PurgeDb {
b.Purge()
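
connect opens the bolt database that the rest of Persistent drives through View (read-only) and Update (read-write) transactions. A minimal standalone sketch of that pattern (example bucket and key names, not rclone code):

package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("example.db", 0644, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Update runs a read-write transaction; returning an error rolls it back.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("dirs"))
		if err != nil {
			return err
		}
		return b.Put([]byte("key"), []byte("value"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// View runs a read-only transaction.
	_ = db.View(func(tx *bolt.Tx) error {
		fmt.Printf("%s\n", tx.Bucket([]byte("dirs")).Get([]byte("key")))
		return nil
	})
}
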
@@ -175,7 +175,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(remote, false, tx)
if bucket == nil {
return fmt.Errorf("couldn't open bucket (%v)", remote)
return errors.Errorf("couldn't open bucket (%v)", remote)
}

data := bucket.Get([]byte("."))
@@ -183,7 +183,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
return json.Unmarshal(data, cd)
}

return fmt.Errorf("%v not found", remote)
return errors.Errorf("%v not found", remote)
})

return cd, err
@@ -208,7 +208,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
bucket = b.getBucket(cachedDirs[0].Dir, true, tx)
}
if bucket == nil {
return fmt.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
return errors.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
}

for _, cachedDir := range cachedDirs {
@@ -225,7 +225,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {

encoded, err := json.Marshal(cachedDir)
if err != nil {
return fmt.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
return errors.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
}
err = b.Put([]byte("."), encoded)
if err != nil {
@@ -243,17 +243,17 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedDir.abs(), false, tx)
if bucket == nil {
return fmt.Errorf("couldn't open bucket (%v)", cachedDir.abs())
return errors.Errorf("couldn't open bucket (%v)", cachedDir.abs())
}

val := bucket.Get([]byte("."))
if val != nil {
err := json.Unmarshal(val, cachedDir)
if err != nil {
return fmt.Errorf("error during unmarshalling obj: %v", err)
return errors.Errorf("error during unmarshalling obj: %v", err)
}
} else {
return fmt.Errorf("missing cached dir: %v", cachedDir)
return errors.Errorf("missing cached dir: %v", cachedDir)
}

c := bucket.Cursor()
@@ -268,7 +268,7 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
// we try to find a cached meta for the dir
currentBucket := c.Bucket().Bucket(k)
if currentBucket == nil {
return fmt.Errorf("couldn't open bucket (%v)", string(k))
return errors.Errorf("couldn't open bucket (%v)", string(k))
}

metaKey := currentBucket.Get([]byte("."))
@@ -317,7 +317,7 @@ func (b *Persistent) RemoveDir(fp string) error {
err = b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cleanPath(parentDir), false, tx)
if bucket == nil {
return fmt.Errorf("couldn't open bucket (%v)", fp)
return errors.Errorf("couldn't open bucket (%v)", fp)
}
// delete the cached dir
err := bucket.DeleteBucket([]byte(cleanPath(dirName)))
@@ -377,13 +377,13 @@ func (b *Persistent) GetObject(cachedObject *Object) (err error) {
return b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedObject.Dir, false, tx)
if bucket == nil {
return fmt.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
return errors.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
}
val := bucket.Get([]byte(cachedObject.Name))
if val != nil {
return json.Unmarshal(val, cachedObject)
}
return fmt.Errorf("couldn't find object (%v)", cachedObject.Name)
return errors.Errorf("couldn't find object (%v)", cachedObject.Name)
})
}

@@ -392,16 +392,16 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedObject.Dir, true, tx)
if bucket == nil {
return fmt.Errorf("couldn't open parent bucket for %v", cachedObject)
return errors.Errorf("couldn't open parent bucket for %v", cachedObject)
}
// cache Object Info
encoded, err := json.Marshal(cachedObject)
if err != nil {
return fmt.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
}
err = bucket.Put([]byte(cachedObject.Name), encoded)
if err != nil {
return fmt.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
}
return nil
})
@@ -413,7 +413,7 @@ func (b *Persistent) RemoveObject(fp string) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cleanPath(parentDir), false, tx)
if bucket == nil {
return fmt.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
return errors.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
}
err := bucket.Delete([]byte(cleanPath(objName)))
if err != nil {
@@ -445,7 +445,7 @@ func (b *Persistent) HasEntry(remote string) bool {
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(dir, false, tx)
if bucket == nil {
return fmt.Errorf("couldn't open parent bucket for %v", remote)
return errors.Errorf("couldn't open parent bucket for %v", remote)
}
if f := bucket.Bucket([]byte(name)); f != nil {
return nil
@@ -454,7 +454,7 @@ func (b *Persistent) HasEntry(remote string) bool {
return nil
}

return fmt.Errorf("couldn't find object (%v)", remote)
return errors.Errorf("couldn't find object (%v)", remote)
})
if err == nil {
return true
@@ -554,7 +554,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
err := b.db.Update(func(tx *bolt.Tx) error {
dataTsBucket := tx.Bucket([]byte(DataTsBucket))
if dataTsBucket == nil {
return fmt.Errorf("Couldn't open (%v) bucket", DataTsBucket)
return errors.Errorf("Couldn't open (%v) bucket", DataTsBucket)
}
// iterate through ts
c := dataTsBucket.Cursor()
@@ -732,7 +732,7 @@ func (b *Persistent) GetChunkTs(path string, offset int64) (time.Time, error) {
return nil
}
}
return fmt.Errorf("not found %v-%v", path, offset)
return errors.Errorf("not found %v-%v", path, offset)
})

return t, err
@@ -772,7 +772,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return fmt.Errorf("couldn't bucket for %v", tempBucket)
return errors.Errorf("couldn't bucket for %v", tempBucket)
}
tempObj := &tempUploadInfo{
DestPath: destPath,
@@ -783,11 +783,11 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
// cache Object Info
encoded, err := json.Marshal(tempObj)
if err != nil {
return fmt.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
}
err = bucket.Put([]byte(destPath), encoded)
if err != nil {
return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
}

return nil
@@ -802,7 +802,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
err = b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return fmt.Errorf("couldn't bucket for %v", tempBucket)
return errors.Errorf("couldn't bucket for %v", tempBucket)
}

c := bucket.Cursor()
@@ -835,7 +835,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
return nil
}

return fmt.Errorf("no pending upload found")
return errors.Errorf("no pending upload found")
})

return destPath, err
@@ -846,14 +846,14 @@ func (b *Persistent) SearchPendingUpload(remote string) (started bool, err error
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(tempBucket))
if bucket == nil {
return fmt.Errorf("couldn't bucket for %v", tempBucket)
return errors.Errorf("couldn't bucket for %v", tempBucket)
}

var tempObj = &tempUploadInfo{}
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
return errors.Errorf("pending upload (%v) not found %v", remote, err)
}

started = tempObj.Started
@@ -868,7 +868,7 @@ func (b *Persistent) searchPendingUploadFromDir(dir string) (remotes []string, e
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(tempBucket))
if bucket == nil {
return fmt.Errorf("couldn't bucket for %v", tempBucket)
return errors.Errorf("couldn't bucket for %v", tempBucket)
}

c := bucket.Cursor()
@@ -898,22 +898,22 @@ func (b *Persistent) rollbackPendingUpload(remote string) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return fmt.Errorf("couldn't bucket for %v", tempBucket)
return errors.Errorf("couldn't bucket for %v", tempBucket)
}
var tempObj = &tempUploadInfo{}
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
return errors.Errorf("pending upload (%v) not found %v", remote, err)
}
tempObj.Started = false
v2, err := json.Marshal(tempObj)
if err != nil {
return fmt.Errorf("pending upload not updated %v", err)
return errors.Errorf("pending upload not updated %v", err)
}
err = bucket.Put([]byte(tempObj.DestPath), v2)
if err != nil {
return fmt.Errorf("pending upload not updated %v", err)
return errors.Errorf("pending upload not updated %v", err)
}
return nil
})
@@ -926,7 +926,7 @@ func (b *Persistent) removePendingUpload(remote string) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return fmt.Errorf("couldn't bucket for %v", tempBucket)
return errors.Errorf("couldn't bucket for %v", tempBucket)
}
return bucket.Delete([]byte(remote))
})
@@ -941,17 +941,17 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return fmt.Errorf("couldn't bucket for %v", tempBucket)
return errors.Errorf("couldn't bucket for %v", tempBucket)
}

var tempObj = &tempUploadInfo{}
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
return errors.Errorf("pending upload (%v) not found %v", remote, err)
}
if tempObj.Started {
return fmt.Errorf("pending upload already started %v", remote)
return errors.Errorf("pending upload already started %v", remote)
}
err = fn(tempObj)
if err != nil {
@@ -969,11 +969,11 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
}
v2, err := json.Marshal(tempObj)
if err != nil {
return fmt.Errorf("pending upload not updated %v", err)
return errors.Errorf("pending upload not updated %v", err)
}
err = bucket.Put([]byte(tempObj.DestPath), v2)
if err != nil {
return fmt.Errorf("pending upload not updated %v", err)
return errors.Errorf("pending upload not updated %v", err)
}

return nil
@@ -1014,11 +1014,11 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
// cache Object Info
encoded, err := json.Marshal(tempObj)
if err != nil {
return fmt.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
}
err = bucket.Put([]byte(destPath), encoded)
if err != nil {
return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
}
fs.Debugf(cacheFs, "reconciled temporary upload: %v", destPath)
}

@@ -8,7 +8,6 @@ import (
"crypto/sha1"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
gohash "hash"
"io"
@@ -22,6 +21,7 @@ import (
"sync"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
@@ -150,13 +150,12 @@ func init() {
Name: "remote",
Required: true,
Help: `Remote to chunk/unchunk.

Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
"myremote:bucket" or maybe "myremote:" (not recommended).`,
}, {
Name: "chunk_size",
Advanced: false,
Default: fs.SizeSuffix(2147483648), // 2 GiB
Default: fs.SizeSuffix(2147483648), // 2GB
Help: `Files larger than chunk size will be split in chunks.`,
}, {
Name: "name_format",
@@ -164,7 +163,6 @@ Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
Hide: fs.OptionHideCommandLine,
Default: `*.rclone_chunk.###`,
Help: `String format of chunk file names.

The two placeholders are: base file name (*) and chunk number (#...).
There must be one and only one asterisk and one or more consecutive hash characters.
If chunk number has less digits than the number of hashes, it is left-padded by zeros.
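
For example, with the default format *.rclone_chunk.### and numbering starting from 1, a composite file data.bin split into three chunks would be stored under names like (illustrative):

data.bin.rclone_chunk.001
data.bin.rclone_chunk.002
data.bin.rclone_chunk.003
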
@@ -176,57 +174,48 @@ Possible chunk files are ignored if their name does not match given format.`,
|
||||
Hide: fs.OptionHideCommandLine,
|
||||
Default: 1,
|
||||
Help: `Minimum valid chunk number. Usually 0 or 1.
|
||||
|
||||
By default chunk numbers start from 1.`,
|
||||
}, {
|
||||
Name: "meta_format",
|
||||
Advanced: true,
|
||||
Hide: fs.OptionHideCommandLine,
|
||||
Default: "simplejson",
|
||||
Help: `Format of the metadata object or "none".
|
||||
|
||||
By default "simplejson".
|
||||
Help: `Format of the metadata object or "none". By default "simplejson".
|
||||
Metadata is a small JSON file named after the composite file.`,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "none",
|
||||
Help: `Do not use metadata files at all.
|
||||
Requires hash type "none".`,
|
||||
Help: `Do not use metadata files at all. Requires hash type "none".`,
|
||||
}, {
|
||||
Value: "simplejson",
|
||||
Help: `Simple JSON supports hash sums and chunk validation.
|
||||
|
||||
It has the following fields: ver, size, nchunks, md5, sha1.`,
|
||||
}},
}, {
Name: "hash_type",
Advanced: false,
Default: "md5",
Help: `Choose how chunker handles hash sums.

All modes but "none" require metadata.`,
Help: `Choose how chunker handles hash sums. All modes but "none" require metadata.`,
Examples: []fs.OptionExample{{
Value: "none",
Help: `Pass any hash supported by wrapped remote for non-chunked files.
Return nothing otherwise.`,
Help: `Pass any hash supported by wrapped remote for non-chunked files, return nothing otherwise`,
}, {
Value: "md5",
Help: `MD5 for composite files.`,
Help: `MD5 for composite files`,
}, {
Value: "sha1",
Help: `SHA1 for composite files.`,
Help: `SHA1 for composite files`,
}, {
Value: "md5all",
Help: `MD5 for all files.`,
Help: `MD5 for all files`,
}, {
Value: "sha1all",
Help: `SHA1 for all files.`,
Help: `SHA1 for all files`,
}, {
Value: "md5quick",
Help: `Copying a file to chunker will request MD5 from the source.
Falling back to SHA1 if unsupported.`,
Help: `Copying a file to chunker will request MD5 from the source falling back to SHA1 if unsupported`,
}, {
Value: "sha1quick",
Help: `Similar to "md5quick" but prefers SHA1 over MD5.`,
Help: `Similar to "md5quick" but prefers SHA1 over MD5`,
}},
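A simplified sketch (hypothetical helper, not the backend's code) of the "md5quick"/"sha1quick" behaviour described above: prefer the requested hash if the source can provide it, otherwise fall back to the other one.

```go
// pickQuickHash picks the hash a "quick" mode would request from a source,
// given a predicate reporting which hash names the source supports.
func pickQuickHash(preferMD5 bool, srcSupports func(name string) bool) string {
	primary, fallback := "md5", "sha1"
	if !preferMD5 {
		primary, fallback = "sha1", "md5"
	}
	if srcSupports(primary) {
		return primary
	}
	if srcSupports(fallback) {
		return fallback
	}
	return "" // no hash available; nothing is stored
}
```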
}, {
Name: "fail_hard",
@@ -290,13 +279,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,

baseName, basePath, err := fspath.SplitFs(remote)
if err != nil {
return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err)
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
}
// Look for a file first
remotePath := fspath.JoinRootPath(basePath, rpath)
baseFs, err := cache.Get(ctx, baseName+remotePath)
if err != fs.ErrorIsFile && err != nil {
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", baseName+remotePath, err)
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", baseName+remotePath)
}
if !operations.CanServerSideMove(baseFs) {
return nil, errors.New("can't use chunker on a backend which doesn't support server-side move or copy")
@@ -386,7 +375,7 @@ type Fs struct {
// configure must be called only from NewFs or by unit tests.
func (f *Fs) configure(nameFormat, metaFormat, hashType, transactionMode string) error {
if err := f.setChunkNameFormat(nameFormat); err != nil {
return fmt.Errorf("invalid name format '%s': %w", nameFormat, err)
return errors.Wrapf(err, "invalid name format '%s'", nameFormat)
}
if err := f.setMetaFormat(metaFormat); err != nil {
return err
@@ -443,10 +432,10 @@ func (f *Fs) setHashType(hashType string) error {
f.hashFallback = true
case "md5all":
f.useMD5 = true
f.hashAll = !f.base.Hashes().Contains(hash.MD5) || f.base.Features().SlowHash
f.hashAll = !f.base.Hashes().Contains(hash.MD5)
case "sha1all":
f.useSHA1 = true
f.hashAll = !f.base.Hashes().Contains(hash.SHA1) || f.base.Features().SlowHash
f.hashAll = !f.base.Hashes().Contains(hash.SHA1)
default:
return fmt.Errorf("unsupported hash type '%s'", hashType)
}
@@ -823,7 +812,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
tempEntries = append(tempEntries, wrapDir)
default:
if f.opt.FailHard {
return nil, fmt.Errorf("unknown object type %T", entry)
return nil, fmt.Errorf("Unknown object type %T", entry)
}
fs.Debugf(f, "unknown object type %T", entry)
}
@@ -878,7 +867,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// ignores non-chunked objects and skips chunk size checks.
func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.Object, error) {
if err := f.forbidChunk(false, remote); err != nil {
return nil, fmt.Errorf("can't access: %w", err)
return nil, errors.Wrap(err, "can't access")
}

var (
@@ -927,7 +916,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
case fs.ErrorDirNotFound:
entries = nil
default:
return nil, fmt.Errorf("can't detect composite file: %w", err)
return nil, errors.Wrap(err, "can't detect composite file")
}

if f.useNoRename {
@@ -1067,7 +1056,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
case ErrMetaTooBig, ErrMetaUnknown:
return err // return these errors unwrapped for unit tests
default:
return fmt.Errorf("invalid metadata: %w", err)
return errors.Wrap(err, "invalid metadata")
}
if o.size != metaInfo.Size() || len(o.chunks) != metaInfo.nChunks {
return errors.New("metadata doesn't match file size")
@@ -1110,7 +1099,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {

switch o.f.opt.MetaFormat {
case "simplejson":
if len(data) > maxMetadataSizeWritten {
if data != nil && len(data) > maxMetadataSizeWritten {
return "", nil // this was likely not a metadata object, return empty xactID but don't throw error
}
var metadata metaSimpleJSON
@@ -1132,7 +1121,7 @@ func (f *Fs) put(

// Perform consistency checks
if err := f.forbidChunk(src, remote); err != nil {
return nil, fmt.Errorf("%s refused: %w", action, err)
return nil, errors.Wrap(err, action+" refused")
}
if target == nil {
// Get target object with a quick directory scan
@@ -1146,7 +1135,7 @@ func (f *Fs) put(
obj := target.(*Object)
if err := obj.readMetadata(ctx); err == ErrMetaUnknown {
// refuse to update a file of unsupported format
return nil, fmt.Errorf("refusing to %s: %w", action, err)
return nil, errors.Wrap(err, "refusing to "+action)
}
}

@@ -1225,7 +1214,7 @@ func (f *Fs) put(
// and skips the "EOF" read. Hence, switch to next limit here.
if !(c.chunkLimit == 0 || c.chunkLimit == c.chunkSize || c.sizeTotal == -1 || c.done) {
silentlyRemove(ctx, chunk)
return nil, fmt.Errorf("destination ignored %d data bytes", c.chunkLimit)
return nil, fmt.Errorf("Destination ignored %d data bytes", c.chunkLimit)
}
c.chunkLimit = c.chunkSize

@@ -1234,7 +1223,7 @@ func (f *Fs) put(

// Validate uploaded size
if c.sizeTotal != -1 && c.readCount != c.sizeTotal {
return nil, fmt.Errorf("incorrect upload size %d != %d", c.readCount, c.sizeTotal)
return nil, fmt.Errorf("Incorrect upload size %d != %d", c.readCount, c.sizeTotal)
}

// Check for input that looks like valid metadata
@@ -1271,7 +1260,7 @@ func (f *Fs) put(
sizeTotal += chunk.Size()
}
if sizeTotal != c.readCount {
return nil, fmt.Errorf("incorrect chunks size %d != %d", sizeTotal, c.readCount)
return nil, fmt.Errorf("Incorrect chunks size %d != %d", sizeTotal, c.readCount)
}

// If previous object was chunked, remove its chunks
@@ -1459,7 +1448,7 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
c.accountBytes(size)
return nil
}
const bufLen = 1048576 // 1 MiB
const bufLen = 1048576 // 1MB
buf := make([]byte, bufLen)
for size > 0 {
n := size
@@ -1564,7 +1553,7 @@ func (f *Fs) Hashes() hash.Set {
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
if err := f.forbidChunk(dir, dir); err != nil {
return fmt.Errorf("can't mkdir: %w", err)
return errors.Wrap(err, "can't mkdir")
}
return f.base.Mkdir(ctx, dir)
}
@@ -1633,7 +1622,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
if err := o.f.forbidChunk(o, o.Remote()); err != nil {
// operations.Move can still call Remove if chunker's Move refuses
// to corrupt file in hard mode. Hence, refuse to Remove, too.
return fmt.Errorf("refuse to corrupt: %w", err)
return errors.Wrap(err, "refuse to corrupt")
}
if err := o.readMetadata(ctx); err == ErrMetaUnknown {
// Proceed but warn user that unexpected things can happen.
@@ -1661,12 +1650,12 @@ func (o *Object) Remove(ctx context.Context) (err error) {
// copyOrMove implements copy or move
func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMoveFn, md5, sha1, opName string) (fs.Object, error) {
if err := f.forbidChunk(o, remote); err != nil {
return nil, fmt.Errorf("can't %s: %w", opName, err)
return nil, errors.Wrapf(err, "can't %s", opName)
}
if err := o.readMetadata(ctx); err != nil {
// Refuse to copy/move composite files with invalid or future
// metadata format which might involve unsupported chunk types.
return nil, fmt.Errorf("can't %s this file: %w", opName, err)
return nil, errors.Wrapf(err, "can't %s this file", opName)
}
if !o.isComposite() {
fs.Debugf(o, "%s non-chunked object...", opName)
@@ -2163,7 +2152,7 @@ func (o *Object) UnWrap() fs.Object {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
if err := o.readMetadata(ctx); err != nil {
// refuse to open unsupported format
return nil, fmt.Errorf("can't open: %w", err)
return nil, errors.Wrap(err, "can't open")
}
if !o.isComposite() {
return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
@@ -2451,7 +2440,7 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1,
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
// Be strict about JSON format
// to reduce possibility that a random small file resembles metadata.
if len(data) > maxMetadataSizeWritten {
if data != nil && len(data) > maxMetadataSizeWritten {
return nil, false, ErrMetaTooBig
}
if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
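A side note on the `data != nil &&` guard swapped in and out above: `len` of a nil slice is 0 in Go, so the two conditions behave identically; the nil check only changes readability. A tiny standalone demonstration:

```go
package main

import "fmt"

func main() {
	var data []byte            // nil slice
	fmt.Println(len(data))     // 0, no panic
	fmt.Println(len(data) > 8) // false, same as a nil-guarded check
}
```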

@@ -12,8 +12,6 @@ import (
"testing"

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
@@ -35,35 +33,11 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
Path: fmt.Sprintf("chunker-upload-%dk", kilobytes),
Size: int64(kilobytes) * int64(fs.Kibi),
Size: int64(kilobytes) * int64(fs.KibiByte),
})
})
}

type settings map[string]interface{}

func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
fsName := strings.Split(f.Name(), "{")[0] // strip off hash
configMap := configmap.Simple{}
for key, val := range opts {
configMap[key] = fmt.Sprintf("%v", val)
}
rpath := fspath.JoinRootPath(f.Root(), path)
remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), rpath)
fixFs, err := fs.NewFs(ctx, remote)
require.NoError(t, err)
return fixFs
}

var mtime1 = fstest.Time("2001-02-03T04:05:06.499999999Z")

func testPutFile(ctx context.Context, t *testing.T, f fs.Fs, name, contents, message string, check bool) fs.Object {
item := fstest.Item{Path: name, ModTime: mtime1}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
assert.NotNil(t, obj, message)
return obj
}

// test chunk name parser
func testChunkNameFormat(t *testing.T, f *Fs) {
saveOpt := f.opt
@@ -643,13 +617,22 @@ func testMetadataInput(t *testing.T, f *Fs) {
}()
f.opt.FailHard = false

modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")

putFile := func(f fs.Fs, name, contents, message string, check bool) fs.Object {
item := fstest.Item{Path: name, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
assert.NotNil(t, obj, message)
return obj
}

runSubtest := func(contents, name string) {
description := fmt.Sprintf("file with %s metadata", name)
filename := path.Join(dir, name)
require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")

part := testPutFile(ctx, t, f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
_ = testPutFile(ctx, t, f, filename, contents, "upload "+description, false)
part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
_ = putFile(f, filename, contents, "upload "+description, false)

obj, err := f.NewObject(ctx, filename)
assert.NoError(t, err, "access "+description)
@@ -695,7 +678,7 @@ func testMetadataInput(t *testing.T, f *Fs) {

// Test that chunker refuses to change on objects with future/unknown metadata
func testFutureProof(t *testing.T, f *Fs) {
if !f.useMeta {
if f.opt.MetaFormat == "none" {
t.Skip("this test requires metadata support")
}

@@ -861,44 +844,6 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
_ = operations.Purge(ctx, f.base, dir)
}

// Test that md5all creates metadata even for small files
func testMD5AllSlow(t *testing.T, f *Fs) {
ctx := context.Background()
fsResult := deriveFs(ctx, t, f, "md5all", settings{
"chunk_size": "1P",
"name_format": "*.#",
"hash_type": "md5all",
"transactions": "rename",
"meta_format": "simplejson",
})
chunkFs, ok := fsResult.(*Fs)
require.True(t, ok, "fs must be a chunker remote")
baseFs := chunkFs.base
if !baseFs.Features().SlowHash {
t.Skipf("this test needs a base fs with slow hash, e.g. local")
}

assert.True(t, chunkFs.useMD5, "must use md5")
assert.True(t, chunkFs.hashAll, "must hash all files")

_ = testPutFile(ctx, t, chunkFs, "file", "-", "error", true)
obj, err := chunkFs.NewObject(ctx, "file")
require.NoError(t, err)
sum, err := obj.Hash(ctx, hash.MD5)
assert.NoError(t, err)
assert.Equal(t, "336d5ebc5436534e61d16e63ddfca327", sum)

list, err := baseFs.List(ctx, "")
require.NoError(t, err)
assert.Equal(t, 2, len(list))
_, err = baseFs.NewObject(ctx, "file")
assert.NoError(t, err, "metadata must be created")
_, err = baseFs.NewObject(ctx, "file.1")
assert.NoError(t, err, "first chunk must be created")

require.NoError(t, operations.Purge(ctx, baseFs, ""))
}

// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PutLarge", func(t *testing.T) {
@@ -931,9 +876,6 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("ChunkerServerSideMove", func(t *testing.T) {
testChunkerServerSideMove(t, f)
})
t.Run("MD5AllSlow", func(t *testing.T) {
testMD5AllSlow(t, f)
})
}

var _ fstests.InternalTester = (*Fs)(nil)

@@ -10,7 +10,6 @@ import (
"encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -22,6 +21,7 @@ import (
"github.com/buengese/sgzip"
"github.com/gabriel-vasile/mimetype"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunkedreader"
@@ -36,7 +36,7 @@ import (
// Globals
const (
initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently
maxChunkSize = 8388608 // at 256 KiB and 8 MiB.
maxChunkSize = 8388608 // at 256KB and 8 MB.

bufferSize = 8388608
heuristicBytes = 1048576
@@ -53,7 +53,7 @@ const (
Gzip = 2
)

var nameRegexp = regexp.MustCompile("^(.+?)\\.([A-Za-z0-9-_]{11})$")
var nameRegexp = regexp.MustCompile("^(.+?)\\.([A-Za-z0-9+_]{11})$")
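A quick standalone check of the two `nameRegexp` variants above, which differ only in whether `-` or `+` is allowed in the 11-character suffix the compress backend appends to data object names. The file name used here is made up for illustration.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	withDash := regexp.MustCompile(`^(.+?)\.([A-Za-z0-9-_]{11})$`)
	withPlus := regexp.MustCompile(`^(.+?)\.([A-Za-z0-9+_]{11})$`)

	name := "file.txt.Ab-dEfGh_01" // hypothetical 11-char suffix containing '-'
	fmt.Println(withDash.MatchString(name)) // true
	fmt.Println(withPlus.MatchString(name)) // false: '-' is not in the class
}
```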

// Register with Fs
func init() {
@@ -83,23 +83,23 @@ func init() {
Name: "level",
Help: `GZIP compression level (-2 to 9).

Generally -1 (default, equivalent to 5) is recommended.
Levels 1 to 9 increase compression at the cost of speed. Going past 6
generally offers very little return.

Level -2 uses Huffmann encoding only. Only use if you know what you
are doing.
Level 0 turns off compression.`,
Generally -1 (default, equivalent to 5) is recommended.
Levels 1 to 9 increase compressiong at the cost of speed.. Going past 6
generally offers very little return.

Level -2 uses Huffmann encoding only. Only use if you now what you
are doing
Level 0 turns off compression.`,
Default: sgzip.DefaultCompression,
Advanced: true,
}, {
Name: "ram_cache_limit",
Help: `Some remotes don't allow the upload of files with unknown size.
In this case the compressed file will need to be cached to determine
it's size.

Files smaller than this limit will be cached in RAM, files larger than
this limit will be cached on disk.`,
In this case the compressed file will need to be cached to determine
it's size.

Files smaller than this limit will be cached in RAM, file larger than
this limit will be cached on disk`,
Default: fs.SizeSuffix(20 * 1024 * 1024),
Advanced: true,
}},
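The level semantics in the help text above match Go's standard `compress/gzip` constants (`sgzip` follows the same numbering). A small standalone demo of the extremes, assuming nothing beyond the stdlib:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
)

// compressedSize gzips data at the given level and reports the output size.
func compressedSize(level int, data []byte) int {
	var buf bytes.Buffer
	w, err := gzip.NewWriterLevel(&buf, level)
	if err != nil {
		panic(err) // level out of range
	}
	_, _ = w.Write(data)
	_ = w.Close()
	return buf.Len()
}

func main() {
	data := bytes.Repeat([]byte("rclone compress backend "), 1000)
	fmt.Println(compressedSize(gzip.NoCompression, data))      // 0: stored, largest output
	fmt.Println(compressedSize(gzip.HuffmanOnly, data))        // -2: Huffman coding only
	fmt.Println(compressedSize(gzip.DefaultCompression, data)) // -1: the recommended default
	fmt.Println(compressedSize(gzip.BestCompression, data))    // 9: smallest, slowest
}
```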
@@ -143,7 +143,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,

wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
if err != nil {
return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err)
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
}

// Strip trailing slashes if they exist in rpath
@@ -158,7 +158,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
wrappedFs, err = wInfo.NewFs(ctx, wName, remotePath, wConfig)
}
if err != nil && err != fs.ErrorIsFile {
return nil, fmt.Errorf("failed to make remote %s:%q to wrap: %w", wName, remotePath, err)
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
}

// Create the wrapping fs
@@ -304,7 +304,7 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
case fs.Directory:
f.addDir(&newEntries, x)
default:
return nil, fmt.Errorf("Unknown object type %T", entry)
return nil, errors.Errorf("Unknown object type %T", entry)
}
}
return newEntries, nil
@@ -410,7 +410,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
srcHash := hasher.Sums()[ht]
dstHash, err := o.Hash(ctx, ht)
if err != nil {
return fmt.Errorf("failed to read destination hash: %w", err)
return errors.Wrap(err, "failed to read destination hash")
}
if srcHash != "" && dstHash != "" && srcHash != dstHash {
// remove object
@@ -418,7 +418,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return fmt.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
return errors.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
}
return nil
}
@@ -462,10 +462,10 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
_ = os.Remove(tempFile.Name())
}()
if err != nil {
return nil, fmt.Errorf("Failed to create temporary local FS to spool file: %w", err)
return nil, errors.Wrap(err, "Failed to create temporary local FS to spool file")
}
if _, err = io.Copy(tempFile, in); err != nil {
return nil, fmt.Errorf("Failed to write temporary local file: %w", err)
return nil, errors.Wrap(err, "Failed to write temporary local file")
}
if _, err = tempFile.Seek(0, 0); err != nil {
return nil, err
@@ -714,7 +714,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
if found && (oldObj.(*Object).meta.Mode != Uncompressed || compressible) {
err = oldObj.(*Object).Object.Remove(ctx)
if err != nil {
return nil, fmt.Errorf("Could remove original object: %w", err)
return nil, errors.Wrap(err, "Could remove original object")
}
}

@@ -723,7 +723,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
if compressible {
wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
if err != nil {
return nil, fmt.Errorf("Couldn't rename streamed Object.: %w", err)
return nil, errors.Wrap(err, "Couldn't rename streamed Object.")
}
newObj.Object = wrapObj
}
@@ -1260,7 +1260,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
return o.Object.Open(ctx, options...)
}
// Get offset and limit from OpenOptions, pass the rest to the underlying remote
var openOptions = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
var openOptions []fs.OpenOption = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {

@@ -7,21 +7,17 @@ import (
gocipher "crypto/cipher"
"crypto/rand"
"encoding/base32"
"encoding/base64"
"errors"
"fmt"
"io"
"strconv"
"strings"
"sync"
"time"
"unicode/utf8"

"github.com/Max-Sum/base32768"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/version"
"github.com/rfjakob/eme"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/scrypt"
@@ -96,12 +92,12 @@ func NewNameEncryptionMode(s string) (mode NameEncryptionMode, err error) {
case "obfuscate":
mode = NameEncryptionObfuscated
default:
err = fmt.Errorf("Unknown file name encryption mode %q", s)
err = errors.Errorf("Unknown file name encryption mode %q", s)
}
return mode, err
}

// String turns mode into a human-readable string
// String turns mode into a human readable string
func (mode NameEncryptionMode) String() (out string) {
switch mode {
case NameEncryptionOff:
@@ -116,57 +112,6 @@ func (mode NameEncryptionMode) String() (out string) {
return out
}

// fileNameEncoding are the encoding methods dealing with encrypted file names
type fileNameEncoding interface {
EncodeToString(src []byte) string
DecodeString(s string) ([]byte, error)
}

// caseInsensitiveBase32Encoding defines a file name encoding
// using a modified version of standard base32 as described in
// RFC4648
//
// The standard encoding is modified in two ways
// * it becomes lower case (no-one likes upper case filenames!)
// * we strip the padding character `=`
type caseInsensitiveBase32Encoding struct{}

// EncodeToString encodes a string using the modified version of
// base32 encoding.
func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
encoded := base32.HexEncoding.EncodeToString(src)
encoded = strings.TrimRight(encoded, "=")
return strings.ToLower(encoded)
}

// DecodeString decodes a string as encoded by EncodeToString
func (caseInsensitiveBase32Encoding) DecodeString(s string) ([]byte, error) {
if strings.HasSuffix(s, "=") {
return nil, ErrorBadBase32Encoding
}
// First figure out how many padding characters to add
roundUpToMultipleOf8 := (len(s) + 7) &^ 7
equals := roundUpToMultipleOf8 - len(s)
s = strings.ToUpper(s) + "========"[:equals]
return base32.HexEncoding.DecodeString(s)
}
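A standalone sketch of the padding arithmetic above: `(len(s) + 7) &^ 7` rounds up to the next multiple of 8, and the difference is the number of `=` pads the encoder stripped and the decoder must restore. The input value is taken from the encoding test table further down (`"12"` encodes to `"64p0"`).

```go
package main

import (
	"encoding/base32"
	"fmt"
	"strings"
)

// restorePadding upper-cases an unpadded base32hex string and re-adds '='.
func restorePadding(s string) string {
	roundedUp := (len(s) + 7) &^ 7 // add 7, then clear the low 3 bits
	return strings.ToUpper(s) + strings.Repeat("=", roundedUp-len(s))
}

func main() {
	stripped := "64p0" // lower-cased, unpadded base32hex of "12"
	padded := restorePadding(stripped)
	fmt.Println(padded) // 64P0====
	b, err := base32.HexEncoding.DecodeString(padded)
	fmt.Println(string(b), err) // 12 <nil>
}
```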

// NewNameEncoding creates a NameEncoding from a string
func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
s = strings.ToLower(s)
switch s {
case "base32":
enc = caseInsensitiveBase32Encoding{}
case "base64":
enc = base64.RawURLEncoding
case "base32768":
enc = base32768.SafeEncoding
default:
err = fmt.Errorf("Unknown file name encoding mode %q", s)
}
return enc, err
}

// Cipher defines an encoding and decoding cipher for the crypt backend
type Cipher struct {
dataKey [32]byte // Key for secretbox
@@ -174,17 +119,15 @@ type Cipher struct {
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
block gocipher.Block
mode NameEncryptionMode
fileNameEnc fileNameEncoding
buffers sync.Pool // encrypt/decrypt buffers
cryptoRand io.Reader // read crypto random numbers from here
dirNameEncrypt bool
}

// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) {
c := &Cipher{
mode: mode,
fileNameEnc: enc,
cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt,
}
@@ -242,6 +185,30 @@ func (c *Cipher) putBlock(buf []byte) {
c.buffers.Put(buf)
}

// encodeFileName encodes a filename using a modified version of
// standard base32 as described in RFC4648
//
// The standard encoding is modified in two ways
// * it becomes lower case (no-one likes upper case filenames!)
// * we strip the padding character `=`
func encodeFileName(in []byte) string {
encoded := base32.HexEncoding.EncodeToString(in)
encoded = strings.TrimRight(encoded, "=")
return strings.ToLower(encoded)
}

// decodeFileName decodes a filename as encoded by encodeFileName
func decodeFileName(in string) ([]byte, error) {
if strings.HasSuffix(in, "=") {
return nil, ErrorBadBase32Encoding
}
// First figure out how many padding characters to add
roundUpToMultipleOf8 := (len(in) + 7) &^ 7
equals := roundUpToMultipleOf8 - len(in)
in = strings.ToUpper(in) + "========"[:equals]
return base32.HexEncoding.DecodeString(in)
}

// encryptSegment encrypts a path segment
//
// This uses EME with AES
@@ -262,7 +229,7 @@ func (c *Cipher) encryptSegment(plaintext string) string {
}
paddedPlaintext := pkcs7.Pad(nameCipherBlockSize, []byte(plaintext))
ciphertext := eme.Transform(c.block, c.nameTweak[:], paddedPlaintext, eme.DirectionEncrypt)
return c.fileNameEnc.EncodeToString(ciphertext)
return encodeFileName(ciphertext)
}

// decryptSegment decrypts a path segment
@@ -270,7 +237,7 @@ func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
if ciphertext == "" {
return "", nil
}
rawCiphertext, err := c.fileNameEnc.DecodeString(ciphertext)
rawCiphertext, err := decodeFileName(ciphertext)
if err != nil {
return "", err
}
@@ -475,32 +442,11 @@ func (c *Cipher) encryptFileName(in string) string {
if !c.dirNameEncrypt && i != (len(segments)-1) {
continue
}

// Strip version string so that only the non-versioned part
// of the file name gets encrypted/obfuscated
hasVersion := false
var t time.Time
if i == (len(segments)-1) && version.Match(segments[i]) {
var s string
t, s = version.Remove(segments[i])
// version.Remove can fail, in which case it returns segments[i]
if s != segments[i] {
segments[i] = s
hasVersion = true
}
}

if c.mode == NameEncryptionStandard {
segments[i] = c.encryptSegment(segments[i])
} else {
segments[i] = c.obfuscateSegment(segments[i])
}

// Add back a version to the encrypted/obfuscated
// file name, if we stripped it off earlier
if hasVersion {
segments[i] = version.Add(segments[i], t)
}
}
return strings.Join(segments, "/")
}
@@ -531,21 +477,6 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
if !c.dirNameEncrypt && i != (len(segments)-1) {
continue
}

// Strip version string so that only the non-versioned part
// of the file name gets decrypted/deobfuscated
hasVersion := false
var t time.Time
if i == (len(segments)-1) && version.Match(segments[i]) {
var s string
t, s = version.Remove(segments[i])
// version.Remove can fail, in which case it returns segments[i]
if s != segments[i] {
segments[i] = s
hasVersion = true
}
}

if c.mode == NameEncryptionStandard {
segments[i], err = c.decryptSegment(segments[i])
} else {
@@ -555,12 +486,6 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
if err != nil {
return "", err
}

// Add back a version to the decrypted/deobfuscated
// file name, if we stripped it off earlier
if hasVersion {
segments[i] = version.Add(segments[i], t)
}
}
return strings.Join(segments, "/"), nil
}
@@ -569,18 +494,10 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
func (c *Cipher) DecryptFileName(in string) (string, error) {
if c.mode == NameEncryptionOff {
remainingLength := len(in) - len(encryptedSuffix)
if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
return "", ErrorNotAnEncryptedFile
if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) {
return in[:remainingLength], nil
}
decrypted := in[:remainingLength]
if version.Match(decrypted) {
_, unversioned := version.Remove(decrypted)
if unversioned == "" {
return "", ErrorNotAnEncryptedFile
}
}
// Leave the version string on, if it was there
return decrypted, nil
return "", ErrorNotAnEncryptedFile
}
return c.decryptFileName(in)
}
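A standalone sketch of the `NameEncryptionOff` path above: with name encryption off the crypt backend only appends a suffix (`.bin` in the test cases further down, assumed here), so "decryption" is just validating and stripping that suffix.

```go
package main

import (
	"fmt"
	"strings"
)

const encryptedSuffix = ".bin" // value assumed from the test cases below

// decryptOff strips the suffix, rejecting names that lack it or would
// become empty after stripping.
func decryptOff(in string) (string, error) {
	remaining := len(in) - len(encryptedSuffix)
	if remaining <= 0 || !strings.HasSuffix(in, encryptedSuffix) {
		return "", fmt.Errorf("not an encrypted file: %q", in)
	}
	return in[:remaining], nil
}

func main() {
	fmt.Println(decryptOff("1/12/123.bin")) // 1/12/123 <nil>
	fmt.Println(decryptOff(".bin"))         // error: name would be empty
	fmt.Println(decryptOff("1/12/123.bix")) // error: wrong suffix
}
```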
|
||||
@@ -611,7 +528,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
|
||||
func (n *nonce) fromReader(in io.Reader) error {
|
||||
read, err := io.ReadFull(in, (*n)[:])
|
||||
if read != fileNonceSize {
|
||||
return fmt.Errorf("short read of nonce: %w", err)
|
||||
return errors.Wrap(err, "short read of nonce")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -987,7 +904,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
|
||||
// Re-open the underlying object with the offset given
|
||||
rc, err := fh.open(ctx, underlyingOffset, underlyingLimit)
|
||||
if err != nil {
|
||||
return 0, fh.finish(fmt.Errorf("couldn't reopen file with offset and limit: %w", err))
|
||||
return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
|
||||
}
|
||||
|
||||
// Set the file handle
|
||||
|
||||
@@ -4,15 +4,13 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base32"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/Max-Sum/base32768"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/backend/crypt/pkcs7"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -47,31 +45,11 @@ func TestNewNameEncryptionModeString(t *testing.T) {
|
||||
assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3")
|
||||
}
|
||||
|
||||
type EncodingTestCase struct {
|
||||
in string
|
||||
expected string
|
||||
}
|
||||
|
||||
func testEncodeFileName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
|
||||
for _, test := range testCases {
|
||||
enc, err := NewNameEncoding(encoding)
|
||||
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
|
||||
actual := enc.EncodeToString([]byte(test.in))
|
||||
assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
|
||||
recovered, err := enc.DecodeString(test.expected)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", test.expected))
|
||||
if caseInsensitive {
|
||||
in := strings.ToUpper(test.expected)
|
||||
recovered, err = enc.DecodeString(in)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", in))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeFileNameBase32(t *testing.T) {
|
||||
testEncodeFileName(t, "base32", []EncodingTestCase{
|
||||
func TestEncodeFileName(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expected string
|
||||
}{
|
||||
{"", ""},
|
||||
{"1", "64"},
|
||||
{"12", "64p0"},
|
||||
@@ -89,56 +67,20 @@ func TestEncodeFileNameBase32(t *testing.T) {
|
||||
{"12345678901234", "64p36d1l6orjge9g64p36d0"},
|
||||
{"123456789012345", "64p36d1l6orjge9g64p36d1l"},
|
||||
{"1234567890123456", "64p36d1l6orjge9g64p36d1l6o"},
|
||||
}, true)
|
||||
} {
|
||||
actual := encodeFileName([]byte(test.in))
|
||||
assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
|
||||
recovered, err := decodeFileName(test.expected)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", test.expected))
|
||||
in := strings.ToUpper(test.expected)
|
||||
recovered, err = decodeFileName(in)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", in))
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeFileNameBase64(t *testing.T) {
|
||||
testEncodeFileName(t, "base64", []EncodingTestCase{
|
||||
{"", ""},
|
||||
{"1", "MQ"},
|
||||
{"12", "MTI"},
|
||||
{"123", "MTIz"},
|
||||
{"1234", "MTIzNA"},
|
||||
{"12345", "MTIzNDU"},
|
||||
{"123456", "MTIzNDU2"},
|
||||
{"1234567", "MTIzNDU2Nw"},
|
||||
{"12345678", "MTIzNDU2Nzg"},
|
||||
{"123456789", "MTIzNDU2Nzg5"},
|
||||
{"1234567890", "MTIzNDU2Nzg5MA"},
|
||||
{"12345678901", "MTIzNDU2Nzg5MDE"},
|
||||
{"123456789012", "MTIzNDU2Nzg5MDEy"},
|
||||
{"1234567890123", "MTIzNDU2Nzg5MDEyMw"},
|
||||
{"12345678901234", "MTIzNDU2Nzg5MDEyMzQ"},
|
||||
{"123456789012345", "MTIzNDU2Nzg5MDEyMzQ1"},
|
||||
{"1234567890123456", "MTIzNDU2Nzg5MDEyMzQ1Ng"},
|
||||
}, false)
|
||||
}
|
||||
|
||||
func TestEncodeFileNameBase32768(t *testing.T) {
|
||||
testEncodeFileName(t, "base32768", []EncodingTestCase{
|
||||
{"", ""},
|
||||
{"1", "㼿"},
|
||||
{"12", "㻙ɟ"},
|
||||
{"123", "㻙ⲿ"},
|
||||
{"1234", "㻙ⲍƟ"},
|
||||
{"12345", "㻙ⲍ⍟"},
|
||||
{"123456", "㻙ⲍ⍆ʏ"},
|
||||
{"1234567", "㻙ⲍ⍆觟"},
|
||||
{"12345678", "㻙ⲍ⍆觓ɧ"},
|
||||
{"123456789", "㻙ⲍ⍆觓栯"},
|
||||
{"1234567890", "㻙ⲍ⍆觓栩ɣ"},
|
||||
{"12345678901", "㻙ⲍ⍆觓栩朧"},
|
||||
{"123456789012", "㻙ⲍ⍆觓栩朤ʅ"},
|
||||
{"1234567890123", "㻙ⲍ⍆觓栩朤談"},
|
||||
{"12345678901234", "㻙ⲍ⍆觓栩朤諆ɔ"},
|
||||
{"123456789012345", "㻙ⲍ⍆觓栩朤諆媕"},
|
||||
{"1234567890123456", "㻙ⲍ⍆觓栩朤諆媕䆿"},
|
||||
}, false)
|
||||
}
|
||||
|
||||
func TestDecodeFileNameBase32(t *testing.T) {
|
||||
enc, err := NewNameEncoding("base32")
|
||||
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
|
||||
func TestDecodeFileName(t *testing.T) {
|
||||
// We've tested decoding the valid ones above, now concentrate on the invalid ones
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
@@ -148,65 +90,17 @@ func TestDecodeFileNameBase32(t *testing.T) {
|
||||
{"!", base32.CorruptInputError(0)},
|
||||
{"hello=hello", base32.CorruptInputError(5)},
|
||||
} {
|
||||
actual, actualErr := enc.DecodeString(test.in)
|
||||
actual, actualErr := decodeFileName(test.in)
|
||||
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeFileNameBase64(t *testing.T) {
|
||||
enc, err := NewNameEncoding("base64")
|
||||
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
|
||||
// We've tested decoding the valid ones above, now concentrate on the invalid ones
|
||||
func TestEncryptSegment(t *testing.T) {
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true)
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expectedErr error
|
||||
in string
|
||||
expected string
|
||||
}{
|
||||
{"64=", base64.CorruptInputError(2)},
|
||||
{"!", base64.CorruptInputError(0)},
|
||||
{"Hello=Hello", base64.CorruptInputError(5)},
|
||||
} {
|
||||
actual, actualErr := enc.DecodeString(test.in)
|
||||
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeFileNameBase32768(t *testing.T) {
|
||||
enc, err := NewNameEncoding("base32768")
|
||||
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
|
||||
// We've tested decoding the valid ones above, now concentrate on the invalid ones
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expectedErr error
|
||||
}{
|
||||
{"㼿c", base32768.CorruptInputError(1)},
|
||||
{"!", base32768.CorruptInputError(0)},
|
||||
{"㻙ⲿ=㻙ⲿ", base32768.CorruptInputError(2)},
|
||||
} {
|
||||
actual, actualErr := enc.DecodeString(test.in)
|
||||
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
|
||||
}
|
||||
}
|
||||
|
||||
func testEncryptSegment(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
|
||||
enc, _ := NewNameEncoding(encoding)
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
for _, test := range testCases {
|
||||
actual := c.encryptSegment(test.in)
|
||||
assert.Equal(t, test.expected, actual, fmt.Sprintf("Testing %q", test.in))
|
||||
recovered, err := c.decryptSegment(test.expected)
|
||||
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", test.expected))
|
||||
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", test.expected))
|
||||
if caseInsensitive {
|
||||
in := strings.ToUpper(test.expected)
|
||||
recovered, err = c.decryptSegment(in)
|
||||
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", in))
|
||||
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", in))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncryptSegmentBase32(t *testing.T) {
|
||||
testEncryptSegment(t, "base32", []EncodingTestCase{
|
||||
{"", ""},
|
||||
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
|
||||
{"12", "l42g6771hnv3an9cgc8cr2n1ng"},
|
||||
@@ -224,61 +118,26 @@ func TestEncryptSegmentBase32(t *testing.T) {
|
||||
{"12345678901234", "moq0uqdlqrblrc5pa5u5c7hq9g"},
|
||||
{"123456789012345", "eeam3li4rnommi3a762h5n7meg"},
|
||||
{"1234567890123456", "mijbj0frqf6ms7frcr6bd9h0env53jv96pjaaoirk7forcgpt70g"},
|
||||
}, true)
|
||||
} {
|
||||
actual := c.encryptSegment(test.in)
|
||||
assert.Equal(t, test.expected, actual, fmt.Sprintf("Testing %q", test.in))
|
||||
recovered, err := c.decryptSegment(test.expected)
|
||||
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", test.expected))
|
||||
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", test.expected))
|
||||
in := strings.ToUpper(test.expected)
|
||||
recovered, err = c.decryptSegment(in)
|
||||
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", in))
|
||||
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", in))
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncryptSegmentBase64(t *testing.T) {
|
||||
testEncryptSegment(t, "base64", []EncodingTestCase{
|
||||
{"", ""},
|
||||
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
|
||||
{"12", "qQUDHOGN_jVdLIMQzYrhvA"},
|
||||
{"123", "1CxFf2Mti1xIPYlGruDh-A"},
|
||||
{"1234", "RL-xOTmsxsG7kuTy2XJUxw"},
|
||||
{"12345", "3FP_GHoeBJdq0yLgaED8IQ"},
|
||||
{"123456", "Xc4T1Gqrs3OVYnrE6dpEWQ"},
|
||||
{"1234567", "uZeEzssOnDWHEOzLqjwpog"},
|
||||
{"12345678", "8noiTP5WkkbEuijsPhOpxQ"},
|
||||
{"123456789", "GeNxgLA0wiaGAKU3U7qL4Q"},
|
||||
{"1234567890", "x1DUhdmqoVWYVBLD3dha-A"},
|
||||
{"12345678901", "iEyP_3BZR6vvv_2WM6NbZw"},
|
||||
{"123456789012", "4OPGvS4SZdjvS568APUaFw"},
|
||||
{"1234567890123", "Y8c5Wr8OhYYUo7fPwdojdg"},
|
||||
{"12345678901234", "tjQPabXW112wuVF8Vh46TA"},
|
||||
{"123456789012345", "c5Vh1kTd8WtIajmFEtz2dA"},
|
||||
{"1234567890123456", "tKa5gfvTzW4d-2bMtqYgdf5Rz-k2ZqViW6HfjbIZ6cE"},
|
||||
}, false)
|
||||
}
|
||||
|
||||
func TestEncryptSegmentBase32768(t *testing.T) {
|
||||
testEncryptSegment(t, "base32768", []EncodingTestCase{
|
||||
{"", ""},
|
||||
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
|
||||
{"12", "竢朧䉱虃光塬䟛⣡蓟"},
|
||||
{"123", "遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
|
||||
{"1234", "䢟銮䵵狌㐜燳谒颴詟"},
|
||||
{"12345", "钉Ꞇ㖃蚩憶狫朰杜㜿"},
|
||||
{"123456", "啇ᚵⵕ憗䋫➫➓肤卟"},
|
||||
{"1234567", "茫螓翁連劘樓㶔抉矟"},
|
||||
{"12345678", "龝☳䘊辄岅較络㧩襟"},
|
||||
{"123456789", "ⲱ苀㱆犂媐Ꮤ锇惫靟"},
|
||||
{"1234567890", "計宁憕偵匢皫╛纺ꌟ"},
|
||||
{"12345678901", "檆䨿鑫㪺藝ꡖ勇䦛婟"},
|
||||
{"123456789012", "雑頏䰂䲝淚哚鹡魺⪟"},
|
||||
{"1234567890123", "塃璶繁躸圅㔟䗃肃懟"},
|
||||
{"12345678901234", "腺ᕚ崚鏕鏥讥鼌䑺䲿"},
|
||||
{"123456789012345", "怪绕滻蕶肣但⠥荖惟"},
|
||||
{"1234567890123456", "肳哀旚挶靏鏻㾭䱠慟㪳ꏆ賊兲铧敻塹魀ʟ"},
|
||||
}, false)
|
||||
}
|
||||
|
||||
func TestDecryptSegmentBase32(t *testing.T) {
|
||||
func TestDecryptSegment(t *testing.T) {
|
||||
// We've tested the forwards above, now concentrate on the errors
|
||||
longName := make([]byte, 3328)
|
||||
for i := range longName {
|
||||
longName[i] = 'a'
|
||||
}
|
||||
enc, _ := NewNameEncoding("base32")
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true)
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expectedErr error
|
||||
@@ -286,371 +145,42 @@ func TestDecryptSegmentBase32(t *testing.T) {
|
||||
{"64=", ErrorBadBase32Encoding},
|
||||
{"!", base32.CorruptInputError(0)},
|
||||
{string(longName), ErrorTooLongAfterDecode},
|
||||
{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
|
||||
{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
|
||||
{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
|
||||
{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
|
||||
{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
|
||||
{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
|
||||
} {
|
||||
actual, actualErr := c.decryptSegment(test.in)
|
||||
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecryptSegmentBase64(t *testing.T) {
|
||||
// We've tested the forwards above, now concentrate on the errors
|
||||
longName := make([]byte, 2816)
|
||||
for i := range longName {
|
||||
longName[i] = 'a'
|
||||
}
|
||||
enc, _ := NewNameEncoding("base64")
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expectedErr error
|
||||
}{
|
||||
{"6H=", base64.CorruptInputError(2)},
|
||||
{"!", base64.CorruptInputError(0)},
|
||||
{string(longName), ErrorTooLongAfterDecode},
|
||||
{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
|
||||
{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
|
||||
{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
|
||||
} {
|
||||
actual, actualErr := c.decryptSegment(test.in)
|
||||
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecryptSegmentBase32768(t *testing.T) {
|
||||
// We've tested the forwards above, now concentrate on the errors
|
||||
longName := strings.Repeat("怪", 1280)
|
||||
enc, _ := NewNameEncoding("base32768")
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expectedErr error
|
||||
}{
|
||||
{"怪=", base32768.CorruptInputError(1)},
|
||||
{"!", base32768.CorruptInputError(0)},
|
||||
{longName, ErrorTooLongAfterDecode},
|
||||
{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
|
||||
{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
|
||||
{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
|
||||
} {
|
||||
actual, actualErr := c.decryptSegment(test.in)
|
||||
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
|
||||
}
|
||||
}
|
||||
|
||||
func testStandardEncryptFileName(t *testing.T, encoding string, testCasesEncryptDir []EncodingTestCase, testCasesNoEncryptDir []EncodingTestCase) {
|
||||
func TestEncryptFileName(t *testing.T) {
|
||||
// First standard mode
|
||||
enc, _ := NewNameEncoding(encoding)
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
for _, test := range testCasesEncryptDir {
|
||||
assert.Equal(t, test.expected, c.EncryptFileName(test.in))
|
||||
}
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true)
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
|
||||
// Standard mode with directory name encryption off
|
||||
c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
|
||||
for _, test := range testCasesNoEncryptDir {
|
||||
assert.Equal(t, test.expected, c.EncryptFileName(test.in))
|
||||
}
|
||||
}
|
||||
|
||||
func TestStandardEncryptFileNameBase32(t *testing.T) {
|
||||
testStandardEncryptFileName(t, "base32", []EncodingTestCase{
|
||||
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
|
||||
{"1/12", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng"},
|
||||
{"1/12/123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0"},
|
||||
{"1-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123"},
|
||||
{"1/12-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123"},
|
||||
}, []EncodingTestCase{
|
||||
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
|
||||
{"1/12", "1/l42g6771hnv3an9cgc8cr2n1ng"},
|
||||
{"1/12/123", "1/12/qgm4avr35m5loi1th53ato71v0"},
|
||||
{"1-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123"},
|
||||
{"1/12-v2001-02-03-040506-123", "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestStandardEncryptFileNameBase64(t *testing.T) {
|
||||
testStandardEncryptFileName(t, "base64", []EncodingTestCase{
|
||||
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
|
||||
{"1/12", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA"},
|
||||
{"1/12/123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A"},
|
||||
{"1-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw-v2001-02-03-040506-123"},
|
||||
{"1/12-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA-v2001-02-03-040506-123"},
|
||||
}, []EncodingTestCase{
|
||||
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
|
||||
{"1/12", "1/qQUDHOGN_jVdLIMQzYrhvA"},
|
||||
{"1/12/123", "1/12/1CxFf2Mti1xIPYlGruDh-A"},
|
||||
{"1-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw-v2001-02-03-040506-123"},
|
||||
{"1/12-v2001-02-03-040506-123", "1/qQUDHOGN_jVdLIMQzYrhvA-v2001-02-03-040506-123"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestStandardEncryptFileNameBase32768(t *testing.T) {
|
||||
testStandardEncryptFileName(t, "base32768", []EncodingTestCase{
|
||||
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
|
||||
{"1/12", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟"},
|
||||
{"1/12/123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
|
||||
{"1-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟-v2001-02-03-040506-123"},
|
||||
{"1/12-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟-v2001-02-03-040506-123"},
|
||||
}, []EncodingTestCase{
|
||||
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
|
||||
{"1/12", "1/竢朧䉱虃光塬䟛⣡蓟"},
|
||||
{"1/12/123", "1/12/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
|
||||
{"1-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟-v2001-02-03-040506-123"},
|
||||
{"1/12-v2001-02-03-040506-123", "1/竢朧䉱虃光塬䟛⣡蓟-v2001-02-03-040506-123"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestNonStandardEncryptFileName(t *testing.T) {
|
||||
// Off mode
|
||||
c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
|
||||
c, _ = newCipher(NameEncryptionStandard, "", "", false)
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
|
||||
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
|
||||
assert.Equal(t, "1/12/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
|
||||
// Now off mode
|
||||
c, _ = newCipher(NameEncryptionOff, "", "", true)
|
||||
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
|
||||
// Obfuscation mode
|
||||
c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
|
||||
c, _ = newCipher(NameEncryptionObfuscated, "", "", true)
|
||||
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
|
||||
assert.Equal(t, "49.6/99.23/150.890/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
|
||||
assert.Equal(t, "49.6/99.23/150.890/162.uryyB-v2001-02-03-040506-123.GKG", c.EncryptFileName("1/12/123/hello-v2001-02-03-040506-123.txt"))
|
||||
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
|
||||
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
|
||||
// Obfuscation mode with directory name encryption off
|
||||
c, _ = newCipher(NameEncryptionObfuscated, "", "", false, nil)
|
||||
c, _ = newCipher(NameEncryptionObfuscated, "", "", false)
|
||||
assert.Equal(t, "1/12/123/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
|
||||
assert.Equal(t, "1/12/123/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
|
||||
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
|
||||
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
|
||||
}
|
||||
|
||||
func testStandardDecryptFileName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
|
||||
enc, _ := NewNameEncoding(encoding)
|
||||
for _, test := range testCases {
|
||||
// Test when dirNameEncrypt=true
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
actual, actualErr := c.DecryptFileName(test.in)
|
||||
assert.NoError(t, actualErr)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
if caseInsensitive {
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
actual, actualErr := c.DecryptFileName(strings.ToUpper(test.in))
|
||||
assert.NoError(t, actualErr)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
}
|
||||
// Add a character should raise ErrorNotAMultipleOfBlocksize
|
||||
actual, actualErr = c.DecryptFileName(enc.EncodeToString([]byte("1")) + test.in)
|
||||
assert.Equal(t, ErrorNotAMultipleOfBlocksize, actualErr)
|
||||
assert.Equal(t, "", actual)
|
||||
// Test when dirNameEncrypt=false
|
||||
noDirEncryptIn := test.in
|
||||
if strings.LastIndex(test.expected, "/") != -1 {
|
||||
noDirEncryptIn = test.expected[:strings.LastIndex(test.expected, "/")] + test.in[strings.LastIndex(test.in, "/"):]
|
||||
}
|
||||
c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
|
||||
actual, actualErr = c.DecryptFileName(noDirEncryptIn)
|
||||
assert.NoError(t, actualErr)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
}
|
||||
}

func TestStandardDecryptFileNameBase32(t *testing.T) {
testStandardDecryptFileName(t, "base32", []EncodingTestCase{
{"p0e52nreeaj0a5ea7s64m4j72s", "1"},
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12"},
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123"},
}, true)
}

func TestStandardDecryptFileNameBase64(t *testing.T) {
testStandardDecryptFileName(t, "base64", []EncodingTestCase{
{"yBxRX25ypgUVyj8MSxJnFw", "1"},
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA", "1/12"},
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A", "1/12/123"},
}, false)
}

func TestStandardDecryptFileNameBase32768(t *testing.T) {
testStandardDecryptFileName(t, "base32768", []EncodingTestCase{
{"詮㪗鐮僀伎作㻖㢧⪟", "1"},
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟", "1/12"},
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ", "1/12/123"},
}, false)
}

func TestNonStandardDecryptFileName(t *testing.T) {
for _, encoding := range []string{"base32", "base64", "base32768"} {
enc, _ := NewNameEncoding(encoding)
for _, test := range []struct {
mode NameEncryptionMode
dirNameEncrypt bool
in string
expected string
expectedErr error
}{
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
actual, actualErr := c.DecryptFileName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
assert.Equal(t, test.expectedErr, actualErr, what)
}
}
}
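
The obfuscate cases above ("!.hello", "161.\u00e4", "53.!!lipps") all share a shape: a numeric prefix, a dot, then rotated characters. The toy Caesar-style sketch below only illustrates that shape -- rclone's actual obfuscation derives the prefix differently and handles non-ASCII, so treat every detail here as an assumption:

package main

import "fmt"

// obfuscate prefixes the rotation amount and rotates each lowercase
// letter by it; deobfuscate reverses the rotation. A toy illustration of
// the prefix.rotated-name shape only, not rclone's algorithm.
func obfuscate(n int, s string) string {
	out := []rune(s)
	for i, r := range out {
		if r >= 'a' && r <= 'z' {
			out[i] = 'a' + (r-'a'+rune(n))%26
		}
	}
	return fmt.Sprintf("%d.%s", n, string(out))
}

func deobfuscate(n int, s string) string {
	out := []rune(s)
	for i, r := range out {
		if r >= 'a' && r <= 'z' {
			out[i] = 'a' + (r-'a'-rune(n)+26)%26
		}
	}
	return string(out)
}

func main() {
	fmt.Println(obfuscate(7, "hello"))     // "7.olssv"
	fmt.Println(deobfuscate(7, "olssv"))   // "hello"
}
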

func TestEncDecMatches(t *testing.T) {
for _, encoding := range []string{"base32", "base64", "base32768"} {
enc, _ := NewNameEncoding(encoding)
for _, test := range []struct {
mode NameEncryptionMode
in string
}{
{NameEncryptionStandard, "1/2/3/4"},
{NameEncryptionOff, "1/2/3/4"},
{NameEncryptionObfuscated, "1/2/3/4/!hello\u03a0"},
{NameEncryptionObfuscated, "Avatar The Last Airbender"},
} {
c, _ := newCipher(test.mode, "", "", true, enc)
out, err := c.DecryptFileName(c.EncryptFileName(test.in))
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, out, test.in, what)
assert.Equal(t, err, nil, what)
}
}
}

func testStandardEncryptDirName(t *testing.T, encoding string, testCases []EncodingTestCase) {
enc, _ := NewNameEncoding(encoding)
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
// First standard mode
for _, test := range testCases {
assert.Equal(t, test.expected, c.EncryptDirName(test.in))
}
}

func TestStandardEncryptDirNameBase32(t *testing.T) {
testStandardEncryptDirName(t, "base32", []EncodingTestCase{
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
{"1/12", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng"},
{"1/12/123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0"},
})
}

func TestStandardEncryptDirNameBase64(t *testing.T) {
testStandardEncryptDirName(t, "base64", []EncodingTestCase{
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
{"1/12", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA"},
{"1/12/123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A"},
})
}

func TestStandardEncryptDirNameBase32768(t *testing.T) {
testStandardEncryptDirName(t, "base32768", []EncodingTestCase{
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
{"1/12", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟"},
{"1/12/123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
})
}

func TestNonStandardEncryptDirName(t *testing.T) {
for _, encoding := range []string{"base32", "base64", "base32768"} {
enc, _ := NewNameEncoding(encoding)
c, _ := newCipher(NameEncryptionStandard, "", "", false, enc)
assert.Equal(t, "1/12", c.EncryptDirName("1/12"))
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
// Now off mode
c, _ = newCipher(NameEncryptionOff, "", "", true, enc)
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
}
}

func testStandardDecryptDirName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
enc, _ := NewNameEncoding(encoding)
for _, test := range testCases {
// Test dirNameEncrypt=true
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
actual, actualErr := c.DecryptDirName(test.in)
assert.Equal(t, test.expected, actual)
assert.NoError(t, actualErr)
if caseInsensitive {
actual, actualErr := c.DecryptDirName(strings.ToUpper(test.in))
assert.Equal(t, actual, test.expected)
assert.NoError(t, actualErr)
}
actual, actualErr = c.DecryptDirName(enc.EncodeToString([]byte("1")) + test.in)
assert.Equal(t, "", actual)
assert.Equal(t, ErrorNotAMultipleOfBlocksize, actualErr)
// Test dirNameEncrypt=false
c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
actual, actualErr = c.DecryptDirName(test.in)
assert.Equal(t, test.in, actual)
assert.NoError(t, actualErr)
actual, actualErr = c.DecryptDirName(test.expected)
assert.Equal(t, test.expected, actual)
assert.NoError(t, actualErr)
}
}
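
The caseInsensitive branch above upper-cases the input before decrypting, and only the base32 tests pass caseInsensitive=true. That is a property of the alphabet: base32 uses a single case, so case can be normalized away, while base64 (and base32768) encode distinct values with distinct cases. A runnable illustration with the standard library's base32 -- rclone's own encoder differs in alphabet, but the property is the same:

package main

import (
	"encoding/base32"
	"fmt"
	"strings"
)

func main() {
	enc := base32.StdEncoding.WithPadding(base32.NoPadding)
	s := enc.EncodeToString([]byte("1/12"))
	lower := strings.ToLower(s) // what a case-mangling remote might hand back
	// Normalizing the case recovers identical bytes, so decryption of an
	// upper-cased name can still succeed -- hence caseInsensitive=true
	// for base32 only.
	data, err := enc.DecodeString(strings.ToUpper(lower))
	fmt.Printf("%s -> %q (err=%v)\n", lower, data, err)
}
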

/*
enc, _ := NewNameEncoding(encoding)
for _, test := range []struct {
mode NameEncryptionMode
dirNameEncrypt bool
in string
expected string
expectedErr error
}{
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
{NameEncryptionStandard, false, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", nil},
{NameEncryptionStandard, false, "1/12/123", "1/12/123", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
actual, actualErr := c.DecryptDirName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
assert.Equal(t, test.expectedErr, actualErr, what)
}
*/

func TestStandardDecryptDirNameBase32(t *testing.T) {
testStandardDecryptDirName(t, "base32", []EncodingTestCase{
{"p0e52nreeaj0a5ea7s64m4j72s", "1"},
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12"},
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123"},
}, true)
}

func TestStandardDecryptDirNameBase64(t *testing.T) {
testStandardDecryptDirName(t, "base64", []EncodingTestCase{
{"yBxRX25ypgUVyj8MSxJnFw", "1"},
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA", "1/12"},
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A", "1/12/123"},
}, false)
}

func TestStandardDecryptDirNameBase32768(t *testing.T) {
testStandardDecryptDirName(t, "base32768", []EncodingTestCase{
{"詮㪗鐮僀伎作㻖㢧⪟", "1"},
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟", "1/12"},
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ", "1/12/123"},
}, false)
}

func TestNonStandardDecryptDirName(t *testing.T) {
func TestDecryptFileName(t *testing.T) {
for _, test := range []struct {
mode NameEncryptionMode
dirNameEncrypt bool
@@ -658,11 +188,82 @@ func TestNonStandardDecryptDirName(t *testing.T) {
expected string
expectedErr error
}{
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
{NameEncryptionStandard, false, "1/12/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
actual, actualErr := c.DecryptFileName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
assert.Equal(t, test.expectedErr, actualErr, what)
}
}

func TestEncDecMatches(t *testing.T) {
for _, test := range []struct {
mode NameEncryptionMode
in string
}{
{NameEncryptionStandard, "1/2/3/4"},
{NameEncryptionOff, "1/2/3/4"},
{NameEncryptionObfuscated, "1/2/3/4/!hello\u03a0"},
{NameEncryptionObfuscated, "Avatar The Last Airbender"},
} {
c, _ := newCipher(test.mode, "", "", true)
out, err := c.DecryptFileName(c.EncryptFileName(test.in))
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, out, test.in, what)
assert.Equal(t, err, nil, what)
}
}

func TestEncryptDirName(t *testing.T) {
// First standard mode
c, _ := newCipher(NameEncryptionStandard, "", "", true)
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptDirName("1"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptDirName("1/12"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptDirName("1/12/123"))
// Standard mode with dir name encryption off
c, _ = newCipher(NameEncryptionStandard, "", "", false)
assert.Equal(t, "1/12", c.EncryptDirName("1/12"))
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
// Now off mode
c, _ = newCipher(NameEncryptionOff, "", "", true)
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
}

func TestDecryptDirName(t *testing.T) {
for _, test := range []struct {
mode NameEncryptionMode
dirNameEncrypt bool
in string
expected string
expectedErr error
}{
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
{NameEncryptionStandard, false, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", nil},
{NameEncryptionStandard, false, "1/12/123", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123.bin", nil},
{NameEncryptionOff, true, "1/12/123", "1/12/123", nil},
{NameEncryptionOff, true, ".bin", ".bin", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, nil)
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
actual, actualErr := c.DecryptDirName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
@@ -671,7 +272,7 @@ func TestNonStandardDecryptDirName(t *testing.T) {
}

func TestEncryptedSize(t *testing.T) {
c, _ := newCipher(NameEncryptionStandard, "", "", true, nil)
c, _ := newCipher(NameEncryptionStandard, "", "", true)
for _, test := range []struct {
in int64
expected int64
@@ -695,7 +296,7 @@ func TestEncryptedSize(t *testing.T) {

func TestDecryptedSize(t *testing.T) {
// Test the errors since we tested the reverse above
c, _ := newCipher(NameEncryptionStandard, "", "", true, nil)
c, _ := newCipher(NameEncryptionStandard, "", "", true)
for _, test := range []struct {
in int64
expectedErr error
@@ -1024,7 +625,7 @@ func (r *randomSource) Read(p []byte) (n int, err error) {
func (r *randomSource) Write(p []byte) (n int, err error) {
for i := range p {
if p[i] != r.next() {
return 0, fmt.Errorf("Error in stream at %d", r.counter)
return 0, errors.Errorf("Error in stream at %d", r.counter)
}
}
return len(p), nil
@@ -1066,7 +667,7 @@ func (z *zeroes) Read(p []byte) (n int, err error) {

// Test encrypt decrypt with different buffer sizes
func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
c.cryptoRand = &zeroes{} // zero out the nonce
buf := make([]byte, bufSize)
@@ -1136,7 +737,7 @@ func TestEncryptData(t *testing.T) {
{[]byte{1}, file1},
{[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, file16},
} {
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator

@@ -1159,7 +760,7 @@ func TestEncryptData(t *testing.T) {
}

func TestNewEncrypter(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator

@@ -1175,12 +776,13 @@ func TestNewEncrypter(t *testing.T) {
fh, err = c.newEncrypter(z, nil)
assert.Nil(t, fh)
assert.Error(t, err, "short read of nonce")

}

// Test the stream returning 0, io.ErrUnexpectedEOF - this used to
// cause a fatal loop
func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)

in := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
@@ -1209,7 +811,7 @@ func (c *closeDetector) Close() error {
}

func TestNewDecrypter(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator

@@ -1252,7 +854,7 @@ func TestNewDecrypter(t *testing.T) {

// Test the stream returning 0, io.ErrUnexpectedEOF
func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)

in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
@@ -1268,7 +870,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
}

func TestNewDecrypterSeekLimit(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
c.cryptoRand = &zeroes{} // nodge the crypto rand generator

@@ -1474,7 +1076,7 @@ func TestDecrypterCalculateUnderlying(t *testing.T) {
}

func TestDecrypterRead(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)

// Test truncating the file at each possible point
@@ -1538,7 +1140,7 @@ func TestDecrypterRead(t *testing.T) {
}

func TestDecrypterClose(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)

cd := newCloseDetector(bytes.NewBuffer(file16))
@@ -1576,7 +1178,7 @@ func TestDecrypterClose(t *testing.T) {
}

func TestPutGetBlock(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)

block := c.getBlock()
@@ -1587,7 +1189,7 @@ func TestPutGetBlock(t *testing.T) {
}

func TestKey(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)

// Check zero keys OK

@@ -3,13 +3,13 @@ package crypt

import (
"context"
"errors"
"fmt"
"io"
"path"
"strings"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
@@ -30,7 +30,7 @@ func init() {
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to encrypt/decrypt.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "filename_encryption",
@@ -39,13 +39,13 @@ func init() {
Examples: []fs.OptionExample{
{
Value: "standard",
Help: "Encrypt the filenames.\nSee the docs for the details.",
Help: "Encrypt the filenames see the docs for the details.",
}, {
Value: "obfuscate",
Help: "Very simple filename obfuscation.",
}, {
Value: "off",
Help: "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
},
},
}, {
@@ -71,7 +71,7 @@ NB If filename_encryption is "off" then this option will do nothing.`,
Required: true,
}, {
Name: "password2",
Help: "Password or pass phrase for salt.\n\nOptional but recommended.\nShould be different to the previous password.",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
IsPassword: true,
}, {
Name: "server_side_across_configs",
@@ -116,29 +116,6 @@ names, or for debugging purposes.`,
Help: "Encrypt file data.",
},
},
}, {
Name: "filename_encoding",
Help: `How to encode the encrypted filename to text string.

This option could help with shortening the encrypted filename. The
suitable option would depend on the way your remote count the filename
length and if it's case sensitve.`,
Default: "base32",
Examples: []fs.OptionExample{
{
Value: "base32",
Help: "Encode using base32. Suitable for all remote.",
},
{
Value: "base64",
Help: "Encode using base64. Suitable for case sensitive remote.",
},
{
Value: "base32768",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)",
},
},
Advanced: true,
}},
})
}
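
The filename_encoding option removed above trades alphabet size against filename length: base32 spends the most characters per encrypted block, base64 fewer, and base32768 packs roughly 15 bits into each Unicode character, which only pays off on remotes that count UTF-16 units or codepoints rather than UTF-8 bytes (the OneDrive case in the help text). The per-block arithmetic, shown with stdlib encoders (rclone's own encoders differ in alphabet but not in these lengths):

package main

import (
	"encoding/base32"
	"encoding/base64"
	"fmt"
)

func main() {
	block := make([]byte, 16) // one 16-byte cipher block of an encrypted name
	b32 := base32.HexEncoding.WithPadding(base32.NoPadding).EncodeToString(block)
	b64 := base64.RawURLEncoding.EncodeToString(block)
	fmt.Println(len(b32)) // 26 characters per block
	fmt.Println(len(b64)) // 22 characters per block, ~15% shorter
	// base32768 would spend ceil(16*8/15) = 9 characters per block --
	// matching the 9-rune test vectors above -- but each is a multi-byte
	// rune, so it only helps when the remote counts characters, not bytes.
}
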
@@ -154,22 +131,18 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
}
password, err := obscure.Reveal(opt.Password)
if err != nil {
return nil, fmt.Errorf("failed to decrypt password: %w", err)
return nil, errors.Wrap(err, "failed to decrypt password")
}
var salt string
if opt.Password2 != "" {
salt, err = obscure.Reveal(opt.Password2)
if err != nil {
return nil, fmt.Errorf("failed to decrypt password2: %w", err)
return nil, errors.Wrap(err, "failed to decrypt password2")
}
}
enc, err := NewNameEncoding(opt.FilenameEncoding)
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
if err != nil {
return nil, err
}
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption, enc)
if err != nil {
return nil, fmt.Errorf("failed to make cipher: %w", err)
return nil, errors.Wrap(err, "failed to make cipher")
}
return cipher, nil
}
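
This hunk is representative of the whole commit: each stdlib fmt.Errorf("...: %w", err) on the left is traded for errors.Wrap(err, "...") from github.com/pkg/errors on the right. Both attach context while keeping the underlying cause inspectable; a minimal stdlib-only sketch of the %w behaviour (the sentinel error is invented for illustration):

package main

import (
	"errors"
	"fmt"
)

var errReveal = errors.New("obscured input is malformed") // hypothetical sentinel

func main() {
	// Stdlib wrapping, as on the left-hand lines of this hunk.
	err := fmt.Errorf("failed to decrypt password: %w", errReveal)
	fmt.Println(err)                       // context plus the original message
	fmt.Println(errors.Is(err, errReveal)) // true: %w keeps the chain intact
	// errors.Wrap(errReveal, "failed to decrypt password") from
	// github.com/pkg/errors reads the same and exposes the cause via
	// Unwrap/Cause, so errors.Is/As keep working either way.
}
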
@@ -219,7 +192,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
}
}
if err != fs.ErrorIsFile && err != nil {
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote)
}
f := &Fs{
Fs: wrappedFs,
@@ -256,7 +229,6 @@ type Options struct {
Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ShowMapping bool `config:"show_mapping"`
FilenameEncoding string `config:"filename_encoding"`
}

// Fs represents a wrapped fs.Fs
@@ -328,7 +300,7 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
case fs.Directory:
f.addDir(ctx, &newEntries, x)
default:
return nil, fmt.Errorf("Unknown object type %T", entry)
return nil, errors.Errorf("Unknown object type %T", entry)
}
}
return newEntries, nil
@@ -391,11 +363,7 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
if f.opt.NoDataEncryption {
o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
if err == nil && o != nil {
o = f.newObject(o)
}
return o, err
return put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
}

// Encrypt the data into wrappedIn
@@ -434,7 +402,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
var dstHash string
dstHash, err = o.Hash(ctx, ht)
if err != nil {
return nil, fmt.Errorf("failed to read destination hash: %w", err)
return nil, errors.Wrap(err, "failed to read destination hash")
}
if srcHash != "" && dstHash != "" {
if srcHash != dstHash {
@@ -443,7 +411,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
}
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}
@@ -644,24 +612,24 @@ func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Objec
// Open the src for input
in, err := src.Open(ctx)
if err != nil {
return "", fmt.Errorf("failed to open src: %w", err)
return "", errors.Wrap(err, "failed to open src")
}
defer fs.CheckClose(in, &err)

// Now encrypt the src with the nonce
out, err := f.cipher.newEncrypter(in, &nonce)
if err != nil {
return "", fmt.Errorf("failed to make encrypter: %w", err)
return "", errors.Wrap(err, "failed to make encrypter")
}

// pipe into hash
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
return "", fmt.Errorf("failed to make hasher: %w", err)
return "", errors.Wrap(err, "failed to make hasher")
}
_, err = io.Copy(m, out)
if err != nil {
return "", fmt.Errorf("failed to hash data: %w", err)
return "", errors.Wrap(err, "failed to hash data")
}

return m.Sums()[hashType], nil
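
computeHashWithNonce above computes the expected encrypted hash without a second upload or a full in-memory buffer: it re-encrypts the source with the stored nonce and streams the result straight into a hasher. The pipe-into-hash step in self-contained stdlib form:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	// Stream any io.Reader through a hasher with io.Copy instead of
	// buffering the whole (encrypted) file; the reader stands in for
	// the encrypter output above.
	in := strings.NewReader("encrypted stream stand-in")
	h := md5.New()
	if _, err := io.Copy(h, in); err != nil {
		fmt.Println("failed to hash data:", err)
		return
	}
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}
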
@@ -680,12 +648,12 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
// use a limited read so we only read the header
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
if err != nil {
return "", fmt.Errorf("failed to open object to read nonce: %w", err)
return "", errors.Wrap(err, "failed to open object to read nonce")
}
d, err := f.cipher.newDecrypter(in)
if err != nil {
_ = in.Close()
return "", fmt.Errorf("failed to open object to read nonce: %w", err)
return "", errors.Wrap(err, "failed to open object to read nonce")
}
nonce := d.nonce
// fs.Debugf(o, "Read nonce % 2x", nonce)
@@ -704,7 +672,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
// Close d (and hence in) once we have read the nonce
err = d.Close()
if err != nil {
return "", fmt.Errorf("failed to close nonce read: %w", err)
return "", errors.Wrap(err, "failed to close nonce read")
}

return f.computeHashWithNonce(ctx, nonce, src, hashType)
@@ -823,7 +791,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
for _, encryptedFileName := range arg {
fileName, err := f.DecryptFileName(encryptedFileName)
if err != nil {
return out, fmt.Errorf("failed to decrypt: %s: %w", encryptedFileName, err)
return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName))
}
out = append(out, fileName)
}
@@ -1027,9 +995,6 @@ func (o *ObjectInfo) Size() int64 {
if size < 0 {
return size
}
if o.f.opt.NoDataEncryption {
return size
}
return o.f.cipher.EncryptedSize(size)
}
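
EncryptedSize, called at the end of Size() above, is pure arithmetic over the crypt container format. A sketch assuming the framing documented for rclone crypt -- a 32-byte file header plus a 16-byte authenticator on every 64 KiB block; the constants are assumptions taken from the crypt docs, not from this diff:

package main

import "fmt"

// encryptedSize mirrors the size arithmetic behind EncryptedSize under
// the assumed framing: header + plaintext + one authenticator per
// (possibly partial) 64 KiB block.
func encryptedSize(size int64) int64 {
	const (
		fileHeaderSize  = 32        // magic + nonce (assumed)
		blockDataSize   = 64 * 1024 // plaintext bytes per block (assumed)
		blockHeaderSize = 16        // per-block authenticator (assumed)
	)
	blocks := size / blockDataSize
	if size%blockDataSize != 0 {
		blocks++
	}
	return fileHeaderSize + size + blocks*blockHeaderSize
}

func main() {
	fmt.Println(encryptedSize(0))     // 32: an empty file is just the header
	fmt.Println(encryptedSize(1))     // 49: header + 1 byte + one authenticator
	fmt.Println(encryptedSize(65536)) // 65584: exactly one full block
}
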

@@ -29,7 +29,7 @@ func TestIntegration(t *testing.T) {
}

// TestStandard runs integration tests against the remote
func TestStandardBase32(t *testing.T) {
func TestStandard(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
@@ -49,48 +49,6 @@ func TestStandardBase32(t *testing.T) {
})
}

func TestStandardBase64(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base64"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}

func TestStandardBase32768(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base32768"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}

// TestOff runs integration tests against the remote
func TestOff(t *testing.T) {
if *fstest.RemoteName != "" {

@@ -4,7 +4,7 @@
// buffers which are a multiple of an underlying crypto block size.
package pkcs7

import "errors"
import "github.com/pkg/errors"

// Errors Unpad can return
var (
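
The pkcs7 package comment above spells out the contract: pad buffers to a whole number of crypto blocks, then strip the padding on the way back. A minimal PKCS#7 sketch of that rule (pad with n copies of the byte n), independent of the error-package swap in this hunk:

package main

import (
	"bytes"
	"errors"
	"fmt"
)

// pad appends n copies of the byte n so the result is a whole number of
// blocks; a full extra block is added when the input already fits.
func pad(blockSize int, buf []byte) []byte {
	n := blockSize - len(buf)%blockSize
	return append(buf, bytes.Repeat([]byte{byte(n)}, n)...)
}

// unpad validates and strips the PKCS#7 padding.
func unpad(blockSize int, buf []byte) ([]byte, error) {
	if len(buf) == 0 || len(buf)%blockSize != 0 {
		return nil, errors.New("bad padded length")
	}
	n := int(buf[len(buf)-1])
	if n == 0 || n > blockSize {
		return nil, errors.New("bad padding byte")
	}
	for _, b := range buf[len(buf)-n:] {
		if int(b) != n {
			return nil, errors.New("inconsistent padding")
		}
	}
	return buf[:len(buf)-n], nil
}

func main() {
	p := pad(16, []byte("hello"))
	fmt.Println(len(p)) // 16
	out, err := unpad(16, p)
	fmt.Printf("%s %v\n", out, err) // hello <nil>
}
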

360
backend/drive/drive.go
Normal file → Executable file
@@ -11,10 +11,10 @@ import (
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"mime"
"net/http"
"path"
@@ -26,13 +26,13 @@ import (
"text/template"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/fspath"
@@ -68,8 +68,8 @@ const (
defaultScope = "drive"
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
minChunkSize = fs.SizeSuffix(googleapi.MinUploadChunkSize)
defaultChunkSize = 8 * fs.Mebi
minChunkSize = 256 * fs.KibiByte
defaultChunkSize = 8 * fs.MebiByte
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
listRGrouping = 50 // number of IDs to search at once when using ListR
listRInputBuffer = 1000 // size of input buffer when using ListR
@@ -183,71 +183,32 @@ func init() {
Description: "Google Drive",
NewFs: NewFs,
CommandHelp: commandHelp,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, fmt.Errorf("couldn't parse config into struct: %w", err)
fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
return
}

switch config.State {
case "":
// Fill in the scopes
driveConfig.Scopes = driveScopes(opt.Scope)

// Set the root_folder_id if using drive.appfolder
if driveScopesContainsAppFolder(driveConfig.Scopes) {
m.Set("root_folder_id", "appDataFolder")
}

if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
OAuth2Config: driveConfig,
})
}
return fs.ConfigGoto("teamdrive")
case "teamdrive":
if opt.TeamDriveID == "" {
return fs.ConfigConfirm("teamdrive_ok", false, "config_change_team_drive", "Configure this as a Shared Drive (Team Drive)?\n")
}
return fs.ConfigConfirm("teamdrive_change", false, "config_change_team_drive", fmt.Sprintf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID))
case "teamdrive_ok":
if config.Result == "false" {
m.Set("team_drive", "")
return nil, nil
}
return fs.ConfigGoto("teamdrive_config")
case "teamdrive_change":
if config.Result == "false" {
return nil, nil
}
return fs.ConfigGoto("teamdrive_config")
case "teamdrive_config":
f, err := newFs(ctx, name, "", m)
if err != nil {
return nil, fmt.Errorf("failed to make Fs to list Shared Drives: %w", err)
}
teamDrives, err := f.listTeamDrives(ctx)
if err != nil {
return nil, err
}
if len(teamDrives) == 0 {
return fs.ConfigError("", "No Shared Drives found in your account")
}
return fs.ConfigChoose("teamdrive_final", "config_team_drive", "Shared Drive", len(teamDrives), func(i int) (string, string) {
teamDrive := teamDrives[i]
return teamDrive.Id, teamDrive.Name
})
case "teamdrive_final":
driveID := config.Result
m.Set("team_drive", driveID)
m.Set("root_folder_id", "")
opt.TeamDriveID = driveID
opt.RootFolderID = ""
return nil, nil
// Fill in the scopes
driveConfig.Scopes = driveScopes(opt.Scope)
// Set the root_folder_id if using drive.appfolder
if driveScopesContainsAppFolder(driveConfig.Scopes) {
m.Set("root_folder_id", "appDataFolder")
}

if opt.ServiceAccountFile == "" {
err = oauthutil.Config(ctx, "drive", name, m, driveConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
}
err = configTeamDrive(ctx, opt, m, name)
if err != nil {
log.Fatalf("Failed to configure Shared Drive: %v", err)
}
return nil, fmt.Errorf("unknown state %q", config.State)
},
Options: append(driveOAuthOptions(), []fs.Option{{
Name: "scope",
@@ -270,7 +231,7 @@ func init() {
}},
}, {
Name: "root_folder_id",
Help: `ID of the root folder.
Help: `ID of the root folder
Leave blank normally.

Fill in to access "Computers" folders (see docs), or for rclone to use
@@ -278,15 +239,15 @@ a non root folder as its starting point.
`,
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "team_drive",
Help: "ID of the Shared Drive (Team Drive).",
Help: "ID of the Shared Drive (Team Drive)",
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
@@ -297,12 +258,12 @@ a non root folder as its starting point.
}, {
Name: "use_trash",
Default: true,
Help: "Send files to the trash instead of deleting permanently.\n\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
Help: "Send files to the trash instead of deleting permanently.\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
Advanced: true,
}, {
Name: "skip_gdocs",
Default: false,
Help: "Skip google documents in all listings.\n\nIf given, gdocs practically become invisible to rclone.",
Help: "Skip google documents in all listings.\nIf given, gdocs practically become invisible to rclone.",
Advanced: true,
}, {
Name: "skip_checksum_gphotos",
@@ -335,7 +296,7 @@ commands (copy, sync, etc.), and with all other commands too.`,
}, {
Name: "trashed_only",
Default: false,
Help: "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.",
Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
Advanced: true,
}, {
Name: "starred_only",
@@ -345,7 +306,7 @@ commands (copy, sync, etc.), and with all other commands too.`,
}, {
Name: "formats",
Default: "",
Help: "Deprecated: See export_formats.",
Help: "Deprecated: see export_formats",
Advanced: true,
Hide: fs.OptionHideConfigurator,
}, {
@@ -361,12 +322,12 @@ commands (copy, sync, etc.), and with all other commands too.`,
}, {
Name: "allow_import_name_change",
Default: false,
Help: "Allow the filetype to change when uploading Google docs.\n\nE.g. file.doc to file.docx. This will confuse sync and reupload every time.",
Help: "Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.",
Advanced: true,
}, {
Name: "use_created_date",
Default: false,
Help: `Use file created date instead of modified date.
Help: `Use file created date instead of modified date.,

Useful when downloading data and you want the creation date used in
place of the last modified date.
@@ -400,7 +361,7 @@ date is used.`,
}, {
Name: "list_chunk",
Default: 1000,
Help: "Size of listing chunk 100-1000, 0 to disable.",
Help: "Size of listing chunk 100-1000. 0 to disable.",
Advanced: true,
}, {
Name: "impersonate",
@@ -410,19 +371,17 @@ date is used.`,
}, {
Name: "alternate_export",
Default: false,
Help: "Deprecated: No longer needed.",
Help: "Deprecated: no longer needed",
Hide: fs.OptionHideBoth,
}, {
Name: "upload_cutoff",
Default: defaultChunkSize,
Help: "Cutoff for switching to chunked upload.",
Help: "Cutoff for switching to chunked upload",
Advanced: true,
}, {
Name: "chunk_size",
Default: defaultChunkSize,
Help: `Upload chunk size.

Must a power of 2 >= 256k.
Help: `Upload chunk size. Must a power of 2 >= 256k.

Making this larger will improve performance, but note that each chunk
is buffered in memory one per transfer.
@@ -492,7 +451,7 @@ configurations.`,
}, {
Name: "disable_http2",
Default: true,
Help: `Disable drive using http2.
Help: `Disable drive using http2

There is currently an unsolved issue with the google drive backend and
HTTP/2. HTTP/2 is therefore disabled by default for the drive backend
@@ -506,9 +465,9 @@ See: https://github.com/rclone/rclone/issues/3631
}, {
Name: "stop_on_upload_limit",
Default: false,
Help: `Make upload limit errors be fatal.
Help: `Make upload limit errors be fatal

At the time of writing it is only possible to upload 750 GiB of data to
At the time of writing it is only possible to upload 750GB of data to
Google Drive a day (this is an undocumented limit). When this limit is
reached Google Drive produces a slightly different error message. When
this flag is set it causes these errors to be fatal. These will stop
@@ -523,9 +482,9 @@ See: https://github.com/rclone/rclone/issues/3857
}, {
Name: "stop_on_download_limit",
Default: false,
Help: `Make download limit errors be fatal.
Help: `Make download limit errors be fatal

At the time of writing it is only possible to download 10 TiB of data from
At the time of writing it is only possible to download 10TB of data from
Google Drive a day (this is an undocumented limit). When this limit is
reached Google Drive produces a slightly different error message. When
this flag is set it causes these errors to be fatal. These will stop
@@ -537,7 +496,7 @@ Google don't document so it may break in the future.
Advanced: true,
}, {
Name: "skip_shortcuts",
Help: `If set skip shortcut files.
Help: `If set skip shortcut files

Normally rclone dereferences shortcut files making them appear as if
they are the original file (see [the shortcuts section](#shortcuts)).
@@ -563,7 +522,7 @@ If this flag is set then rclone will ignore shortcut files completely.
} {
for mimeType, extension := range m {
if err := mime.AddExtensionType(extension, mimeType); err != nil {
fs.Errorf("Failed to register MIME type %q: %v", mimeType, err)
log.Fatalf("Failed to register MIME type %q: %v", mimeType, err)
}
}
}
@@ -619,7 +578,6 @@ type Fs struct {
client *http.Client // authorized client
rootFolderID string // the id of the root folder
dirCache *dircache.DirCache // Map of directory path to directory id
lastQuery string // Last query string to check in unit tests
pacer *fs.Pacer // To pace the API calls
exportExtensions []string // preferred extensions to download docs
importMimeTypes []string // MIME types to convert to docs
@@ -755,7 +713,7 @@ func (f *Fs) getFile(ctx context.Context, ID string, fields googleapi.Field) (in
func (f *Fs) getRootID(ctx context.Context) (string, error) {
info, err := f.getFile(ctx, "root", "id")
if err != nil {
return "", fmt.Errorf("couldn't find root directory ID: %w", err)
return "", errors.Wrap(err, "couldn't find root directory ID")
}
return info.Id, nil
}
@@ -833,31 +791,11 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
if filesOnly {
query = append(query, fmt.Sprintf("mimeType!='%s'", driveFolderType))
}

// Constrain query using filter if this remote is a sync/copy/walk source.
if fi, use := filter.GetConfig(ctx), filter.GetUseFilter(ctx); fi != nil && use {
queryByTime := func(op string, tm time.Time) {
if tm.IsZero() {
return
}
// https://developers.google.com/drive/api/v3/ref-search-terms#operators
// Query times use RFC 3339 format, default timezone is UTC
timeStr := tm.UTC().Format("2006-01-02T15:04:05")
term := fmt.Sprintf("(modifiedTime %s '%s' or mimeType = '%s')", op, timeStr, driveFolderType)
query = append(query, term)
}
queryByTime(">=", fi.ModTimeFrom)
queryByTime("<=", fi.ModTimeTo)
}

list := f.svc.Files.List()
queryString := strings.Join(query, " and ")
if queryString != "" {
list.Q(queryString)
// fs.Debugf(f, "list query: %q", queryString)
if len(query) > 0 {
list.Q(strings.Join(query, " and "))
// fmt.Printf("list Query = %q\n", query)
}
f.lastQuery = queryString // for unit tests

if f.opt.ListChunk > 0 {
list.PageSize(f.opt.ListChunk)
}
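
The block removed above narrows the Drive listing with modifiedTime terms formatted per the Drive v3 search-terms reference (RFC 3339, UTC), OR'd with the folder MIME type so directories always survive the filter. Building one such query string in isolation:

package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	// Times are rendered in RFC 3339 (UTC) per the Drive search-terms
	// reference; the OR with the folder MIME type keeps directories
	// visible so the tree can still be walked.
	const driveFolderType = "application/vnd.google-apps.folder"
	from := time.Date(2021, 6, 1, 0, 0, 0, 0, time.UTC)
	term := fmt.Sprintf("(modifiedTime >= '%s' or mimeType = '%s')",
		from.UTC().Format("2006-01-02T15:04:05"), driveFolderType)
	query := strings.Join([]string{"trashed=false", term}, " and ")
	fmt.Println(query)
}
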
@@ -882,7 +820,7 @@ OUTER:
return f.shouldRetry(ctx, err)
})
if err != nil {
return false, fmt.Errorf("couldn't list directory: %w", err)
return false, errors.Wrap(err, "couldn't list directory")
}
if files.IncompleteSearch {
fs.Errorf(f, "search result INCOMPLETE")
@@ -904,7 +842,7 @@ OUTER:
}
item, err = f.resolveShortcut(ctx, item)
if err != nil {
return false, fmt.Errorf("list: %w", err)
return false, errors.Wrap(err, "list")
}
}
// Check the case of items is correct since
@@ -965,7 +903,7 @@ func fixMimeType(mimeTypeIn string) string {
mimeTypeOut = mime.FormatMediaType(mediaType, param)
}
if mimeTypeOut == "" {
panic(fmt.Errorf("unable to fix MIME type %q", mimeTypeIn))
panic(errors.Errorf("unable to fix MIME type %q", mimeTypeIn))
}
return mimeTypeOut
}
@@ -1000,7 +938,7 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
}
mt := mime.TypeByExtension(extension)
if mt == "" {
return extensions, mimeTypes, fmt.Errorf("couldn't find MIME type for extension %q", extension)
return extensions, mimeTypes, errors.Errorf("couldn't find MIME type for extension %q", extension)
}
if !containsString(extensions, extension) {
extensions = append(extensions, extension)
@@ -1011,6 +949,48 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
return
}

// Figure out if the user wants to use a team drive
func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
ci := fs.GetConfig(ctx)

// Stop if we are running non-interactive config
if ci.AutoConfirm {
return nil
}
if opt.TeamDriveID == "" {
fmt.Printf("Configure this as a Shared Drive (Team Drive)?\n")
} else {
fmt.Printf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID)
}
if !config.Confirm(false) {
return nil
}
f, err := newFs(ctx, name, "", m)
if err != nil {
return errors.Wrap(err, "failed to make Fs to list Shared Drives")
}
fmt.Printf("Fetching Shared Drive list...\n")
teamDrives, err := f.listTeamDrives(ctx)
if err != nil {
return err
}
if len(teamDrives) == 0 {
fmt.Printf("No Shared Drives found in your account")
return nil
}
var driveIDs, driveNames []string
for _, teamDrive := range teamDrives {
driveIDs = append(driveIDs, teamDrive.Id)
driveNames = append(driveNames, teamDrive.Name)
}
driveID := config.Choose("Enter a Shared Drive ID", driveIDs, driveNames, true)
m.Set("team_drive", driveID)
m.Set("root_folder_id", "")
opt.TeamDriveID = driveID
opt.RootFolderID = ""
return nil
}

// getClient makes an http client according to the options
func getClient(ctx context.Context, opt *Options) *http.Client {
t := fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
@@ -1027,7 +1007,7 @@ func getServiceAccountClient(ctx context.Context, opt *Options, credentialsData
scopes := driveScopes(opt.Scope)
conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
if err != nil {
return nil, fmt.Errorf("error processing credentials: %w", err)
return nil, errors.Wrap(err, "error processing credentials")
}
if opt.Impersonate != "" {
conf.Subject = opt.Impersonate
@@ -1044,19 +1024,19 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
return nil, errors.Wrap(err, "error opening service account credentials file")
}
opt.ServiceAccountCredentials = string(loadedCreds)
}
if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient(ctx, opt, []byte(opt.ServiceAccountCredentials))
if err != nil {
return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
return nil, errors.Wrap(err, "failed to create oauth client from service account")
}
} else {
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
if err != nil {
return nil, fmt.Errorf("failed to create oauth client: %w", err)
return nil, errors.Wrap(err, "failed to create oauth client")
}
}

@@ -1065,10 +1045,10 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm

func checkUploadChunkSize(cs fs.SizeSuffix) error {
if !isPowerOfTwo(int64(cs)) {
return fmt.Errorf("%v isn't a power of two", cs)
return errors.Errorf("%v isn't a power of two", cs)
}
if cs < minChunkSize {
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
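
checkUploadChunkSize above leans on an isPowerOfTwo helper that the hunk doesn't show; the usual implementation is the single-set-bit test, sketched here (an assumption about rclone's helper, but any correct version behaves the same):

package main

import "fmt"

// isPowerOfTwo: a positive n is a power of two iff it has exactly one
// set bit, i.e. n & (n-1) clears the lowest set bit and leaves zero.
func isPowerOfTwo(n int64) bool {
	return n > 0 && n&(n-1) == 0
}

func main() {
	fmt.Println(isPowerOfTwo(256 * 1024))  // true: the 1<<18 minimum chunk size
	fmt.Println(isPowerOfTwo(8 * 1 << 20)) // true: the 8 MiB default
	fmt.Println(isPowerOfTwo(3 * 1 << 20)) // false: rejected by checkUploadChunkSize
}
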
@@ -1106,16 +1086,16 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, fmt.Errorf("drive: upload cutoff: %w", err)
return nil, errors.Wrap(err, "drive: upload cutoff")
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, fmt.Errorf("drive: chunk size: %w", err)
return nil, errors.Wrap(err, "drive: chunk size")
}

oAuthClient, err := createOAuthClient(ctx, opt, name, m)
if err != nil {
return nil, fmt.Errorf("drive: failed when making oauth client: %w", err)
return nil, errors.Wrap(err, "drive: failed when making oauth client")
}

root, err := parseDrivePath(path)
@@ -1149,13 +1129,13 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
f.client = oAuthClient
f.svc, err = drive.New(f.client)
if err != nil {
return nil, fmt.Errorf("couldn't create Drive client: %w", err)
return nil, errors.Wrap(err, "couldn't create Drive client")
}

if f.opt.V2DownloadMinSize >= 0 {
f.v2Svc, err = drive_v2.New(f.client)
if err != nil {
return nil, fmt.Errorf("couldn't create Drive v2 client: %w", err)
return nil, errors.Wrap(err, "couldn't create Drive v2 client")
}
}

@@ -1180,8 +1160,7 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
// otherwise look up the actual root ID
rootID, err := f.getRootID(ctx)
if err != nil {
var gerr *googleapi.Error
if errors.As(err, &gerr) && gerr.Code == 404 {
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
// 404 means that this scope does not have permission to get the
// root so just use "root"
rootID = "root"
@@ -1190,7 +1169,7 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
}
}
f.rootFolderID = rootID
fs.Debugf(f, "'root_folder_id = %s' - save this in the config to speed up startup", rootID)
fs.Debugf(f, "root_folder_id = %q - save this in the config to speed up startup", rootID)
}

f.dirCache = dircache.New(f.root, f.rootFolderID, f)
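
The getRootID fallback above shows the other recurring trade in this commit: stdlib errors.As walks the whole wrap chain looking for a *googleapi.Error, while pkg/errors' errors.Cause unwraps to the root and type-asserts it. A self-contained sketch with a stand-in error type (googleapi.Error itself is assumed, not imported here):

package main

import (
	"errors"
	"fmt"
)

// apiError stands in for *googleapi.Error, which carries an HTTP status.
type apiError struct{ Code int }

func (e *apiError) Error() string { return fmt.Sprintf("googleapi: got HTTP %d", e.Code) }

func main() {
	err := fmt.Errorf("couldn't find root directory ID: %w", &apiError{Code: 404})
	var gerr *apiError
	// errors.As unwraps every layer added with %w, so the typed error is
	// found even behind context; errors.Cause only inspects the root.
	if errors.As(err, &gerr) && gerr.Code == 404 {
		fmt.Println(`fall back to "root"`)
	}
}
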
@@ -1323,7 +1302,7 @@ func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, expor
func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
t := linkTemplate(exportMimeType)
if t == nil {
return nil, fmt.Errorf("unsupported link type %s", exportMimeType)
return nil, errors.Errorf("unsupported link type %s", exportMimeType)
}
xdgIcon := _mimeTypeToXDGLinkIcons[info.MimeType]
if xdgIcon == "" {
@@ -1336,7 +1315,7 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
info.WebViewLink, info.Name, xdgIcon,
})
if err != nil {
return nil, fmt.Errorf("executing template failed: %w", err)
return nil, errors.Wrap(err, "executing template failed")
}

baseObject := f.newBaseObject(remote+extension, info)
@@ -1353,8 +1332,8 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
//
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.File) (fs.Object, error) {
// If item has MD5 sum it is a file stored on drive
if info.Md5Checksum != "" {
// If item has MD5 sum or a length it is a file stored on drive
if info.Md5Checksum != "" || info.Size > 0 {
return f.newRegularObject(remote, info), nil
}

@@ -1373,11 +1352,11 @@ func (f *Fs) newObjectWithExportInfo(
// will have been resolved so this will do nothing.
info, err = f.resolveShortcut(ctx, info)
if err != nil {
return nil, fmt.Errorf("new object: %w", err)
return nil, errors.Wrap(err, "new object")
}
switch {
case info.MimeType == driveFolderType:
return nil, fs.ErrorIsDir
return nil, fs.ErrorNotAFile
case info.MimeType == shortcutMimeType:
// We can only get here if f.opt.SkipShortcuts is set
// and not from a listing. This is unlikely.
@@ -1387,8 +1366,8 @@ func (f *Fs) newObjectWithExportInfo(
// Pretend a dangling shortcut is a regular object
// It will error if used, but appear in listings so it can be deleted
return f.newRegularObject(remote, info), nil
case info.Md5Checksum != "":
// If item has MD5 sum it is a file stored on drive
case info.Md5Checksum != "" || info.Size > 0:
// If item has MD5 sum or a length it is a file stored on drive
return f.newRegularObject(remote, info), nil
case f.opt.SkipGdocs:
fs.Debugf(remote, "Skipping google document type %q", info.MimeType)
@@ -2016,14 +1995,13 @@ func (f *Fs) resolveShortcut(ctx context.Context, item *drive.File) (newItem *dr
}
newItem, err = f.getFile(ctx, item.ShortcutDetails.TargetId, f.fileFields)
if err != nil {
var gerr *googleapi.Error
if errors.As(err, &gerr) && gerr.Code == 404 {
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
// 404 means dangling shortcut, so just return the shortcut with the mime type mangled
fs.Logf(nil, "Dangling shortcut %q detected", item.Name)
item.MimeType = shortcutMimeTypeDangling
return item, nil
}
return nil, fmt.Errorf("failed to resolve shortcut: %w", err)
return nil, errors.Wrap(err, "failed to resolve shortcut")
}
// make sure we use the Name, Parents and Trashed from the original item
newItem.Name = item.Name
@@ -2125,10 +2103,10 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,

exportExt, _, _ = f.findExportFormatByMimeType(ctx, importMimeType)
if exportExt == "" {
return nil, fmt.Errorf("No export format found for %q", importMimeType)
return nil, errors.Errorf("No export format found for %q", importMimeType)
}
if exportExt != srcExt && !f.opt.AllowImportNameChange {
return nil, fmt.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
return nil, errors.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
}
}
}
@@ -2149,7 +2127,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
// Don't retry, return a retry error instead
err = f.pacer.CallNoRetry(func() (bool, error) {
info, err = f.svc.Files.Create(createInfo).
Media(in, googleapi.ContentType(srcMimeType), googleapi.ChunkSize(0)).
Media(in, googleapi.ContentType(srcMimeType)).
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever).
@@ -2196,7 +2174,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return false
})
if err != nil {
return fmt.Errorf("MergeDirs list failed on %v: %w", srcDir, err)
return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir)
}
// move them into place
for _, info := range infos {
@@ -2212,14 +2190,14 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return f.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("MergeDirs move failed on %q in %v: %w", info.Name, srcDir, err)
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir)
}
}
// rmdir (into trash) the now empty source directory
fs.Infof(srcDir, "removing empty directory")
err = f.delete(ctx, srcDir.ID(), true)
if err != nil {
return fmt.Errorf("MergeDirs move failed to rmdir %q: %w", srcDir, err)
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
}
}
return nil
@@ -2282,7 +2260,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return err
}
if found {
return fmt.Errorf("directory not empty")
return errors.Errorf("directory not empty")
}
}
if root != "" {
@@ -2460,7 +2438,7 @@ func (f *Fs) cleanupTeamDrive(ctx context.Context, dir string, directoryID strin
return false
})
if err != nil {
err = fmt.Errorf("failed to list directory: %w", err)
err = errors.Wrap(err, "failed to list directory")
r.Errors++
fs.Errorf(dir, "%v", err)
}
@@ -2504,7 +2482,7 @@ func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
return f.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to get Shared Drive info: %w", err)
return errors.Wrap(err, "failed to get Shared Drive info")
}
fs.Debugf(f, "read info from Shared Drive %q", td.Name)
return err
@@ -2527,7 +2505,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get Drive storageQuota: %w", err)
return nil, errors.Wrap(err, "failed to get Drive storageQuota")
}
q := about.StorageQuota
usage := &fs.Usage{
@@ -2851,7 +2829,7 @@ func (f *Fs) Hashes() hash.Set {
func (f *Fs) changeChunkSize(chunkSizeString string) (err error) {
chunkSizeInt, err := strconv.ParseInt(chunkSizeString, 10, 64)
if err != nil {
return fmt.Errorf("couldn't convert chunk size to int: %w", err)
return errors.Wrap(err, "couldn't convert chunk size to int")
}
chunkSize := fs.SizeSuffix(chunkSizeInt)
if chunkSize == f.opt.ChunkSize {
@@ -2888,17 +2866,17 @@ func (f *Fs) changeServiceAccountFile(ctx context.Context, file string) (err err
f.opt.ServiceAccountCredentials = ""
oAuthClient, err := createOAuthClient(ctx, &f.opt, f.name, f.m)
if err != nil {
return fmt.Errorf("drive: failed when making oauth client: %w", err)
return errors.Wrap(err, "drive: failed when making oauth client")
}
f.client = oAuthClient
f.svc, err = drive.New(f.client)
if err != nil {
|
||||
return fmt.Errorf("couldn't create Drive client: %w", err)
|
||||
return errors.Wrap(err, "couldn't create Drive client")
|
||||
}
|
||||
if f.opt.V2DownloadMinSize >= 0 {
|
||||
f.v2Svc, err = drive_v2.New(f.client)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't create Drive v2 client: %w", err)
|
||||
return errors.Wrap(err, "couldn't create Drive v2 client")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -2926,13 +2904,13 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
|
||||
}
|
||||
isDir = true
|
||||
} else if srcObj, err := srcFs.NewObject(ctx, srcPath); err != nil {
|
||||
if err != fs.ErrorIsDir {
|
||||
return nil, fmt.Errorf("can't find source: %w", err)
|
||||
if err != fs.ErrorNotAFile {
|
||||
return nil, errors.Wrap(err, "can't find source")
|
||||
}
|
||||
// source was a directory
|
||||
srcID, err = srcFs.dirCache.FindDir(ctx, srcPath, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find source dir: %w", err)
|
||||
return nil, errors.Wrap(err, "failed to find source dir")
|
||||
}
|
||||
isDir = true
|
||||
} else {
|
||||
@@ -2946,16 +2924,16 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
|
||||
if err != fs.ErrorObjectNotFound {
|
||||
if err == nil {
|
||||
err = errors.New("existing file")
|
||||
} else if err == fs.ErrorIsDir {
|
||||
} else if err == fs.ErrorNotAFile {
|
||||
err = errors.New("existing directory")
|
||||
}
|
||||
return nil, fmt.Errorf("not overwriting shortcut target: %w", err)
|
||||
return nil, errors.Wrap(err, "not overwriting shortcut target")
|
||||
}
|
||||
|
||||
// Create destination shortcut
|
||||
createInfo, err := dstFs.createFileInfo(ctx, dstPath, time.Now())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("shortcut destination failed: %w", err)
|
||||
return nil, errors.Wrap(err, "shortcut destination failed")
|
||||
}
|
||||
createInfo.MimeType = shortcutMimeType
|
||||
createInfo.ShortcutDetails = &drive.FileShortcutDetails{
|
||||
@@ -2972,7 +2950,7 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
|
||||
return dstFs.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("shortcut creation failed: %w", err)
|
||||
return nil, errors.Wrap(err, "shortcut creation failed")
|
||||
}
|
||||
if isDir {
|
||||
return nil, nil
|
||||
@@ -2992,7 +2970,7 @@ func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.Drive, err err
|
||||
return defaultFs.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return drives, fmt.Errorf("listing Team Drives failed: %w", err)
|
||||
return drives, errors.Wrap(err, "listing Team Drives failed")
|
||||
}
|
||||
drives = append(drives, teamDrives.Drives...)
|
||||
if teamDrives.NextPageToken == "" {
|
||||
@@ -3035,7 +3013,7 @@ func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurs
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to restore: %w", err)
|
||||
err = errors.Wrap(err, "failed to restore")
|
||||
r.Errors++
|
||||
fs.Errorf(remote, "%v", err)
|
||||
} else {
|
||||
@@ -3052,7 +3030,7 @@ func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurs
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to list directory: %w", err)
|
||||
err = errors.Wrap(err, "failed to list directory")
|
||||
r.Errors++
|
||||
fs.Errorf(dir, "%v", err)
|
||||
}
|
||||
@@ -3076,10 +3054,10 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
|
||||
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
|
||||
info, err := f.getFile(ctx, id, f.fileFields)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't find id: %w", err)
|
||||
return errors.Wrap(err, "couldn't find id")
|
||||
}
|
||||
if info.MimeType == driveFolderType {
|
||||
return fmt.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
|
||||
return errors.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
|
||||
}
|
||||
info.Name = f.opt.Enc.ToStandardName(info.Name)
|
||||
o, err := f.newObjectWithInfo(ctx, info.Name, info)
|
||||
@@ -3102,7 +3080,7 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
|
||||
}
|
||||
_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
|
||||
if err != nil {
|
||||
return fmt.Errorf("copy failed: %w", err)
|
||||
return errors.Wrap(err, "copy failed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -3166,7 +3144,7 @@ account.
|
||||
|
||||
Usage:
|
||||
|
||||
rclone backend [-o config] drives drive:
|
||||
rclone backend drives drive:
|
||||
|
||||
This will return a JSON list of objects like this
|
||||
|
||||
@@ -3183,22 +3161,6 @@ This will return a JSON list of objects like this
|
||||
}
|
||||
]
|
||||
|
||||
With the -o config parameter it will output the list in a format
|
||||
suitable for adding to a config file to make aliases for all the
|
||||
drives found.
|
||||
|
||||
[My Drive]
|
||||
type = alias
|
||||
remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
|
||||
|
||||
[Test Drive]
|
||||
type = alias
|
||||
remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
|
||||
|
||||
Adding this to the rclone config file will cause those team drives to
|
||||
be accessible with the aliases shown. This may require manual editing
|
||||
of the names.
|
||||
|
||||
`,
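// Editor's illustration (not part of the original help; the remote and
// alias names here are hypothetical). The config format output can be
// appended to the rclone config file, and after any manual renaming the
// aliases work like any other remote:
//
//     rclone backend -o config drives mydrive: >> ~/.config/rclone/rclone.conf
//     rclone lsd my_team_drive: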
}, {
Name: "untrash",
@@ -3301,7 +3263,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
if ok {
targetFs, err := cache.Get(ctx, target)
if err != nil {
return nil, fmt.Errorf("couldn't find target: %w", err)
return nil, errors.Wrap(err, "couldn't find target")
}
dstFs, ok = targetFs.(*Fs)
if !ok {
@@ -3310,21 +3272,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
}
return f.makeShortcut(ctx, arg[0], dstFs, arg[1])
case "drives":
drives, err := f.listTeamDrives(ctx)
if err != nil {
return nil, err
}
if _, ok := opt["config"]; ok {
lines := []string{}
for _, drive := range drives {
lines = append(lines, "")
lines = append(lines, fmt.Sprintf("[%s]", drive.Name))
lines = append(lines, fmt.Sprintf("type = alias"))
lines = append(lines, fmt.Sprintf("remote = %s,team_drive=%s,root_folder_id=:", f.name, drive.Id))
}
return lines, nil
}
return drives, nil
return f.listTeamDrives(ctx)
case "untrash":
dir := ""
if len(arg) > 0 {
@@ -3340,7 +3288,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
arg = arg[2:]
err = f.copyID(ctx, id, dest)
if err != nil {
return nil, fmt.Errorf("failed copying %q to %q: %w", id, dest, err)
return nil, errors.Wrapf(err, "failed copying %q to %q", id, dest)
}
}
return nil, nil
@@ -3574,11 +3522,11 @@ func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOpt
url += "acknowledgeAbuse=true"
_, res, err = o.httpResponse(ctx, url, "GET", options)
} else {
err = fmt.Errorf("Use the --drive-acknowledge-abuse flag to download this file: %w", err)
err = errors.Wrap(err, "Use the --drive-acknowledge-abuse flag to download this file")
}
}
if err != nil {
return nil, fmt.Errorf("open file failed: %w", err)
return nil, errors.Wrap(err, "open file failed")
}
}
return res.Body, nil
@@ -3672,7 +3620,7 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
Media(in, googleapi.ContentType(uploadMimeType), googleapi.ChunkSize(0)).
Media(in, googleapi.ContentType(uploadMimeType)).
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(o.fs.opt.KeepRevisionForever).
@@ -3742,14 +3690,14 @@ func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.Object
}

if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
return fmt.Errorf("can't update google document type without --drive-import-formats")
return errors.Errorf("can't update google document type without --drive-import-formats")
}
importMimeType = o.fs.findImportFormat(ctx, updateInfo.MimeType)
if importMimeType == "" {
return fmt.Errorf("no import format found for %q", srcMimeType)
return errors.Errorf("no import format found for %q", srcMimeType)
}
if importMimeType != o.documentMimeType {
return fmt.Errorf("can't change google document type (o: %q, src: %q, import: %q)", o.documentMimeType, srcMimeType, importMimeType)
return errors.Errorf("can't change google document type (o: %q, src: %q, import: %q)", o.documentMimeType, srcMimeType, importMimeType)
}
updateInfo.MimeType = importMimeType


@@ -4,8 +4,6 @@ import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
@@ -16,12 +14,11 @@ import (
"testing"
"time"

"github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/sync"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
@@ -464,81 +461,6 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
})
}

// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
func (f *Fs) InternalTestAgeQuery(t *testing.T) {
opt := &filter.Opt{}
err := opt.MaxAge.Set("1h")
assert.NoError(t, err)
flt, err := filter.NewFilter(opt)
assert.NoError(t, err)

defCtx := context.Background()
fltCtx := filter.ReplaceConfig(defCtx, flt)

testCtx1 := fltCtx
testCtx2 := filter.SetUseFilter(testCtx1, true)
testCtx3, testCancel := context.WithCancel(testCtx2)
testCtx4 := filter.SetUseFilter(testCtx3, false)
testCancel()
assert.False(t, filter.GetUseFilter(testCtx1))
assert.True(t, filter.GetUseFilter(testCtx2))
assert.True(t, filter.GetUseFilter(testCtx3))
assert.False(t, filter.GetUseFilter(testCtx4))

subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), "agequery-testdir")
subFsResult, err := fs.NewFs(defCtx, subRemote)
require.NoError(t, err)
subFs, isDriveFs := subFsResult.(*Fs)
require.True(t, isDriveFs)

tempDir1, err := ioutil.TempDir("", "rclone-drive-agequery1-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempDir1)
}()
tempFs1, err := fs.NewFs(defCtx, tempDir1)
require.NoError(t, err)

tempDir2, err := ioutil.TempDir("", "rclone-drive-agequery2-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempDir2)
}()
tempFs2, err := fs.NewFs(defCtx, tempDir2)
require.NoError(t, err)

file1 := fstest.Item{ModTime: time.Now(), Path: "agequery.txt"}
_, _ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true)

// validate sync/copy
const timeQuery = "(modifiedTime >= '"

assert.NoError(t, sync.CopyDir(defCtx, subFs, tempFs1, false))
assert.NotContains(t, subFs.lastQuery, timeQuery)

assert.NoError(t, sync.CopyDir(fltCtx, subFs, tempFs1, false))
assert.Contains(t, subFs.lastQuery, timeQuery)

assert.NoError(t, sync.CopyDir(fltCtx, tempFs2, subFs, false))
assert.Contains(t, subFs.lastQuery, timeQuery)

assert.NoError(t, sync.CopyDir(defCtx, tempFs2, subFs, false))
assert.NotContains(t, subFs.lastQuery, timeQuery)

// validate list/walk
devNull, errOpen := os.OpenFile(os.DevNull, os.O_WRONLY, 0)
require.NoError(t, errOpen)
defer func() {
_ = devNull.Close()
}()

assert.NoError(t, operations.List(defCtx, subFs, devNull))
assert.NotContains(t, subFs.lastQuery, timeQuery)

assert.NoError(t, operations.List(fltCtx, subFs, devNull))
assert.Contains(t, subFs.lastQuery, timeQuery)
}

func (f *Fs) InternalTest(t *testing.T) {
// These tests all depend on each other so run them as nested tests
t.Run("DocumentImport", func(t *testing.T) {
@@ -556,7 +478,6 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyID", f.InternalTestCopyID)
t.Run("AgeQuery", f.InternalTestAgeQuery)
}

var _ fstests.InternalTester = (*Fs)(nil)

@@ -1,358 +0,0 @@
// This file contains the implementation of the sync batcher for uploads
//
// Dropbox rules say you can start as many batches as you want, but
// you may only have one batch being committed and must wait for the
// batch to be finished before committing another.

package dropbox

import (
"context"
"errors"
"fmt"
"sync"
"time"

"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/atexit"
)

const (
maxBatchSize = 1000 // max size the batch can be
defaultTimeoutSync = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
defaultTimeoutAsync = 10 * time.Second // kick off the batch if nothing added for this long (async)
defaultBatchSizeAsync = 100 // default batch size if async
)

// batcher holds info about the current items waiting for upload
type batcher struct {
f *Fs // Fs this batch is part of
mode string // configured batch mode
size int // maximum size for batch
timeout time.Duration // idle timeout for batch
async bool // whether we are using async batching
in chan batcherRequest // incoming items to batch
closed chan struct{} // close to indicate batcher shut down
atexit atexit.FnHandle // atexit handle
shutOnce sync.Once // make sure we shutdown once only
wg sync.WaitGroup // wait for shutdown
}

// batcherRequest holds an incoming request with a place for a reply
type batcherRequest struct {
commitInfo *files.UploadSessionFinishArg
result chan<- batcherResponse
}

// Return true if batcherRequest is the quit request
func (br *batcherRequest) isQuit() bool {
return br.commitInfo == nil
}

// Send this to get the engine to quit
var quitRequest = batcherRequest{}

// batcherResponse holds a response to be delivered to clients waiting
// for a batch to complete.
type batcherResponse struct {
err error
entry *files.FileMetadata
}

// newBatcher creates a new batcher structure
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
if size > maxBatchSize || size < 0 {
return nil, fmt.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
}

async := false

switch mode {
case "sync":
if size <= 0 {
ci := fs.GetConfig(ctx)
size = ci.Transfers
}
if timeout <= 0 {
timeout = defaultTimeoutSync
}
case "async":
if size <= 0 {
size = defaultBatchSizeAsync
}
if timeout <= 0 {
timeout = defaultTimeoutAsync
}
async = true
case "off":
size = 0
default:
return nil, fmt.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
}

b := &batcher{
f: f,
mode: mode,
size: size,
timeout: timeout,
async: async,
in: make(chan batcherRequest, size),
closed: make(chan struct{}),
}
if b.Batching() {
b.atexit = atexit.Register(b.Shutdown)
b.wg.Add(1)
go b.commitLoop(context.Background())
}
return b, nil

}

// Batching returns true if batching is active
func (b *batcher) Batching() bool {
return b.size > 0
}

// finishBatch commits the batch, returning a batch status to poll or maybe complete
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (batchStatus *files.UploadSessionFinishBatchLaunch, err error) {
var arg = &files.UploadSessionFinishBatchArg{
Entries: items,
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatch(arg)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
}
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {
return nil, fmt.Errorf("batch commit failed: %w", err)
}
return batchStatus, nil
}

// finishBatchJobStatus waits for the batch to complete returning completed entries
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
if launchBatchStatus.AsyncJobId == "" {
return nil, errors.New("wait for batch completion: empty job ID")
}
var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := 100 * time.Millisecond
const maxSleepTime = 1 * time.Second
startTime := time.Now()
try := 1
for {
remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
if remaining < 0 {
break
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: launchBatchStatus.AsyncJobId,
})
return shouldRetry(ctx, err)
})
if err != nil {
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
} else {
if batchStatus.Tag == "complete" {
fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
return batchStatus.Complete, nil
}
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
}
time.Sleep(sleepTime)
sleepTime *= 2
if sleepTime > maxSleepTime {
sleepTime = maxSleepTime
}
try++
}
if err == nil {
err = errors.New("batch didn't complete")
}
return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
}

// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}
}
}
}()
desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
fs.Debugf(b.f, "Committing %s", desc)

// finalise the batch getting either a result or a job id to poll
batchStatus, err := b.finishBatch(ctx, items)
if err != nil {
return err
}

// check whether batch is complete
var complete *files.UploadSessionFinishBatchResult
switch batchStatus.Tag {
case "async_job_id":
// wait for batch to complete
complete, err = b.finishBatchJobStatus(ctx, batchStatus)
if err != nil {
return err
}
case "complete":
complete = batchStatus.Complete
default:
return fmt.Errorf("batch returned unknown status %q", batchStatus.Tag)
}

// Check we got the right number of entries
entries := complete.Entries
if len(entries) != len(results) {
return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
}

// Report results to clients
var (
errorTag = ""
errorCount = 0
)
for i := range results {
item := entries[i]
resp := batcherResponse{}
if item.Tag == "success" {
resp.entry = item.Success
} else {
errorCount++
errorTag = item.Tag
if item.Failure != nil {
errorTag = item.Failure.Tag
if item.Failure.LookupFailed != nil {
errorTag += "/" + item.Failure.LookupFailed.Tag
}
if item.Failure.Path != nil {
errorTag += "/" + item.Failure.Path.Tag
}
if item.Failure.PropertiesError != nil {
errorTag += "/" + item.Failure.PropertiesError.Tag
}
}
resp.err = fmt.Errorf("batch upload failed: %s", errorTag)
}
if !b.async {
results[i] <- resp
}
}
// Show signalled so no need to report error to clients from now on
signalled = true

// Report an error if any failed in the batch
if errorTag != "" {
return fmt.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
}

fs.Debugf(b.f, "Committed %s", desc)
return nil
}

// commitLoop runs the commit engine in the background
func (b *batcher) commitLoop(ctx context.Context) {
var (
items []*files.UploadSessionFinishArg // current batch of uncommitted files
results []chan<- batcherResponse // current batch of clients awaiting results
idleTimer = time.NewTimer(b.timeout)
commit = func() {
err := b.commitBatch(ctx, items, results)
if err != nil {
fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
}
items, results = nil, nil
}
)
defer b.wg.Done()
defer idleTimer.Stop()
idleTimer.Stop()

outer:
for {
select {
case req := <-b.in:
if req.isQuit() {
break outer
}
items = append(items, req.commitInfo)
results = append(results, req.result)
idleTimer.Stop()
if len(items) >= b.size {
commit()
} else {
idleTimer.Reset(b.timeout)
}
case <-idleTimer.C:
if len(items) > 0 {
fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
commit()
}
}

}
// commit any remaining items
if len(items) > 0 {
commit()
}
}

// Shutdown finishes any pending batches then shuts everything down
//
// Can be called from atexit handler
func (b *batcher) Shutdown() {
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Committing uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message
//
// Note that we don't close b.in because that will
// cause write to closed channel in Commit when we are
// exiting due to a signal.
b.in <- quitRequest
b.wg.Wait()
})
}

// Commit commits the file using a batch call, first adding it to the
// batch and then waiting for the batch to complete in a synchronous
// way if async is not set.
func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
select {
case <-b.closed:
return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
default:
}
fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
resp := make(chan batcherResponse, 1)
b.in <- batcherRequest{
commitInfo: commitInfo,
result: resp,
}
// If running async then don't wait for the result
if b.async {
return nil, nil
}
result := <-resp
return result.entry, result.err
}
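For orientation, here is a minimal usage sketch (an editor's illustration derived from the API above, not code from the rclone source; `f`, `ctx` and `commitArg` are assumed to exist and error handling is abbreviated):

    // Create a batcher; size and timeout of 0 pick the defaults for the mode.
    b, err := newBatcher(ctx, f, "sync", 0, 0)
    if err != nil {
        return err
    }
    defer b.Shutdown() // flushes any partially filled batch on exit

    // Queue one finished upload session for commit. In "sync" mode this
    // blocks until the whole batch commits and returns the file metadata;
    // in "async" mode it returns (nil, nil) immediately.
    entry, err := b.Commit(ctx, commitArg)
    if err != nil {
        return err
    }
    fs.Debugf(f, "committed %q", entry.PathDisplay)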
359
backend/dropbox/dropbox.go
Normal file → Executable file
@@ -23,22 +23,23 @@ of path_display and all will be well.

import (
"context"
"errors"
"fmt"
"io"
"log"
"path"
"regexp"
"strings"
"time"
"unicode/utf8"

"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/auth"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/common"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/sharing"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/team"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/users"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/dropbox/dbhash"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -64,9 +65,9 @@ const (
// Upload chunk size - setting too small makes uploads slow.
// Chunks are buffered into memory for retries.
//
// Speed vs chunk size uploading a 1 GiB file on 2017-11-22
// Speed vs chunk size uploading a 1 GB file on 2017-11-22
//
// Chunk Size MiB, Speed MiB/s, % of max
// Chunk Size MB, Speed Mbyte/s, % of max
// 1 1.364 11%
// 2 2.443 19%
// 4 4.288 33%
@@ -81,11 +82,11 @@ const (
// 96 12.302 95%
// 128 12.945 100%
//
// Choose 48 MiB which is 91% of Maximum speed. rclone by
// default does 4 transfers so this should use 4*48 MiB = 192 MiB
// Choose 48MB which is 91% of Maximum speed. rclone by
// default does 4 transfers so this should use 4*48MB = 192MB
// by default.
defaultChunkSize = 48 * fs.Mebi
maxChunkSize = 150 * fs.Mebi
defaultChunkSize = 48 * fs.MebiByte
maxChunkSize = 150 * fs.MebiByte
// Max length of filename parts: https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing
maxFileNameLength = 255
)
@@ -98,10 +99,8 @@ var (
"files.content.write",
"files.content.read",
"sharing.write",
"account_info.read", // needed for About
// "file_requests.write",
// "members.read", // needed for impersonate - but causes app to need to be approved by Dropbox Team Admin during the flow
// "team_data.member"
},
// Endpoint: oauth2.Endpoint{
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
@@ -131,36 +130,39 @@ func getOauthConfig(m configmap.Mapper) *oauth2.Config {
}
// Make a copy of the config
config := *dropboxConfig
// Make a copy of the scopes with extra scopes required appended
config.Scopes = append(config.Scopes, "members.read", "team_data.member")
// Make a copy of the scopes with "members.read" appended
config.Scopes = append(config.Scopes, "members.read")
return &config
}

// Register with Fs
func init() {
DbHashType = hash.RegisterHash("dropbox", "DropboxHash", 64, dbhash.New)
DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
fs.Register(&fs.RegInfo{
Name: "dropbox",
Description: "Dropbox",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: getOauthConfig(m),
NoOffline: true,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
opt := oauthutil.Options{
NoOffline: true,
OAuth2Opts: []oauth2.AuthCodeOption{
oauth2.SetAuthURLParam("token_access_type", "offline"),
},
})
}
err := oauthutil.Config(ctx, "dropbox", name, m, getOauthConfig(m), &opt)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "chunk_size",
Help: fmt.Sprintf(`Upload chunk size (< %v).
Help: fmt.Sprintf(`Upload chunk size. (< %v).

Any files larger than this will be uploaded in chunks of this size.

Note that chunks are buffered in memory (one at a time) so rclone can
deal with retries. Setting this larger will increase the speed
slightly (at most 10%% for 128 MiB in tests) at the cost of using more
slightly (at most 10%% for 128MB in tests) at the cost of using more
memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
Default: defaultChunkSize,
Advanced: true,
@@ -209,68 +211,6 @@ Note that we don't unmount the shared folder afterwards so the
shared folder.`,
Default: false,
Advanced: true,
}, {
Name: "batch_mode",
Help: `Upload file batching sync|async|off.

This sets the batch mode used by rclone.

For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)

This has 3 possible values

- off - no batching
- sync - batch uploads and check completion (default)
- async - batch upload and don't check completion

Rclone will close any outstanding batches when it exits which may cause
a delay on quit.
`,
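// Editor's illustration (not part of the original help): with these
// assumed settings in rclone.conf, uploads are queued into batches of up
// to 100 files and committed without waiting for completion:
//
//     [dropbox]
//     type = dropbox
//     batch_mode = async
//     batch_size = 100
//     batch_timeout = 10s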
Default: "sync",
Advanced: true,
}, {
Name: "batch_size",
Help: `Max number of files in upload batch.

This sets the batch size of files to upload. It has to be less than 1000.

By default this is 0 which means rclone will calculate the batch size
depending on the setting of batch_mode.

- batch_mode: async - default batch_size is 100
- batch_mode: sync - default batch_size is the same as --transfers
- batch_mode: off - not in use

Rclone will close any outstanding batches when it exits which may cause
a delay on quit.

Setting this is a great idea if you are uploading lots of small files
as it will make them a lot quicker. You can use --transfers 32 to
maximise throughput.
`,
Default: 0,
Advanced: true,
}, {
Name: "batch_timeout",
Help: `Max time to allow an idle upload batch before uploading.

If an upload batch is idle for more than this long then it will be
uploaded.

The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.

- batch_mode: async - default batch_timeout is 10s
- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: off - not in use
`,
Default: fs.Duration(0),
Advanced: true,
}, {
Name: "batch_commit_timeout",
Help: `Max time to wait for a batch to finish committing`,
Default: fs.Duration(10 * time.Minute),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -290,16 +230,11 @@ default based on the batch_mode in use.
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
Impersonate string `config:"impersonate"`
|
||||
SharedFiles bool `config:"shared_files"`
|
||||
SharedFolders bool `config:"shared_folders"`
|
||||
BatchMode string `config:"batch_mode"`
|
||||
BatchSize int `config:"batch_size"`
|
||||
BatchTimeout fs.Duration `config:"batch_timeout"`
|
||||
BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
|
||||
AsyncBatch bool `config:"async_batch"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
Impersonate string `config:"impersonate"`
|
||||
SharedFiles bool `config:"shared_files"`
|
||||
SharedFolders bool `config:"shared_folders"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
// Fs represents a remote dropbox server
|
||||
@@ -318,7 +253,6 @@ type Fs struct {
|
||||
slashRootSlash string // root with "/" prefix and postfix, lowercase
|
||||
pacer *fs.Pacer // To pace the API calls
|
||||
ns string // The namespace we are using or "" for none
|
||||
batcher *batcher // batch builder
|
||||
}
|
||||
|
||||
// Object describes a dropbox object
|
||||
@@ -334,6 +268,8 @@ type Object struct {
|
||||
hash string // content_hash of the object
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
@@ -363,36 +299,36 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
if err == nil {
|
||||
return false, err
|
||||
}
|
||||
errString := err.Error()
|
||||
baseErrString := errors.Cause(err).Error()
|
||||
// First check for specific errors
|
||||
if strings.Contains(errString, "insufficient_space") {
|
||||
if strings.Contains(baseErrString, "insufficient_space") {
|
||||
return false, fserrors.FatalError(err)
|
||||
} else if strings.Contains(errString, "malformed_path") {
|
||||
} else if strings.Contains(baseErrString, "malformed_path") {
|
||||
return false, fserrors.NoRetryError(err)
|
||||
}
|
||||
// Then handle any official Retry-After header from Dropbox's SDK
|
||||
switch e := err.(type) {
|
||||
case auth.RateLimitAPIError:
|
||||
if e.RateLimitError.RetryAfter > 0 {
|
||||
fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
|
||||
fs.Logf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
|
||||
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
// Keep old behavior for backward compatibility
|
||||
if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
|
||||
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
|
||||
return true, err
|
||||
}
|
||||
return fserrors.ShouldRetry(err), err
|
||||
}
|
||||
|
||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||
const minChunkSize = fs.SizeSuffixBase
|
||||
const minChunkSize = fs.Byte
|
||||
if cs < minChunkSize {
|
||||
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
}
|
||||
if cs > maxChunkSize {
|
||||
return fmt.Errorf("%s is greater than %s", cs, maxChunkSize)
|
||||
return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -415,7 +351,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
err = checkUploadChunkSize(opt.ChunkSize)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("dropbox: chunk size: %w", err)
|
||||
return nil, errors.Wrap(err, "dropbox: chunk size")
|
||||
}
|
||||
|
||||
// Convert the old token if it exists. The old token was just
|
||||
@@ -427,13 +363,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
|
||||
err := config.SetValueAndSave(name, config.ConfigToken, newToken)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("NewFS convert token: %w", err)
|
||||
return nil, errors.Wrap(err, "NewFS convert token")
|
||||
}
|
||||
}
|
||||
|
||||
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, getOauthConfig(m))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to configure dropbox: %w", err)
|
||||
return nil, errors.Wrap(err, "failed to configure dropbox")
|
||||
}
|
||||
|
||||
ci := fs.GetConfig(ctx)
|
||||
@@ -444,10 +380,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
ci: ci,
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
}
|
||||
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cfg := dropbox.Config{
|
||||
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
|
||||
Client: oAuthClient, // maybe???
|
||||
@@ -474,7 +406,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
memberIds, err := f.team.MembersGetInfo(args)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
|
||||
return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
|
||||
}
|
||||
|
||||
cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
|
||||
@@ -551,7 +483,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get current account failed: %w", err)
|
||||
return nil, errors.Wrap(err, "get current account failed")
|
||||
}
|
||||
switch x := acc.RootInfo.(type) {
|
||||
case *common.TeamRootInfo:
|
||||
@@ -559,30 +491,28 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
case *common.UserRootInfo:
|
||||
f.ns = x.RootNamespaceId
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
|
||||
return nil, errors.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
|
||||
}
|
||||
fs.Debugf(f, "Using root namespace %q", f.ns)
|
||||
}
|
||||
f.setRoot(root)
|
||||
|
||||
// See if the root is actually an object
|
||||
if f.root != "" {
|
||||
_, err = f.getFileMetadata(ctx, f.slashRoot)
|
||||
if err == nil {
|
||||
newRoot := path.Dir(f.root)
|
||||
if newRoot == "." {
|
||||
newRoot = ""
|
||||
}
|
||||
f.setRoot(newRoot)
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
_, err = f.getFileMetadata(ctx, f.slashRoot)
|
||||
if err == nil {
|
||||
newRoot := path.Dir(f.root)
|
||||
if newRoot == "." {
|
||||
newRoot = ""
|
||||
}
|
||||
f.setRoot(newRoot)
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// headerGenerator for dropbox sdk
|
||||
func (f *Fs) headerGenerator(hostType string, namespace string, route string) map[string]string {
|
||||
func (f *Fs) headerGenerator(hostType string, style string, namespace string, route string) map[string]string {
|
||||
if f.ns == "" {
|
||||
return map[string]string{}
|
||||
}
|
||||
@@ -632,9 +562,6 @@ func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *fi
|
||||
}
|
||||
fileInfo, ok := entry.(*files.FileMetadata)
|
||||
if !ok {
|
||||
if _, ok = entry.(*files.FolderMetadata); ok {
|
||||
return nil, fs.ErrorIsDir
|
||||
}
|
||||
return nil, fs.ErrorNotAFile
|
||||
}
|
||||
return fileInfo, nil
|
||||
@@ -712,7 +639,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list continue: %w", err)
|
||||
return nil, errors.Wrap(err, "list continue")
|
||||
}
|
||||
}
|
||||
for _, entry := range res.Entries {
|
||||
@@ -786,7 +713,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list continue: %w", err)
|
||||
return nil, errors.Wrap(err, "list continue")
|
||||
}
|
||||
}
|
||||
for _, entry := range res.Entries {
|
||||
@@ -796,7 +723,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
|
||||
fs: f,
|
||||
url: entry.PreviewUrl,
|
||||
remote: entryPath,
|
||||
modTime: *entry.TimeInvited,
|
||||
modTime: entry.TimeInvited,
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -852,7 +779,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
arg := files.ListFolderArg{
|
||||
Path: f.opt.Enc.FromStandardPath(root),
|
||||
Recursive: false,
|
||||
Limit: 1000,
|
||||
}
|
||||
if root == "/" {
|
||||
arg.Path = "" // Specify root folder as empty string
|
||||
@@ -880,7 +806,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list continue: %w", err)
|
||||
return nil, errors.Wrap(err, "list continue")
|
||||
}
|
||||
}
|
||||
for _, entry := range res.Entries {
|
||||
@@ -992,7 +918,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
|
||||
// check directory exists
|
||||
_, err = f.getDirMetadata(ctx, root)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Rmdir: %w", err)
|
||||
return errors.Wrap(err, "Rmdir")
|
||||
}
|
||||
|
||||
root = f.opt.Enc.FromStandardPath(root)
|
||||
@@ -1010,7 +936,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Rmdir: %w", err)
|
||||
return errors.Wrap(err, "Rmdir")
|
||||
}
|
||||
if len(res.Entries) != 0 {
|
||||
return errors.New("directory not empty")
|
||||
@@ -1076,7 +1002,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("copy failed: %w", err)
|
||||
return nil, errors.Wrap(err, "copy failed")
|
||||
}
|
||||
|
||||
// Set the metadata
|
||||
@@ -1086,7 +1012,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
}
|
||||
err = dstObj.setMetadataFromEntry(fileInfo)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("copy failed: %w", err)
|
||||
return nil, errors.Wrap(err, "copy failed")
|
||||
}
|
||||
|
||||
return dstObj, nil
|
||||
@@ -1137,7 +1063,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("move failed: %w", err)
|
||||
return nil, errors.Wrap(err, "move failed")
|
||||
}
|
||||
|
||||
// Set the metadata
|
||||
@@ -1147,7 +1073,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
}
|
||||
err = dstObj.setMetadataFromEntry(fileInfo)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("move failed: %w", err)
|
||||
return nil, errors.Wrap(err, "move failed")
|
||||
}
|
||||
return dstObj, nil
|
||||
}
|
||||
@@ -1172,7 +1098,14 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
}
|
||||
if expire < fs.DurationOff {
|
||||
expiryTime := time.Now().Add(time.Duration(expire)).UTC().Round(time.Second)
|
||||
createArg.Settings.Expires = &expiryTime
|
||||
createArg.Settings.Expires = expiryTime
|
||||
}
|
||||
// FIXME note we can't set Settings for non enterprise dropbox
|
||||
// because of https://github.com/dropbox/dropbox-sdk-go-unofficial/issues/75
|
||||
// however this only goes wrong when we set Expires, so as a
|
||||
// work-around remove Settings unless expire is set.
|
||||
if expire == fs.DurationOff {
|
||||
createArg.Settings = nil
|
||||
}
|
||||
|
||||
var linkRes sharing.IsSharedLinkMetadata
|
||||
@@ -1255,7 +1188,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("MoveDir failed: %w", err)
|
||||
return errors.Wrap(err, "MoveDir failed")
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -1269,7 +1202,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("about failed: %w", err)
|
||||
return nil, errors.Wrap(err, "about failed")
|
||||
}
|
||||
var total uint64
|
||||
if q.Allocation != nil {
|
||||
@@ -1409,7 +1342,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("list continue: %w", err)
|
||||
return "", errors.Wrap(err, "list continue")
|
||||
}
|
||||
cursor = changeList.Cursor
|
||||
var entryType fs.EntryType
|
||||
@@ -1418,13 +1351,13 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
|
||||
switch info := entry.(type) {
|
||||
case *files.FolderMetadata:
|
||||
entryType = fs.EntryDirectory
|
||||
entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
|
||||
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
|
||||
case *files.FileMetadata:
|
||||
entryType = fs.EntryObject
|
||||
entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
|
||||
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
|
||||
case *files.DeletedMetadata:
|
||||
entryType = fs.EntryObject
|
||||
entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
|
||||
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
|
||||
default:
|
||||
fs.Errorf(entry, "dropbox ChangeNotify: ignoring unknown EntryType %T", entry)
|
||||
continue
|
||||
@@ -1446,13 +1379,6 @@ func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(DbHashType)
|
||||
}
|
||||
|
||||
// Shutdown the backend, closing any background tasks and any
|
||||
// cached connections.
|
||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
f.batcher.Shutdown()
|
||||
return nil
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Fs returns the parent Fs
|
||||
@@ -1488,7 +1414,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
}
|
||||
err := o.readMetaData(ctx)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read hash from metadata: %w", err)
|
||||
return "", errors.Wrap(err, "failed to read hash from metadata")
|
||||
}
|
||||
return o.hash, nil
|
||||
}
|
||||
@@ -1612,83 +1538,97 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
|
||||
// uploadChunked uploads the object in parts
|
||||
//
|
||||
// Will introduce two additional network requests to start and finish the session.
|
||||
// If the size is unknown (i.e. -1) the method incurs one additional
|
||||
// request to the Dropbox API that does not carry a payload to close the append session.
|
||||
// Will work optimally if size is >= uploadChunkSize. If the size is either
|
||||
// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
// avoidable request to the Dropbox API that does not carry payload.
func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
// start upload
chunkSize := int64(o.fs.opt.ChunkSize)
chunks := 0
if size != -1 {
chunks = int(size/chunkSize) + 1
}
in := readers.NewCountingReader(in0)
buf := make([]byte, int(chunkSize))

fmtChunk := func(cur int, last bool) {
if chunks == 0 && last {
fs.Debugf(o, "Streaming chunk %d/%d", cur, cur)
} else if chunks == 0 {
fs.Debugf(o, "Streaming chunk %d/unknown", cur)
} else {
fs.Debugf(o, "Uploading chunk %d/%d", cur, chunks)
}
}

// write the first chunk
fmtChunk(1, false)
var res *files.UploadSessionStartResult
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, nil)
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, chunk)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}

chunkSize := int64(o.fs.opt.ChunkSize)
chunks, remainder := size/chunkSize, size%chunkSize
if remainder > 0 {
chunks++
}

// write chunks
in := readers.NewCountingReader(in0)
buf := make([]byte, int(chunkSize))
cursor := files.UploadSessionCursor{
SessionId: res.SessionId,
Offset: 0,
}
appendArg := files.UploadSessionAppendArg{Cursor: &cursor}
for currentChunk := 1; ; currentChunk++ {
cursor.Offset = in.BytesRead()
appendArg := files.UploadSessionAppendArg{
Cursor: &cursor,
Close: false,
}

if chunks < 0 {
fs.Debugf(o, "Streaming chunk %d/unknown", currentChunk)
} else {
fs.Debugf(o, "Uploading chunk %d/%d", currentChunk, chunks)
// write more whole chunks (if any)
currentChunk := 2
for {
if chunks > 0 && currentChunk >= chunks {
// if the size is known, only upload full chunks. Remaining bytes are uploaded with
// the UploadSessionFinish request.
break
} else if chunks == 0 && in.BytesRead()-cursor.Offset < uint64(chunkSize) {
// if the size is unknown, upload as long as we can read full chunks from the reader.
// The UploadSessionFinish request will not contain any payload.
break
}

chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
cursor.Offset = in.BytesRead()
fmtChunk(currentChunk, false)
chunk = readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
// after session is started, we retry everything
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {
return nil, err
}
if appendArg.Close {
break
}

if size > 0 {
// if size is known, check if next chunk is final
appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize)
} else {
// if size is unknown, upload as long as we can read full chunks from the reader
appendArg.Close = in.BytesRead()-cursor.Offset < uint64(chunkSize)
}
currentChunk++
}

// finish upload
// write the remains
cursor.Offset = in.BytesRead()
args := &files.UploadSessionFinishArg{
Cursor: &cursor,
Commit: commitInfo,
}
// If we are batching then we should have written all the data now
// store the commit info now for a batch commit
if o.fs.batcher.Batching() {
return o.fs.batcher.Commit(ctx, args)
}

fmtChunk(currentChunk, true)
chunk = readers.NewRepeatableReaderBuffer(in, buf)
err = o.fs.pacer.Call(func() (bool, error) {
entry, err = o.fs.srv.UploadSessionFinish(args, nil)
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
entry, err = o.fs.srv.UploadSessionFinish(args, chunk)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
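
The hunk above interleaves two revisions of the same three-step Dropbox upload-session protocol: start a session with the first chunk, append whole chunks, then finish with the remainder and the commit metadata. A minimal sketch of that flow, assuming the files.Client interface from the unofficial Dropbox Go SDK used by this backend (pacer retries and the seek-on-retry handling above are omitted; uploadInChunks is a hypothetical name):

package dropboxsketch

import (
	"bytes"
	"io"

	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files" // import path may differ by SDK version
)

// uploadInChunks is a stripped-down illustration of the session flow
// driven by uploadChunked above.
func uploadInChunks(srv files.Client, in io.Reader, commitInfo *files.CommitInfo, chunkSize int64) (*files.FileMetadata, error) {
	buf := make([]byte, chunkSize)

	// 1. Start the session with the first chunk.
	n, _ := io.ReadFull(in, buf)
	res, err := srv.UploadSessionStart(&files.UploadSessionStartArg{}, bytes.NewReader(buf[:n]))
	if err != nil {
		return nil, err
	}
	cursor := files.UploadSessionCursor{SessionId: res.SessionId, Offset: uint64(n)}

	// 2. Append whole chunks until the reader is drained.
	for {
		n, readErr := io.ReadFull(in, buf)
		if n > 0 {
			arg := files.UploadSessionAppendArg{Cursor: &cursor}
			if err := srv.UploadSessionAppendV2(&arg, bytes.NewReader(buf[:n])); err != nil {
				return nil, err
			}
			cursor.Offset += uint64(n)
		}
		if readErr != nil { // io.EOF or io.ErrUnexpectedEOF: nothing more to read
			break
		}
	}

	// 3. Finish the session, committing path, mode and client_modified.
	return srv.UploadSessionFinish(&files.UploadSessionFinishArg{Cursor: &cursor, Commit: commitInfo}, nil)
}
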
@@ -1741,13 +1681,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
remote := o.remotePath()
if ignoredFiles.MatchString(remote) {
return fserrors.NoRetryError(fmt.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
}
commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))
commitInfo.Mode.Tag = "overwrite"
// The Dropbox API only accepts timestamps in UTC with second precision.
clientModified := src.ModTime(ctx).UTC().Round(time.Second)
commitInfo.ClientModified = &clientModified
commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
// Don't attempt to create filenames that are too long
if cErr := checkPathLength(commitInfo.Path); cErr != nil {
return cErr
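
Both sides of the ClientModified lines above normalise the timestamp the same way, because the Dropbox API only accepts client_modified in UTC with second precision. A small self-contained illustration (the example time is made up):

package main

import (
	"fmt"
	"time"
)

func main() {
	mod := time.Date(2021, 10, 30, 12, 34, 56, 789000000, time.Local)
	// Round (not Truncate) is used above, so 56.789s becomes 57s exactly.
	clientModified := mod.UTC().Round(time.Second)
	fmt.Println(clientModified.Format(time.RFC3339)) // no fractional seconds
}
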
@@ -1756,7 +1695,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
size := src.Size()
var err error
var entry *files.FileMetadata
if size > int64(o.fs.opt.ChunkSize) || size < 0 || o.fs.batcher.Batching() {
if size > int64(o.fs.opt.ChunkSize) || size == -1 {
entry, err = o.uploadChunked(ctx, in, commitInfo, size)
} else {
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
@@ -1765,16 +1704,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
})
}
if err != nil {
return fmt.Errorf("upload failed: %w", err)
}
// If we haven't received data back from batch upload then fake it
//
// This will only happen if we are uploading async batches
if entry == nil {
o.bytes = size
o.modTime = *commitInfo.ClientModified
o.hash = "" // we don't have this
return nil
return errors.Wrap(err, "upload failed")
}
return o.setMetadataFromEntry(entry)
}
@@ -1803,7 +1733,6 @@ var (
_ fs.PublicLinker = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Shutdowner = &Fs{}
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)

@@ -2,16 +2,14 @@ package fichier

import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
@@ -82,22 +80,16 @@ func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't read file info: %w", err)
return nil, errors.Wrap(err, "couldn't read file info")
}

return &file, err
}
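
From here on the comparison consists almost entirely of one mechanical change: fmt.Errorf with the %w verb (standard library, Go 1.13+) on one side, errors.Wrap from github.com/pkg/errors on the other. Both keep the original error reachable for errors.Is and errors.As, as this small standalone check shows:

package main

import (
	"errors"
	"fmt"

	pkgerrors "github.com/pkg/errors"
)

func main() {
	base := errors.New("file not found")

	e1 := fmt.Errorf("couldn't read file info: %w", base) // stdlib wrapping
	e2 := pkgerrors.Wrap(base, "couldn't read file info") // pkg/errors wrapping

	fmt.Println(errors.Is(e1, base)) // true
	fmt.Println(errors.Is(e2, base)) // true: pkg/errors >= v0.9.0 implements Unwrap
}
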

// maybe do some actual validation later if necessary
func validToken(token *GetTokenResponse) bool {
return token.Status == "OK"
}

func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
request := DownloadRequest{
URL: url,
Single: 1,
Pass: f.opt.FilePassword,
}
opts := rest.Opts{
Method: "POST",
@@ -107,11 +99,10 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
var token GetTokenResponse
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
doretry, err := shouldRetry(ctx, resp, err)
return doretry || !validToken(&token), err
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't list files: %w", err)
return nil, errors.Wrap(err, "couldn't list files")
}

return &token, nil
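
One side of the getDownloadToken hunk folds body validation into the retry decision: the pacer retries not only when the HTTP layer asks for it, but also when the decoded token fails validToken. The general shape of that pattern, with callUntilValid as a hypothetical helper written against rclone's fs.Pacer:

package fichiersketch

import (
	"net/http"

	"github.com/rclone/rclone/fs"
)

// callUntilValid keeps calling fn until the transport succeeds and the
// decoded result passes valid(). Sketch only; the pacer provides the
// backoff between attempts.
func callUntilValid(p *fs.Pacer, fn func() (*http.Response, error), shouldRetry func(*http.Response, error) (bool, error), valid func() bool) error {
	return p.Call(func() (bool, error) {
		resp, err := fn()
		doretry, err := shouldRetry(resp, err)
		return doretry || !valid(), err
	})
}
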
@@ -127,16 +118,10 @@ func fileFromSharedFile(file *SharedFile) File {

func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
ContentType: "application/x-www-form-urlencoded",
}
if f.opt.FolderPassword != "" {
opts.Method = "POST"
opts.Parameters = nil
opts.Body = strings.NewReader("json=1&pass=" + url.QueryEscape(f.opt.FolderPassword))
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
}

var sharedFiles SharedFolderResponse
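
When the shared folder is password protected, the request above switches from GET query parameters to a POST form body, and the password has to be escaped before being spliced into that body. A quick check with a hypothetical password value:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	pass := "p@ss&word" // hypothetical folder password
	body := "json=1&pass=" + url.QueryEscape(pass)
	fmt.Println(body) // json=1&pass=p%40ss%26word
}
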
@@ -145,7 +130,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't list files: %w", err)
return nil, errors.Wrap(err, "couldn't list files")
}

entries = make([]fs.DirEntry, len(sharedFiles))
@@ -174,7 +159,7 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't list files: %w", err)
return nil, errors.Wrap(err, "couldn't list files")
}
for i := range filesList.Items {
item := &filesList.Items[i]
@@ -202,7 +187,7 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't list folders: %w", err)
return nil, errors.Wrap(err, "couldn't list folders")
}
foldersList.Name = f.opt.Enc.ToStandardName(foldersList.Name)
for i := range foldersList.SubFolders {
@@ -296,7 +281,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't create folder: %w", err)
return nil, errors.Wrap(err, "couldn't create folder")
}

// fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID)
@@ -323,10 +308,10 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't remove folder: %w", err)
return nil, errors.Wrap(err, "couldn't remove folder")
}
if response.Status != "OK" {
return nil, fmt.Errorf("can't remove folder: %s", response.Message)
return nil, errors.New("Can't remove non-empty dir")
}

// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
@@ -353,7 +338,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
})

if err != nil {
return nil, fmt.Errorf("couldn't remove file: %w", err)
return nil, errors.Wrap(err, "couldn't remove file")
}

// fs.Debugf(f, "Removed file with url `%s`", url)
@@ -380,7 +365,7 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
})

if err != nil {
return nil, fmt.Errorf("couldn't copy file: %w", err)
return nil, errors.Wrap(err, "couldn't copy file")
}

return response, nil
@@ -405,35 +390,7 @@ func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename stri
})

if err != nil {
return nil, fmt.Errorf("couldn't copy file: %w", err)
}

return response, nil
}

func (f *Fs) renameFile(ctx context.Context, url string, newName string) (response *RenameFileResponse, err error) {
request := &RenameFileRequest{
URLs: []RenameFileURL{
{
URL: url,
Filename: newName,
},
},
}

opts := rest.Opts{
Method: "POST",
Path: "/file/rename.cgi",
}

response = &RenameFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})

if err != nil {
return nil, fmt.Errorf("couldn't rename file: %w", err)
return nil, errors.Wrap(err, "couldn't copy file")
}

return response, nil
@@ -454,7 +411,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("didnt got an upload node: %w", err)
return nil, errors.Wrap(err, "didnt got an upload node")
}

// fs.Debugf(f, "Got Upload node")
@@ -498,7 +455,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
})

if err != nil {
return nil, fmt.Errorf("couldn't upload file: %w", err)
return nil, errors.Wrap(err, "couldn't upload file")
}

// fs.Debugf(f, "Uploaded File `%s`", fileName)
@@ -532,7 +489,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
})

if err != nil {
return nil, fmt.Errorf("couldn't finish file upload: %w", err)
return nil, errors.Wrap(err, "couldn't finish file upload")
}

return response, err

@@ -2,7 +2,6 @@ package fichier

import (
"context"
"errors"
"fmt"
"io"
"net/http"
@@ -10,6 +9,7 @@ import (
"strings"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -35,27 +35,17 @@ func init() {
fs.Register(&fs.RegInfo{
Name: "fichier",
Description: "1Fichier",
NewFs: NewFs,
Config: func(ctx context.Context, name string, config configmap.Mapper) {
},
NewFs: NewFs,
Options: []fs.Option{{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
Name: "api_key",
}, {
Help: "If you want to download a shared folder, add this parameter.",
Help: "If you want to download a shared folder, add this parameter",
Name: "shared_folder",
Required: false,
Advanced: true,
}, {
Help: "If you want to download a shared file that is password protected, add this parameter.",
Name: "file_password",
Required: false,
Advanced: true,
IsPassword: true,
}, {
Help: "If you want to list the files in a shared folder that is password protected, add this parameter.",
Name: "folder_password",
Required: false,
Advanced: true,
IsPassword: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -87,11 +77,9 @@ func init() {

// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
FilePassword string `config:"file_password"`
FolderPassword string `config:"folder_password"`
Enc encoder.MultiEncoder `config:"encoding"`
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs is the interface a cloud storage system must provide
@@ -437,45 +425,25 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantMove
}

// Find current directory ID
_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil {
return nil, err
}

// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}

// If it is in the correct directory, just rename it
var url string
if currentDirectoryID == directoryID {
resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
if err != nil {
return nil, fmt.Errorf("couldn't rename file: %w", err)
}
if resp.Status != "OK" {
return nil, fmt.Errorf("couldn't rename file: %s", resp.Message)
}
url = resp.URLs[0].URL
} else {
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, fmt.Errorf("couldn't move file: %w", err)
}
if resp.Status != "OK" {
return nil, fmt.Errorf("couldn't move file: %s", resp.Message)
}
url = resp.URLs[0]
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.New("couldn't move file")
}

file, err := f.readFileInfo(ctx, url)
file, err := f.readFileInfo(ctx, resp.URLs[0])
if err != nil {
return nil, errors.New("couldn't read file data")
}
@@ -503,10 +471,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
resp, err := f.copyFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, fmt.Errorf("couldn't move file: %w", err)
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, fmt.Errorf("couldn't move file: %s", resp.Message)
return nil, errors.New("couldn't move file")
}

file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)

@@ -2,12 +2,11 @@ package fichier

import (
"context"
"errors"
"fmt"
"io"
"net/http"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
@@ -123,7 +122,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Delete duplicate after successful upload
err = o.Remove(ctx)
if err != nil {
return fmt.Errorf("failed to remove old version: %w", err)
return errors.Wrap(err, "failed to remove old version")
}

// Replace guts of old object with new one

@@ -19,7 +19,6 @@ type ListFilesRequest struct {
type DownloadRequest struct {
URL string `json:"url"`
Single int `json:"single"`
Pass string `json:"pass,omitempty"`
}

// RemoveFolderRequest is the request structure of the corresponding request
@@ -64,9 +63,8 @@ type MoveFileRequest struct {

// MoveFileResponse is the response structure of the corresponding request
type MoveFileResponse struct {
Status string `json:"status"`
Message string `json:"message"`
URLs []string `json:"urls"`
Status string `json:"status"`
URLs []string `json:"urls"`
}

// CopyFileRequest is the request structure of the corresponding request
@@ -78,10 +76,9 @@ type CopyFileRequest struct {

// CopyFileResponse is the response structure of the corresponding request
type CopyFileResponse struct {
Status string `json:"status"`
Message string `json:"message"`
Copied int `json:"copied"`
URLs []FileCopy `json:"urls"`
Status string `json:"status"`
Copied int `json:"copied"`
URLs []FileCopy `json:"urls"`
}

// FileCopy is used in the the CopyFileResponse
@@ -90,30 +87,6 @@ type FileCopy struct {
ToURL string `json:"to_url"`
}

// RenameFileURL is the data structure to rename a single file
type RenameFileURL struct {
URL string `json:"url"`
Filename string `json:"filename"`
}

// RenameFileRequest is the request structure of the corresponding request
type RenameFileRequest struct {
URLs []RenameFileURL `json:"urls"`
Pretty int `json:"pretty"`
}

// RenameFileResponse is the response structure of the corresponding request
type RenameFileResponse struct {
Status string `json:"status"`
Message string `json:"message"`
Renamed int `json:"renamed"`
URLs []struct {
URL string `json:"url"`
OldFilename string `json:"old_filename"`
NewFilename string `json:"new_filename"`
} `json:"urls"`
}

// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
ID string `json:"id"`

@@ -5,7 +5,6 @@ package api

import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"strings"
@@ -52,46 +51,11 @@ func (t Time) String() string {
return time.Time(t).UTC().Format(timeFormatParameters)
}

// Int represents an integer which can be represented in JSON as a
// quoted integer or an integer.
type Int int

// MarshalJSON turns a Int into JSON
func (i *Int) MarshalJSON() (out []byte, err error) {
return json.Marshal((*int)(i))
}

// UnmarshalJSON turns JSON into a Int
func (i *Int) UnmarshalJSON(data []byte) error {
if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
data = data[1 : len(data)-1]
}
return json.Unmarshal(data, (*int)(i))
}

// String represents an string which can be represented in JSON as a
// quoted string or an integer.
type String string

// MarshalJSON turns a String into JSON
func (s *String) MarshalJSON() (out []byte, err error) {
return json.Marshal((*string)(s))
}

// UnmarshalJSON turns JSON into a String
func (s *String) UnmarshalJSON(data []byte) error {
err := json.Unmarshal(data, (*string)(s))
if err != nil {
*s = String(data)
}
return nil
}
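
The api.Int and api.String helpers above exist because the File Fabric API sometimes quotes numeric fields. An example-style test of the two accepted encodings of Int, assuming it sits next to the type definition in the api package with "encoding/json" and "fmt" imported:

func ExampleInt_UnmarshalJSON() {
	var a, b Int
	_ = json.Unmarshal([]byte(`42`), &a)   // bare integer
	_ = json.Unmarshal([]byte(`"42"`), &b) // quoted integer is unquoted first
	fmt.Println(a, b)
	// Output: 42 42
}
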

// Status return returned in all status responses
type Status struct {
Code string `json:"status"`
Message string `json:"statusmessage"`
TaskID String `json:"taskid"`
TaskID string `json:"taskid"`
// Warning string `json:"warning"` // obsolete
}

@@ -151,7 +115,7 @@ type GetFolderContentsResponse struct {
Total int `json:"total,string"`
Items []Item `json:"filelist"`
Folder Item `json:"folder"`
From Int `json:"from"`
From int `json:"from,string"`
//Count int `json:"count"`
Pid string `json:"pid"`
RefreshResult Status `json:"refreshresult"`

@@ -17,7 +17,6 @@ import (
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -33,6 +32,7 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/random"

"github.com/pkg/errors"
"github.com/rclone/rclone/backend/filefabric/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -65,7 +65,7 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "url",
Help: "URL of the Enterprise File Fabric to connect to.",
Help: "URL of the Enterprise File Fabric to connect to",
Required: true,
Examples: []fs.OptionExample{{
Value: "https://storagemadeeasy.com",
@@ -79,15 +79,14 @@ func init() {
}},
}, {
Name: "root_folder_id",
Help: `ID of the root folder.

Help: `ID of the root folder
Leave blank normally.

Fill in to make rclone start with directory of a given ID.
`,
}, {
Name: "permanent_token",
Help: `Permanent Authentication Token.
Help: `Permanent Authentication Token

A Permanent Authentication Token can be created in the Enterprise File
Fabric, on the users Dashboard under Security, there is an entry
@@ -100,7 +99,7 @@ For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
`,
}, {
Name: "token",
Help: `Session Token.
Help: `Session Token

This is a session token which rclone caches in the config file. It is
usually valid for 1 hour.
@@ -110,14 +109,14 @@ Don't set this value - rclone will set it automatically.
Advanced: true,
}, {
Name: "token_expiry",
Help: `Token expiry time.
Help: `Token expiry time

Don't set this value - rclone will set it automatically.
`,
Advanced: true,
}, {
Name: "version",
Help: `Version read from the file fabric.
Help: `Version read from the file fabric

Don't set this value - rclone will set it automatically.
`,
@@ -223,14 +222,13 @@ var retryStatusCodes = []struct {
// delete in that folder. Please try again later or use
// another name. (error_background)
code: "error_background",
sleep: 1 * time.Second,
sleep: 6 * time.Second,
},
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
// try should be the number of the tries so far, counting up from 1
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, status api.OKError, try int) (bool, error) {
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, status api.OKError) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
@@ -246,10 +244,9 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
for _, retryCode := range retryStatusCodes {
if code == retryCode.code {
if retryCode.sleep > 0 {
// make this thread only sleep exponentially increasing extra time
sleepTime := retryCode.sleep << (try - 1)
fs.Debugf(f, "Sleeping for %v to wait for %q error to clear", sleepTime, retryCode.code)
time.Sleep(sleepTime)
// make this thread only sleep extra time
fs.Debugf(f, "Sleeping for %v to wait for %q error to clear", retryCode.sleep, retryCode.code)
time.Sleep(retryCode.sleep)
}
return true, err
}
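
One side of this hunk turns the flat per-retry sleep into an exponential one by shifting the base duration left once per attempt (the try counter is threaded through from rpc below). With the 1s base shown above, the waits grow as this standalone loop demonstrates:

package main

import (
	"fmt"
	"time"
)

func main() {
	base := 1 * time.Second
	for try := 1; try <= 4; try++ {
		// try 1 -> 1s, try 2 -> 2s, try 3 -> 4s, try 4 -> 8s
		fmt.Println(try, base<<(try-1))
	}
}
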
@@ -267,7 +264,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, rootID string, path string
"pid": rootID,
}, &resp, nil)
if err != nil {
return nil, fmt.Errorf("failed to check path exists: %w", err)
return nil, errors.Wrap(err, "failed to check path exists")
}
if resp.Exists != "y" {
return nil, fs.ErrorObjectNotFound
@@ -308,7 +305,7 @@ func (f *Fs) getApplianceInfo(ctx context.Context) error {
"token": "*",
}, &applianceInfo, nil)
if err != nil {
return fmt.Errorf("failed to read appliance version: %w", err)
return errors.Wrap(err, "failed to read appliance version")
}
f.opt.Version = applianceInfo.SoftwareVersionLabel
f.m.Set("version", f.opt.Version)
@@ -349,7 +346,7 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
"authtoken": f.opt.PermanentToken,
}, &info, nil)
if err != nil {
return "", fmt.Errorf("failed to get session token: %w", err)
return "", errors.Wrap(err, "failed to get session token")
}
refreshed = true
now = now.Add(tokenLifeTime)
@@ -403,13 +400,11 @@ func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKEr
ContentType: "application/x-www-form-urlencoded",
Options: options,
}
try := 0
err = f.pacer.Call(func() (bool, error) {
try++
// Refresh the body each retry
opts.Body = strings.NewReader(data.Encode())
resp, err = f.srv.CallJSON(ctx, &opts, nil, result)
return f.shouldRetry(ctx, resp, err, result, try)
return f.shouldRetry(ctx, resp, err, result)
})
if err != nil {
return resp, err
@@ -562,7 +557,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
"fi_name": f.opt.Enc.FromStandardName(leaf),
}, &info, nil)
if err != nil {
return "", fmt.Errorf("failed to create directory: %w", err)
return "", errors.Wrap(err, "failed to create directory")
}
// fmt.Printf("...Id %q\n", *info.Id)
return info.Item.ID, nil
@@ -595,7 +590,7 @@ OUTER:
var info api.GetFolderContentsResponse
_, err = f.rpc(ctx, "getFolderContents", p, &info, nil)
if err != nil {
return false, fmt.Errorf("failed to list directory: %w", err)
return false, errors.Wrap(err, "failed to list directory")
}
for i := range info.Items {
item := &info.Items[i]
@@ -726,7 +721,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) (err error) {
"completedeletion": "n",
}, &info, nil)
if err != nil {
return fmt.Errorf("failed to delete file: %w", err)
return errors.Wrap(err, "failed to delete file")
}
return nil
}
@@ -763,7 +758,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
}, &info, nil)
f.dirCache.FlushDir(dir)
if err != nil {
return fmt.Errorf("failed to remove directory: %w", err)
return errors.Wrap(err, "failed to remove directory")
}
return nil
}
@@ -825,7 +820,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
_, err = f.rpc(ctx, "doCopyFile", p, &info, nil)
if err != nil {
return nil, fmt.Errorf("failed to copy file: %w", err)
return nil, errors.Wrap(err, "failed to copy file")
}
err = dstObj.setMetaData(&info.Item)
if err != nil {
@@ -844,7 +839,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
}

// Wait for the the background task to complete if necessary
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) {
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID string) (err error) {
if taskID == "" || taskID == "0" {
// No task to wait for
return nil
@@ -857,7 +852,7 @@ func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err
"taskid": taskID,
}, &info, nil)
if err != nil {
return fmt.Errorf("failed to wait for task %s to complete: %w", taskID, err)
return errors.Wrapf(err, "failed to wait for task %s to complete", taskID)
}
if len(info.Tasks) == 0 {
// task has finished
@@ -890,7 +885,7 @@ func (f *Fs) renameLeaf(ctx context.Context, isDir bool, id string, newLeaf stri
"fi_name": newLeaf,
}, &info, nil)
if err != nil {
return nil, fmt.Errorf("failed to rename leaf: %w", err)
return nil, errors.Wrap(err, "failed to rename leaf")
}
err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
if err != nil {
@@ -934,7 +929,7 @@ func (f *Fs) move(ctx context.Context, isDir bool, id, oldLeaf, newLeaf, oldDire
"dir_id": newDirectoryID,
}, &info, nil)
if err != nil {
return nil, fmt.Errorf("failed to move file to new directory: %w", err)
return nil, errors.Wrap(err, "failed to move file to new directory")
}
item = &info.Item
err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
@@ -1037,7 +1032,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
var info api.EmptyResponse
_, err = f.rpc(ctx, "emptyTrashInBackground", params{}, &info, nil)
if err != nil {
return fmt.Errorf("failed to empty trash: %w", err)
return errors.Wrap(err, "failed to empty trash")
}
return nil
}
@@ -1094,7 +1089,7 @@ func (o *Object) Size() int64 {
// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) {
if info.Type != api.ItemTypeFile {
return fs.ErrorIsDir
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
}
o.hasMetaData = true
o.size = info.Size
@@ -1164,7 +1159,7 @@ func (o *Object) modifyFile(ctx context.Context, keyValues [][2]string) error {
"data": data.String(),
}, &info, nil)
if err != nil {
return fmt.Errorf("failed to update metadata: %w", err)
return errors.Wrap(err, "failed to update metadata")
}
return o.setMetaData(&info.Item)
}
@@ -1247,7 +1242,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
_, err = o.fs.rpc(ctx, "doInitUpload", p, &upload, nil)
if err != nil {
return fmt.Errorf("failed to initialize upload: %w", err)
return errors.Wrap(err, "failed to initialize upload")
}

// Cancel the upload if aborted or it fails
@@ -1283,20 +1278,18 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var contentLength = size
opts.ContentLength = &contentLength // NB CallJSON scribbles on this which is naughty
}
try := 0
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
try++
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &uploader)
return o.fs.shouldRetry(ctx, resp, err, nil, try)
return o.fs.shouldRetry(ctx, resp, err, nil)
})
if err != nil {
return fmt.Errorf("failed to upload: %w", err)
return errors.Wrap(err, "failed to upload")
}
if uploader.Success != "y" {
return fmt.Errorf("upload failed")
return errors.Errorf("upload failed")
}
if size > 0 && uploader.FileSize != size {
return fmt.Errorf("upload failed: size mismatch: want %d got %d", size, uploader.FileSize)
return errors.Errorf("upload failed: size mismatch: want %d got %d", size, uploader.FileSize)
}

// Now finalize the file
@@ -1308,7 +1301,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
_, err = o.fs.rpc(ctx, "doCompleteUpload", p, &finalize, nil)
if err != nil {
return fmt.Errorf("failed to finalize upload: %w", err)
return errors.Wrap(err, "failed to finalize upload")
}
finalized = true

@@ -4,8 +4,6 @@ package ftp
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"net/textproto"
@@ -16,6 +14,7 @@ import (
"time"

"github.com/jlaffaye/ftp"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
@@ -49,22 +48,26 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "host",
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Help: "FTP host to connect to",
Required: true,
Examples: []fs.OptionExample{{
Value: "ftp.example.com",
Help: "Connect to ftp.example.com",
}},
}, {
Name: "user",
Help: "FTP username, leave blank for current username, " + currentUser + ".",
Help: "FTP username, leave blank for current username, " + currentUser,
}, {
Name: "port",
Help: "FTP port, leave blank to use default (21).",
Help: "FTP port, leave blank to use default (21)",
}, {
Name: "pass",
Help: "FTP password.",
Help: "FTP password",
IsPassword: true,
Required: true,
}, {
Name: "tls",
Help: `Use Implicit FTPS (FTP over TLS).

Help: `Use Implicit FTPS (FTP over TLS)
When using implicit FTP over TLS the client connects using TLS
right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather
@@ -72,41 +75,35 @@ than port 21. Cannot be used in combination with explicit FTP.`,
Default: false,
}, {
Name: "explicit_tls",
Help: `Use Explicit FTPS (FTP over TLS).

Help: `Use Explicit FTPS (FTP over TLS)
When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.`,
Default: false,
}, {
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited.",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
Default: 0,
Advanced: true,
}, {
Name: "no_check_certificate",
Help: "Do not verify the TLS certificate of the server.",
Help: "Do not verify the TLS certificate of the server",
Default: false,
Advanced: true,
}, {
Name: "disable_epsv",
Help: "Disable using EPSV even if server advertises support.",
Help: "Disable using EPSV even if server advertises support",
Default: false,
Advanced: true,
}, {
Name: "disable_mlsd",
Help: "Disable using MLSD even if server advertises support.",
Default: false,
Advanced: true,
}, {
Name: "writing_mdtm",
Help: "Use MDTM to set modification time (VsFtpd quirk)",
Help: "Disable using MLSD even if server advertises support",
Default: false,
Advanced: true,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
Help: `Max time before closing idle connections.
Help: `Max time before closing idle connections

If no connections have been returned to the connection pool in the time
given, rclone will empty the connection pool.
@@ -119,51 +116,17 @@ Set to 0 to keep connections indefinitely.
Help: "Maximum time to wait for a response to close.",
Default: fs.Duration(60 * time.Second),
Advanced: true,
}, {
Name: "tls_cache_size",
Help: `Size of TLS session cache for all control and data connections.

TLS cache allows to resume TLS sessions and reuse PSK between connections.
Increase if default size is not enough resulting in TLS resumption errors.
Enabled by default. Use 0 to disable.`,
Default: 32,
Advanced: true,
}, {
Name: "disable_tls13",
Help: "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)",
Default: false,
Advanced: true,
}, {
Name: "shut_timeout",
Help: "Maximum time to wait for data connection closing status.",
Default: fs.Duration(60 * time.Second),
Advanced: true,
}, {
Name: "ask_password",
Default: false,
Help: `Allow asking for FTP password when needed.

If this is set and no password is supplied then rclone will ask for a password
`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// The FTP protocol can't handle trailing spaces
// (for instance, pureftpd turns them into '_')
// The FTP protocol can't handle trailing spaces (for instance
// pureftpd turns them into _)
//
// proftpd can't handle '*' in file names
// pureftpd can't handle '[', ']' or '*'
Default: (encoder.Display |
encoder.EncodeRightSpace),
Examples: []fs.OptionExample{{
Value: "Asterisk,Ctl,Dot,Slash",
Help: "ProFTPd can't handle '*' in file names",
}, {
Value: "BackSlash,Ctl,Del,Dot,RightSpace,Slash,SquareBracket",
Help: "PureFTPd can't handle '[]' or '*' in file names",
}, {
Value: "Ctl,LeftPeriod,Slash",
Help: "VsFTPd can't handle file names starting with dot",
}},
}},
})
}
@@ -176,17 +139,12 @@ type Options struct {
Port string `config:"port"`
TLS bool `config:"tls"`
ExplicitTLS bool `config:"explicit_tls"`
TLSCacheSize int `config:"tls_cache_size"`
DisableTLS13 bool `config:"disable_tls13"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
WritingMDTM bool `config:"writing_mdtm"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"`
AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"`
}

@@ -207,9 +165,6 @@ type Fs struct {
tokens *pacer.TokenDispenser
tlsConf *tls.Config
pacer *fs.Pacer // pacer for FTP connections
fGetTime bool // true if the ftp library accepts GetTime
fSetTime bool // true if the ftp library accepts SetTime
fLstTime bool // true if the List call returns precise time
}

// Object describes an FTP file
@@ -224,7 +179,6 @@ type FileInfo struct {
Name string
Size uint64
ModTime time.Time
precise bool // true if the time is precise
IsDir bool
}

@@ -336,12 +290,6 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.DisableMLSD {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
}
if f.opt.ShutTimeout != 0 && f.opt.ShutTimeout != fs.DurationOff {
ftpConfig = append(ftpConfig, ftp.DialWithShutTimeout(time.Duration(f.opt.ShutTimeout)))
}
if f.opt.WritingMDTM {
ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
}
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
}
@@ -358,7 +306,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
return false, nil
})
if err != nil {
err = fmt.Errorf("failed to make FTP connection to %q: %w", f.dialAddr, err)
err = errors.Wrapf(err, "failed to make FTP connection to %q", f.dialAddr)
}
return c, err
}
@@ -405,8 +353,8 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
*pc = nil
if err != nil {
// If not a regular FTP error code then check the connection
var tpErr *textproto.Error
if !errors.As(err, &tpErr) {
_, isRegularError := errors.Cause(err).(*textproto.Error)
if !isRegularError {
nopErr := c.NoOp()
if nopErr != nil {
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
@@ -452,14 +400,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
if err != nil {
return nil, err
}
pass := ""
if opt.AskPassword && opt.Pass == "" {
pass = config.GetPassword("FTP server password")
} else {
pass, err = obscure.Reveal(opt.Pass)
if err != nil {
return nil, fmt.Errorf("NewFS decrypt password: %w", err)
}
pass, err := obscure.Reveal(opt.Pass)
if err != nil {
return nil, errors.Wrap(err, "NewFS decrypt password")
}
user := opt.User
if user == "" {
@@ -484,12 +427,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
ServerName: opt.Host,
InsecureSkipVerify: opt.SkipVerifyTLSCert,
}
if opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
}
if opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
u := protocol + path.Join(dialAddr+"/", root)
ci := fs.GetConfig(ctx)
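
The deleted block above is the whole effect of the two new options: tls_cache_size installs an LRU session cache shared by the control and data connections (so TLS sessions can be resumed and the PSK reused), and disable_tls13 caps the negotiated version for servers with broken TLS 1.3. Both are plain crypto/tls features, roughly as in this sketch (the host name is a hypothetical example):

package ftpsketch

import "crypto/tls"

// newFTPSTLSConfig shows what the two options translate to.
func newFTPSTLSConfig(cacheSize int, disableTLS13 bool) *tls.Config {
	tlsConfig := &tls.Config{ServerName: "ftp.example.com"}
	if cacheSize > 0 {
		tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(cacheSize) // resume sessions between connections
	}
	if disableTLS13 {
		tlsConfig.MaxVersion = tls.VersionTLS12 // cap for servers with buggy TLS 1.3
	}
	return tlsConfig
}
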
@@ -516,13 +453,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
// Make a connection and pool it to return errors early
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, fmt.Errorf("NewFs: %w", err)
}
f.fGetTime = c.IsGetTimeSupported()
f.fSetTime = c.IsSetTimeSupported()
f.fLstTime = c.IsTimePreciseInList()
if !f.fLstTime && f.fGetTime {
f.features.SlowModTime = true
return nil, errors.Wrap(err, "NewFs")
}
f.putFtpConnection(&c, nil)
if root != "" {
@@ -534,7 +465,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
}
_, err := f.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
// File doesn't exist so return old f
f.root = root
return f, nil
@@ -613,7 +544,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err

c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, fmt.Errorf("findItem: %w", err)
return nil, errors.Wrap(err, "findItem")
}
files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
@@ -642,12 +573,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
fs: f,
remote: remote,
}
o.info = &FileInfo{
info := &FileInfo{
Name: remote,
Size: entry.Size,
ModTime: entry.Time,
precise: f.fLstTime,
}
o.info = info

return o, nil
}
return nil, fs.ErrorObjectNotFound
@@ -657,7 +589,7 @@ func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err err
entry, err := f.findItem(ctx, remote)
if err != nil {
return false, fmt.Errorf("dirExists: %w", err)
return false, errors.Wrap(err, "dirExists")
}
if entry != nil && entry.Type == ftp.EntryTypeFolder {
return true, nil
@@ -678,7 +610,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, fmt.Errorf("list: %w", err)
return nil, errors.Wrap(err, "list")
}

var listErr error
@@ -716,7 +648,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if len(files) == 0 {
exists, err := f.dirExists(ctx, dir)
if err != nil {
return nil, fmt.Errorf("list: %w", err)
return nil, errors.Wrap(err, "list")
}
if !exists {
return nil, fs.ErrorDirNotFound
@@ -742,7 +674,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
Name: newremote,
Size: object.Size,
ModTime: object.Time,
precise: f.fLstTime,
}
o.info = info
entries = append(entries, o)
@@ -756,18 +687,8 @@ func (f *Fs) Hashes() hash.Set {
return 0
}

// Precision shows whether modified time is supported or not depending on the
// FTP server capabilities, namely whether FTP server:
// - accepts the MDTM command to get file time (fGetTime)
// or supports MLSD returning precise file time in the list (fLstTime)
// - accepts the MFMT command to set file time (fSetTime)
// or non-standard form of the MDTM command (fSetTime, too)
// used by VsFtpd for the same purpose (WritingMDTM)
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
// Precision shows Modified Time not supported
func (f *Fs) Precision() time.Duration {
if (f.fGetTime || f.fLstTime) && f.fSetTime {
return time.Second
}
return fs.ModTimeNotSupported
}

@@ -780,7 +701,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// fs.Debugf(f, "Trying to put file %s", src.Remote())
err := f.mkParentDir(ctx, src.Remote())
if err != nil {
return nil, fmt.Errorf("Put mkParentDir failed: %w", err)
return nil, errors.Wrap(err, "Put mkParentDir failed")
}
o := &Object{
fs: f,
@@ -803,7 +724,7 @@ func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err erro

c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, fmt.Errorf("getInfo: %w", err)
return nil, errors.Wrap(err, "getInfo")
}
files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
@@ -819,7 +740,6 @@ func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err erro
Name: remote,
Size: file.Size,
ModTime: file.Time,
precise: f.fLstTime,
IsDir: file.Type == ftp.EntryTypeFolder,
}
return info, nil
@@ -841,7 +761,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
}
return fs.ErrorIsFile
} else if err != fs.ErrorObjectNotFound {
return fmt.Errorf("mkdir %q failed: %w", abspath, err)
return errors.Wrapf(err, "mkdir %q failed", abspath)
}
parent := path.Dir(abspath)
err = f.mkdir(ctx, parent)
@@ -850,7 +770,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
}
c, connErr := f.getFtpConnection(ctx)
if connErr != nil {
return fmt.Errorf("mkdir: %w", connErr)
return errors.Wrap(connErr, "mkdir")
}
err = c.MakeDir(f.dirFromStandardPath(abspath))
f.putFtpConnection(&c, err)
@@ -886,7 +806,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
c, err := f.getFtpConnection(ctx)
if err != nil {
return fmt.Errorf("Rmdir: %w", translateErrorFile(err))
return errors.Wrap(translateErrorFile(err), "Rmdir")
}
err = c.RemoveDir(f.dirFromStandardPath(path.Join(f.root, dir)))
f.putFtpConnection(&c, err)
@@ -902,11 +822,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
err := f.mkParentDir(ctx, remote)
if err != nil {
return nil, fmt.Errorf("Move mkParentDir failed: %w", err)
return nil, errors.Wrap(err, "Move mkParentDir failed")
}
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, fmt.Errorf("Move: %w", err)
return nil, errors.Wrap(err, "Move")
}
err = c.Rename(
f.opt.Enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
@@ -914,11 +834,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
)
f.putFtpConnection(&c, err)
if err != nil {
return nil, fmt.Errorf("Move Rename failed: %w", err)
return nil, errors.Wrap(err, "Move Rename failed")
}
dstObj, err := f.NewObject(ctx, remote)
if err != nil {
return nil, fmt.Errorf("Move NewObject failed: %w", err)
return nil, errors.Wrap(err, "Move NewObject failed")
}
return dstObj, nil
}
@@ -948,19 +868,19 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
return fs.ErrorIsFile
} else if err != fs.ErrorObjectNotFound {
return fmt.Errorf("DirMove getInfo failed: %w", err)
return errors.Wrapf(err, "DirMove getInfo failed")
}

// Make sure the parent directory exists
err = f.mkdir(ctx, path.Dir(dstPath))
if err != nil {
return fmt.Errorf("DirMove mkParentDir dst failed: %w", err)
return errors.Wrap(err, "DirMove mkParentDir dst failed")
}

// Do the move
c, err := f.getFtpConnection(ctx)
if err != nil {
return fmt.Errorf("DirMove: %w", err)
return errors.Wrap(err, "DirMove")
}
err = c.Rename(
f.dirFromStandardPath(srcPath),
@@ -968,7 +888,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
)
f.putFtpConnection(&c, err)
if err != nil {
return fmt.Errorf("DirMove Rename(%q,%q) failed: %w", srcPath, dstPath, err)
return errors.Wrapf(err, "DirMove Rename(%q,%q) failed", srcPath, dstPath)
}
return nil
}
@@ -1005,41 +925,12 @@ func (o *Object) Size() int64 {

// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
if !o.info.precise && o.fs.fGetTime {
c, err := o.fs.getFtpConnection(ctx)
if err == nil {
path := path.Join(o.fs.root, o.remote)
path = o.fs.opt.Enc.FromStandardPath(path)
modTime, err := c.GetTime(path)
if err == nil && o.info != nil {
o.info.ModTime = modTime
o.info.precise = true
}
o.fs.putFtpConnection(&c, err)
}
}
return o.info.ModTime
}

// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if !o.fs.fSetTime {
fs.Errorf(o.fs, "SetModTime is not supported")
return nil
}
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return err
}
path := path.Join(o.fs.root, o.remote)
path = o.fs.opt.Enc.FromStandardPath(path)
err = c.SetTime(path, modTime.In(time.UTC))
if err == nil && o.info != nil {
o.info.ModTime = modTime
o.info.precise = true
}
o.fs.putFtpConnection(&c, err)
return err
return nil
}

// Storable returns a boolean as to whether this object is storable
@@ -1072,11 +963,7 @@ func (f *ftpReadCloser) Close() error {
errchan <- f.rc.Close()
}()
// Wait for Close for up to 60 seconds by default
closeTimeout := f.f.opt.CloseTimeout
if closeTimeout == 0 {
closeTimeout = fs.DurationOff
}
timer := time.NewTimer(time.Duration(closeTimeout))
timer := time.NewTimer(time.Duration(f.f.opt.CloseTimeout))
select {
case err = <-errchan:
timer.Stop()
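
The surrounding hunk bounds a potentially blocking Close by racing it against a timer; the only disagreement between the two sides is how the timeout is chosen when close_timeout is 0. The general shape of the pattern, as a sketch with a hypothetical helper over any io.Closer:

package ftpsketch

import (
	"fmt"
	"io"
	"time"
)

// closeWithTimeout runs rc.Close in a goroutine so a dead connection
// cannot hang the caller. The buffered channel lets the goroutine finish
// even if the timeout wins.
func closeWithTimeout(rc io.Closer, d time.Duration) error {
	errchan := make(chan error, 1)
	go func() { errchan <- rc.Close() }()
	timer := time.NewTimer(d)
	select {
	case err := <-errchan:
		timer.Stop()
		return err
	case <-timer.C:
		return fmt.Errorf("timeout when waiting for connection Close")
	}
}
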
|
||||
@@ -1125,12 +1012,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
|
||||
}
|
||||
c, err := o.fs.getFtpConnection(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("open: %w", err)
|
||||
return nil, errors.Wrap(err, "open")
|
||||
}
|
||||
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
|
||||
if err != nil {
|
||||
o.fs.putFtpConnection(&c, err)
|
||||
return nil, fmt.Errorf("open: %w", err)
|
||||
return nil, errors.Wrap(err, "open")
|
||||
}
|
||||
rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
|
||||
return rc, nil
|
||||
@@ -1160,33 +1047,19 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
c, err := o.fs.getFtpConnection(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Update: %w", err)
|
||||
return errors.Wrap(err, "Update")
|
||||
}
|
||||
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
|
||||
// Ignore error 250 here - send by some servers
|
||||
if err != nil {
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
switch errX.Code {
|
||||
case ftp.StatusRequestedFileActionOK:
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
_ = c.Quit() // toss this connection to avoid sync errors
|
||||
// recycle connection in advance to let remove() find free token
|
||||
o.fs.putFtpConnection(nil, err)
|
||||
remove()
|
||||
return fmt.Errorf("update stor: %w", err)
|
||||
o.fs.putFtpConnection(nil, err)
|
||||
return errors.Wrap(err, "update stor")
|
||||
}
|
||||
o.fs.putFtpConnection(&c, nil)
|
||||
if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
|
||||
return fmt.Errorf("SetModTime: %w", err)
|
||||
}
|
||||
o.info, err = o.fs.getInfo(ctx, path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("update getinfo: %w", err)
|
||||
return errors.Wrap(err, "update getinfo")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1205,7 +1078,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
} else {
|
||||
c, err := o.fs.getFtpConnection(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Remove: %w", err)
|
||||
return errors.Wrap(err, "Remove")
|
||||
}
|
||||
err = c.Delete(o.fs.opt.Enc.FromStandardPath(path))
|
||||
o.fs.putFtpConnection(&c, err)
|
||||
|
||||
@@ -1,115 +0,0 @@
package ftp

import (
    "context"
    "fmt"
    "strings"
    "testing"
    "time"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/object"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/fstest/fstests"
    "github.com/rclone/rclone/lib/readers"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

type settings map[string]interface{}

func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
    fsName := strings.Split(f.Name(), "{")[0] // strip off hash
    configMap := configmap.Simple{}
    for key, val := range opts {
        configMap[key] = fmt.Sprintf("%v", val)
    }
    remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), f.Root())
    fixFs, err := fs.NewFs(ctx, remote)
    require.NoError(t, err)
    return fixFs
}

// test that big file uploads do not cause network i/o timeout
func (f *Fs) testUploadTimeout(t *testing.T) {
    const (
        fileSize = 100000000 // 100 MB
        idleTimeout = 40 * time.Millisecond // small because test server is local
        maxTime = 10 * time.Second // prevent test hangup
    )

    if testing.Short() {
        t.Skip("not running with -short")
    }

    ctx := context.Background()
    ci := fs.GetConfig(ctx)
    saveLowLevelRetries := ci.LowLevelRetries
    saveTimeout := ci.Timeout
    defer func() {
        ci.LowLevelRetries = saveLowLevelRetries
        ci.Timeout = saveTimeout
    }()
    ci.LowLevelRetries = 1
    ci.Timeout = idleTimeout

    upload := func(concurrency int, shutTimeout time.Duration) (obj fs.Object, err error) {
        fixFs := deriveFs(ctx, t, f, settings{
            "concurrency": concurrency,
            "shut_timeout": shutTimeout,
        })

        // Make test object
        fileTime := fstest.Time("2020-03-08T09:30:00.000000000Z")
        meta := object.NewStaticObjectInfo("upload-timeout.test", fileTime, int64(fileSize), true, nil, nil)
        data := readers.NewPatternReader(int64(fileSize))

        // Run upload and ensure maximum time
        done := make(chan bool)
        deadline := time.After(maxTime)
        go func() {
            obj, err = fixFs.Put(ctx, data, meta)
            done <- true
        }()
        select {
        case <-done:
        case <-deadline:
            t.Fatalf("Upload got stuck for %v !", maxTime)
        }

        return obj, err
    }

    // non-zero shut_timeout should fix i/o errors
    obj, err := upload(f.opt.Concurrency, time.Second)
    assert.NoError(t, err)
    assert.NotNil(t, obj)
    if obj != nil {
        _ = obj.Remove(ctx)
    }
}

// rclone must support precise time with ProFtpd and PureFtpd out of the box.
// The VsFtpd server does not support the MFMT command to set file time like
// other servers but by default supports the MDTM command in the non-standard
// two-argument form for the same purpose.
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
func (f *Fs) testTimePrecision(t *testing.T) {
    name := f.Name()
    if pos := strings.Index(name, "{"); pos != -1 {
        name = name[:pos]
    }
    switch name {
    case "TestFTPProftpd", "TestFTPPureftpd", "TestFTPVsftpd":
        assert.LessOrEqual(t, f.Precision(), time.Second)
    }
}

// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
    t.Run("UploadTimeout", f.testUploadTimeout)
    t.Run("TimePrecision", f.testTimePrecision)
}

var _ fstests.InternalTester = (*Fs)(nil)
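The deleted test above builds a tweaked copy of the remote through rclone's connection-string syntax: the remote name, comma-separated option overrides, then the path, exactly as deriveFs assembles with fmt.Sprintf. The same syntax works from the command line; a hedged example (remote name and path are illustrative):

    rclone lsf "myftp,concurrency=2,shut_timeout=1s:upload"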
@@ -9,27 +9,25 @@ import (
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against rclone FTP server
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestFTPRclone:",
        NilObject: (*ftp.Object)(nil),
    })
}

// TestIntegrationProftpd runs integration tests against proFTPd
func TestIntegrationProftpd(t *testing.T) {
    if *fstest.RemoteName != "" {
        t.Skip("skipping as -remote is set")
    }
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestFTPProftpd:",
        NilObject: (*ftp.Object)(nil),
    })
}

// TestIntegrationPureftpd runs integration tests against pureFTPd
func TestIntegrationPureftpd(t *testing.T) {
func TestIntegration2(t *testing.T) {
    if *fstest.RemoteName != "" {
        t.Skip("skipping as -remote is set")
    }
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestFTPRclone:",
        NilObject: (*ftp.Object)(nil),
    })
}

func TestIntegration3(t *testing.T) {
    if *fstest.RemoteName != "" {
        t.Skip("skipping as -remote is set")
    }
@@ -39,13 +37,12 @@ func TestIntegrationPureftpd(t *testing.T) {
    })
}

// TestIntegrationVsftpd runs integration tests against vsFTPd
func TestIntegrationVsftpd(t *testing.T) {
    if *fstest.RemoteName != "" {
        t.Skip("skipping as -remote is set")
    }
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestFTPVsftpd:",
        NilObject: (*ftp.Object)(nil),
    })
}
// func TestIntegration4(t *testing.T) {
// 	if *fstest.RemoteName != "" {
// 		t.Skip("skipping as -remote is set")
// 	}
// 	fstests.Run(t, &fstests.Opt{
// 		RemoteName: "TestFTPVsftpd:",
// 		NilObject:  (*ftp.Object)(nil),
// 	})
// }

@@ -16,16 +16,16 @@ import (
    "context"
    "encoding/base64"
    "encoding/hex"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "log"
    "net/http"
    "path"
    "strconv"
    "strings"
    "time"

    "github.com/pkg/errors"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/fs/config/configmap"
@@ -51,10 +51,10 @@ import (
const (
    rcloneClientID = "202264815644.apps.googleusercontent.com"
    rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
    timeFormat = time.RFC3339Nano
    metaMtime = "mtime" // key to store mtime in metadata
    metaMtimeGsutil = "goog-reserved-file-mtime" // key used by GSUtil to store mtime in metadata
    listChunks = 1000 // chunk size to read directory listings
    timeFormatIn = time.RFC3339
    timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
    metaMtime = "mtime" // key to store mtime under in metadata
    listChunks = 1000 // chunk size to read directory listings
    minSleep = 10 * time.Millisecond
)

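The constants hunk above trades the single RFC3339Nano format for split in/out formats; the fixed-width output format always emits nine fractional digits, while RFC3339Nano trims trailing zeros on output. A quick standard-library illustration:

    t := time.Date(2021, 10, 1, 12, 0, 0, 500000000, time.UTC)
    fmt.Println(t.Format(time.RFC3339Nano))                      // 2021-10-01T12:00:00.5Z
    fmt.Println(t.Format("2006-01-02T15:04:05.000000000Z07:00")) // 2021-10-01T12:00:00.500000000Z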
@@ -76,71 +76,72 @@ func init() {
        Prefix: "gcs",
        Description: "Google Cloud Storage (this is not Google Drive)",
        NewFs: NewFs,
        Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
        Config: func(ctx context.Context, name string, m configmap.Mapper) {
            saFile, _ := m.Get("service_account_file")
            saCreds, _ := m.Get("service_account_credentials")
            anonymous, _ := m.Get("anonymous")
            if saFile != "" || saCreds != "" || anonymous == "true" {
                return nil, nil
                return
            }
            err := oauthutil.Config(ctx, "google cloud storage", name, m, storageConfig, nil)
            if err != nil {
                log.Fatalf("Failed to configure token: %v", err)
            }
            return oauthutil.ConfigOut("", &oauthutil.Options{
                OAuth2Config: storageConfig,
            })
        },
        Options: append(oauthutil.SharedOptions, []fs.Option{{
            Name: "project_number",
            Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
            Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
        }, {
            Name: "service_account_file",
            Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
            Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
        }, {
            Name: "service_account_credentials",
            Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
            Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
            Hide: fs.OptionHideBoth,
        }, {
            Name: "anonymous",
            Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
            Help: "Access public buckets and objects without credentials\nSet to 'true' if you just want to download files and don't configure credentials.",
            Default: false,
        }, {
            Name: "object_acl",
            Help: "Access Control List for new objects.",
            Examples: []fs.OptionExample{{
                Value: "authenticatedRead",
                Help: "Object owner gets OWNER access.\nAll Authenticated Users get READER access.",
                Help: "Object owner gets OWNER access, and all Authenticated Users get READER access.",
            }, {
                Value: "bucketOwnerFullControl",
                Help: "Object owner gets OWNER access.\nProject team owners get OWNER access.",
                Help: "Object owner gets OWNER access, and project team owners get OWNER access.",
            }, {
                Value: "bucketOwnerRead",
                Help: "Object owner gets OWNER access.\nProject team owners get READER access.",
                Help: "Object owner gets OWNER access, and project team owners get READER access.",
            }, {
                Value: "private",
                Help: "Object owner gets OWNER access.\nDefault if left blank.",
                Help: "Object owner gets OWNER access [default if left blank].",
            }, {
                Value: "projectPrivate",
                Help: "Object owner gets OWNER access.\nProject team members get access according to their roles.",
                Help: "Object owner gets OWNER access, and project team members get access according to their roles.",
            }, {
                Value: "publicRead",
                Help: "Object owner gets OWNER access.\nAll Users get READER access.",
                Help: "Object owner gets OWNER access, and all Users get READER access.",
            }},
        }, {
            Name: "bucket_acl",
            Help: "Access Control List for new buckets.",
            Examples: []fs.OptionExample{{
                Value: "authenticatedRead",
                Help: "Project team owners get OWNER access.\nAll Authenticated Users get READER access.",
                Help: "Project team owners get OWNER access, and all Authenticated Users get READER access.",
            }, {
                Value: "private",
                Help: "Project team owners get OWNER access.\nDefault if left blank.",
                Help: "Project team owners get OWNER access [default if left blank].",
            }, {
                Value: "projectPrivate",
                Help: "Project team members get access according to their roles.",
            }, {
                Value: "publicRead",
                Help: "Project team owners get OWNER access.\nAll Users get READER access.",
                Help: "Project team owners get OWNER access, and all Users get READER access.",
            }, {
                Value: "publicReadWrite",
                Help: "Project team owners get OWNER access.\nAll Users get WRITER access.",
                Help: "Project team owners get OWNER access, and all Users get WRITER access.",
            }},
        }, {
            Name: "bucket_policy_only",
@@ -163,64 +164,64 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
            Help: "Location for the newly created buckets.",
            Examples: []fs.OptionExample{{
                Value: "",
                Help: "Empty for default location (US)",
                Help: "Empty for default location (US).",
            }, {
                Value: "asia",
                Help: "Multi-regional location for Asia",
                Help: "Multi-regional location for Asia.",
            }, {
                Value: "eu",
                Help: "Multi-regional location for Europe",
                Help: "Multi-regional location for Europe.",
            }, {
                Value: "us",
                Help: "Multi-regional location for United States",
                Help: "Multi-regional location for United States.",
            }, {
                Value: "asia-east1",
                Help: "Taiwan",
                Help: "Taiwan.",
            }, {
                Value: "asia-east2",
                Help: "Hong Kong",
                Help: "Hong Kong.",
            }, {
                Value: "asia-northeast1",
                Help: "Tokyo",
                Help: "Tokyo.",
            }, {
                Value: "asia-south1",
                Help: "Mumbai",
                Help: "Mumbai.",
            }, {
                Value: "asia-southeast1",
                Help: "Singapore",
                Help: "Singapore.",
            }, {
                Value: "australia-southeast1",
                Help: "Sydney",
                Help: "Sydney.",
            }, {
                Value: "europe-north1",
                Help: "Finland",
                Help: "Finland.",
            }, {
                Value: "europe-west1",
                Help: "Belgium",
                Help: "Belgium.",
            }, {
                Value: "europe-west2",
                Help: "London",
                Help: "London.",
            }, {
                Value: "europe-west3",
                Help: "Frankfurt",
                Help: "Frankfurt.",
            }, {
                Value: "europe-west4",
                Help: "Netherlands",
                Help: "Netherlands.",
            }, {
                Value: "us-central1",
                Help: "Iowa",
                Help: "Iowa.",
            }, {
                Value: "us-east1",
                Help: "South Carolina",
                Help: "South Carolina.",
            }, {
                Value: "us-east4",
                Help: "Northern Virginia",
                Help: "Northern Virginia.",
            }, {
                Value: "us-west1",
                Help: "Oregon",
                Help: "Oregon.",
            }, {
                Value: "us-west2",
                Help: "California",
                Help: "California.",
            }},
        }, {
            Name: "storage_class",
@@ -375,7 +376,7 @@ func (o *Object) split() (bucket, bucketPath string) {
func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http.Client, error) {
    conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
    if err != nil {
        return nil, fmt.Errorf("error processing credentials: %w", err)
        return nil, errors.Wrap(err, "error processing credentials")
    }
    ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(ctx))
    return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
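getServiceAccountClient above is the standard golang.org/x/oauth2 service-account flow: parse the JSON key into a JWT config, then build an HTTP client from its token source. A minimal sketch without the rclone-specific context plumbing (keyJSON is an illustrative variable holding the credentials file contents):

    conf, err := google.JWTConfigFromJSON(keyJSON, storage.DevstorageFullControlScope)
    if err != nil {
        return nil, fmt.Errorf("error processing credentials: %w", err)
    }
    client := oauth2.NewClient(ctx, conf.TokenSource(ctx))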
@@ -408,7 +409,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
        loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
        if err != nil {
            return nil, fmt.Errorf("error opening service account credentials file: %w", err)
            return nil, errors.Wrap(err, "error opening service account credentials file")
        }
        opt.ServiceAccountCredentials = string(loadedCreds)
    }
@@ -417,7 +418,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    } else if opt.ServiceAccountCredentials != "" {
        oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials))
        if err != nil {
            return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
            return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
        }
    } else {
        oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
@@ -425,7 +426,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
            ctx := context.Background()
            oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
            if err != nil {
                return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
                return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
            }
        }
    }
@@ -449,7 +450,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    f.client = oAuthClient
    f.svc, err = storage.New(f.client)
    if err != nil {
        return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
        return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
    }

    if f.rootBucket != "" && f.rootDirectory != "" {
@@ -759,10 +760,10 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
        return nil
    } else if gErr, ok := err.(*googleapi.Error); ok {
        if gErr.Code != http.StatusNotFound {
            return fmt.Errorf("failed to get bucket: %w", err)
            return errors.Wrap(err, "failed to get bucket")
        }
    } else {
        return fmt.Errorf("failed to get bucket: %w", err)
        return errors.Wrap(err, "failed to get bucket")
    }

    if f.opt.ProjectNumber == "" {
@@ -921,7 +922,7 @@ func (o *Object) setMetaData(info *storage.Object) {
    // read mtime out of metadata if available
    mtimeString, ok := info.Metadata[metaMtime]
    if ok {
        modTime, err := time.Parse(timeFormat, mtimeString)
        modTime, err := time.Parse(timeFormatIn, mtimeString)
        if err == nil {
            o.modTime = modTime
            return
@@ -929,19 +930,8 @@ func (o *Object) setMetaData(info *storage.Object) {
        fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
    }

    // Fallback to GSUtil mtime
    mtimeGsutilString, ok := info.Metadata[metaMtimeGsutil]
    if ok {
        unixTimeSec, err := strconv.ParseInt(mtimeGsutilString, 10, 64)
        if err == nil {
            o.modTime = time.Unix(unixTimeSec, 0)
            return
        }
        fs.Debugf(o, "Failed to read GSUtil mtime from metadata: %s", err)
    }

    // Fallback to the Updated time
    modTime, err := time.Parse(timeFormat, info.Updated)
    modTime, err := time.Parse(timeFormatIn, info.Updated)
    if err != nil {
        fs.Logf(o, "Bad time decode: %v", err)
    } else {
@@ -998,8 +988,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string {
    metadata := make(map[string]string, 1)
    metadata[metaMtime] = modTime.Format(timeFormat)
    metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
    metadata[metaMtime] = modTime.Format(timeFormatOut)
    return metadata
}

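The metadata hunks above show the two mtime conventions the newer branch reads and writes: rclone's own "mtime" key holding an RFC3339 timestamp, and the "goog-reserved-file-mtime" key that gsutil uses, holding unix seconds. A condensed sketch of writing both, assuming only the standard library:

    metadata := map[string]string{
        "mtime":                    modTime.Format(time.RFC3339Nano),
        "goog-reserved-file-mtime": strconv.FormatInt(modTime.Unix(), 10),
    }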
@@ -1011,11 +1000,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
        return err
    }
    // Add the mtime to the existing metadata
    mtime := modTime.Format(timeFormatOut)
    if object.Metadata == nil {
        object.Metadata = make(map[string]string, 1)
    }
    object.Metadata[metaMtime] = modTime.Format(timeFormat)
    object.Metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
    object.Metadata[metaMtime] = mtime
    // Copy the object to itself to update the metadata
    // Using PATCH requires too many permissions
    bucket, bucketPath := o.split()
@@ -1065,7 +1054,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
    _, isRanging := req.Header["Range"]
    if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) {
        _ = res.Body.Close() // ignore error
        return nil, fmt.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
        return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
    }
    return res.Body, nil
}

@@ -6,9 +6,9 @@ package googlephotos
import (
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    golog "log"
    "net/http"
    "net/url"
    "path"
@@ -18,9 +18,9 @@ import (
    "sync"
    "time"

    "github.com/pkg/errors"
    "github.com/rclone/rclone/backend/googlephotos/api"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/config/obscure"
@@ -29,7 +29,6 @@ import (
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/log"
    "github.com/rclone/rclone/lib/encoder"
    "github.com/rclone/rclone/lib/oauthutil"
    "github.com/rclone/rclone/lib/pacer"
    "github.com/rclone/rclone/lib/rest"
@@ -55,7 +54,6 @@ const (
    minSleep = 10 * time.Millisecond
    scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly"
    scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
    scopeAccess = 2 // position of access scope in list
)

var (
@@ -64,7 +62,7 @@ var (
        Scopes: []string{
            "openid",
            "profile",
            scopeReadWrite, // this must be at position scopeAccess
            scopeReadWrite,
        },
        Endpoint: google.Endpoint,
        ClientID: rcloneClientID,
@@ -80,36 +78,36 @@ func init() {
        Prefix: "gphotos",
        Description: "Google Photos",
        NewFs: NewFs,
        Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
        Config: func(ctx context.Context, name string, m configmap.Mapper) {
            // Parse config into Options struct
            opt := new(Options)
            err := configstruct.Set(m, opt)
            if err != nil {
                return nil, fmt.Errorf("couldn't parse config into struct: %w", err)
                fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
                return
            }

            switch config.State {
            case "":
                // Fill in the scopes
                if opt.ReadOnly {
                    oauthConfig.Scopes[scopeAccess] = scopeReadOnly
                } else {
                    oauthConfig.Scopes[scopeAccess] = scopeReadWrite
                }
                return oauthutil.ConfigOut("warning", &oauthutil.Options{
                    OAuth2Config: oauthConfig,
                })
            case "warning":
                // Warn the user as required by google photos integration
                return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning

IMPORTANT: All media items uploaded to Google Photos with rclone
are stored in full resolution at original quality. These uploads
will count towards storage in your Google Account.`)
            case "warning_done":
                return nil, nil
            // Fill in the scopes
            if opt.ReadOnly {
                oauthConfig.Scopes[0] = scopeReadOnly
            } else {
                oauthConfig.Scopes[0] = scopeReadWrite
            }
            return nil, fmt.Errorf("unknown state %q", config.State)

            // Do the oauth
            err = oauthutil.Config(ctx, "google photos", name, m, oauthConfig, nil)
            if err != nil {
                golog.Fatalf("Failed to configure token: %v", err)
            }

            // Warn the user
            fmt.Print(`
*** IMPORTANT: All media items uploaded to Google Photos with rclone
*** are stored in full resolution at original quality. These uploads
*** will count towards storage in your Google Account.

`)

        },
        Options: append(oauthutil.SharedOptions, []fs.Option{{
            Name: "read_only",
@@ -132,14 +130,14 @@ you want to read the media.`,
        }, {
            Name: "start_year",
            Default: 2000,
            Help: `Year limits the photos to be downloaded to those which are uploaded after the given year.`,
            Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`,
            Advanced: true,
        }, {
            Name: "include_archived",
            Default: false,
            Help: `Also view and download archived media.

By default, rclone does not request archived media. Thus, when syncing,
By default rclone does not request archived media. Thus, when syncing,
archived media is not visible in directory listings or transferred.

Note that media in albums is always visible and synced, no matter
@@ -151,24 +149,16 @@ listings and transferred.
Without this flag, archived media will not be visible in directory
listings and won't be transferred.`,
            Advanced: true,
        }, {
            Name: config.ConfigEncoding,
            Help: config.ConfigEncodingHelp,
            Advanced: true,
            Default: (encoder.Base |
                encoder.EncodeCrLf |
                encoder.EncodeInvalidUtf8),
        }}...),
    })
}

// Options defines the configuration for this backend
type Options struct {
    ReadOnly bool `config:"read_only"`
    ReadSize bool `config:"read_size"`
    StartYear int `config:"start_year"`
    IncludeArchived bool `config:"include_archived"`
    Enc encoder.MultiEncoder `config:"encoding"`
    ReadOnly bool `config:"read_only"`
    ReadSize bool `config:"read_size"`
    StartYear int `config:"start_year"`
    IncludeArchived bool `config:"include_archived"`
}

// Fs represents a remote storage server
@@ -292,7 +282,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    baseClient := fshttp.NewClient(ctx)
    oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
    if err != nil {
        return nil, fmt.Errorf("failed to configure Box: %w", err)
        return nil, errors.Wrap(err, "failed to configure Box")
    }

    root = strings.Trim(path.Clean(root), "/")
@@ -345,13 +335,13 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return "", fmt.Errorf("couldn't read openID config: %w", err)
        return "", errors.Wrap(err, "couldn't read openID config")
    }

    // Find userinfo endpoint
    endpoint, ok := openIDconfig[name].(string)
    if !ok {
        return "", fmt.Errorf("couldn't find %q from openID config", name)
        return "", errors.Errorf("couldn't find %q from openID config", name)
    }

    return endpoint, nil
@@ -374,7 +364,7 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, fmt.Errorf("couldn't read user info: %w", err)
        return nil, errors.Wrap(err, "couldn't read user info")
    }
    return userInfo, nil
}
@@ -405,7 +395,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return fmt.Errorf("couldn't revoke token: %w", err)
        return errors.Wrap(err, "couldn't revoke token")
    }
    fs.Infof(f, "res = %+v", res)
    return nil
@@ -492,7 +482,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, fmt.Errorf("couldn't list albums: %w", err)
        return nil, errors.Wrap(err, "couldn't list albums")
    }
    newAlbums := result.Albums
    if shared {
@@ -506,9 +496,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
        lastID = newAlbums[len(newAlbums)-1].ID
    }
    for i := range newAlbums {
        anAlbum := newAlbums[i]
        anAlbum.Title = f.opt.Enc.FromStandardPath(anAlbum.Title)
        all.add(&anAlbum)
        all.add(&newAlbums[i])
    }
    if result.NextPageToken == "" {
        break
@@ -549,7 +537,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return fmt.Errorf("couldn't list files: %w", err)
        return errors.Wrap(err, "couldn't list files")
    }
    items := result.MediaItems
    if len(items) > 0 && items[0].ID == lastID {
@@ -693,7 +681,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, fmt.Errorf("couldn't create album: %w", err)
        return nil, errors.Wrap(err, "couldn't create album")
    }
    f.albums[false].add(&result)
    return &result, nil
@@ -879,7 +867,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return fmt.Errorf("couldn't get media item: %w", err)
        return errors.Wrap(err, "couldn't get media item")
    }
    o.setMetaData(&item)
    return nil
@@ -1014,7 +1002,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return fmt.Errorf("couldn't upload file: %w", err)
        return errors.Wrap(err, "couldn't upload file")
    }
    uploadToken := strings.TrimSpace(string(token))
    if uploadToken == "" {
@@ -1042,14 +1030,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return fmt.Errorf("failed to create media item: %w", err)
        return errors.Wrap(err, "failed to create media item")
    }
    if len(result.NewMediaItemResults) != 1 {
        return errors.New("bad response to BatchCreate wrong number of items")
    }
    mediaItemResult := result.NewMediaItemResults[0]
    if mediaItemResult.Status.Code != 0 {
        return fmt.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
        return errors.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
    }
    o.setMetaData(&mediaItemResult.MediaItem)

@@ -1071,7 +1059,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
    albumTitle, fileName := match[1], match[2]
    album, ok := o.fs.albums[false].get(albumTitle)
    if !ok {
        return fmt.Errorf("couldn't find %q in album %q for delete", fileName, albumTitle)
        return errors.Errorf("couldn't find %q in album %q for delete", fileName, albumTitle)
    }
    opts := rest.Opts{
        Method: "POST",
@@ -1087,7 +1075,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return fmt.Errorf("couldn't delete item from album: %w", err)
        return errors.Wrap(err, "couldn't delete item from album")
    }
    return nil
}

@@ -11,6 +11,7 @@ import (
    "strings"
    "time"

    "github.com/pkg/errors"
    "github.com/rclone/rclone/backend/googlephotos/api"
    "github.com/rclone/rclone/fs"
)
@@ -269,7 +270,7 @@ func days(ctx context.Context, f lister, prefix string, match []string) (entries
    year := match[1]
    current, err := time.Parse("2006", year)
    if err != nil {
        return nil, fmt.Errorf("bad year %q", match[1])
        return nil, errors.Errorf("bad year %q", match[1])
    }
    currentYear := current.Year()
    for current.Year() == currentYear {
@@ -283,7 +284,7 @@ func days(ctx context.Context, f lister, prefix string, match []string) (entries
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
    year, err := strconv.Atoi(match[1])
    if err != nil || year < 1000 || year > 3000 {
        return sf, fmt.Errorf("bad year %q", match[1])
        return sf, errors.Errorf("bad year %q", match[1])
    }
    sf = api.SearchFilter{
        Filters: &api.Filters{
@@ -299,14 +300,14 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
    if len(match) >= 3 {
        month, err := strconv.Atoi(match[2])
        if err != nil || month < 1 || month > 12 {
            return sf, fmt.Errorf("bad month %q", match[2])
            return sf, errors.Errorf("bad month %q", match[2])
        }
        sf.Filters.DateFilter.Dates[0].Month = month
    }
    if len(match) >= 4 {
        day, err := strconv.Atoi(match[3])
        if err != nil || day < 1 || day > 31 {
            return sf, fmt.Errorf("bad day %q", match[3])
            return sf, errors.Errorf("bad day %q", match[3])
        }
        sf.Filters.DateFilter.Dates[0].Day = day
    }

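These filters back the googlephotos virtual directory tree, where a path encodes the date being searched for. A hedged usage example (directory layout per the googlephotos backend docs; the date is illustrative):

    rclone lsf "gphotos:media/by-day/2021/2021-10-01"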
@@ -1,180 +0,0 @@
package hasher

import (
    "context"
    "errors"
    "fmt"
    "path"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/accounting"
    "github.com/rclone/rclone/fs/cache"
    "github.com/rclone/rclone/fs/fspath"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/lib/kv"
)

// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
    switch name {
    case "drop":
        return nil, f.db.Stop(true)
    case "dump", "fulldump":
        return nil, f.dbDump(ctx, name == "fulldump", "")
    case "import", "stickyimport":
        sticky := name == "stickyimport"
        if len(arg) != 2 {
            return nil, errors.New("please provide checksum type and path to sum file")
        }
        return nil, f.dbImport(ctx, arg[0], arg[1], sticky)
    default:
        return nil, fs.ErrorCommandNotFound
    }
}

var commandHelp = []fs.CommandHelp{{
    Name: "drop",
    Short: "Drop cache",
    Long: `Completely drop checksum cache.
Usage Example:
    rclone backend drop hasher:
`,
}, {
    Name: "dump",
    Short: "Dump the database",
    Long: "Dump cache records covered by the current remote",
}, {
    Name: "fulldump",
    Short: "Full dump of the database",
    Long: "Dump all cache records in the database",
}, {
    Name: "import",
    Short: "Import a SUM file",
    Long: `Amend hash cache from a SUM file and bind checksums to files by size/time.
Usage Example:
    rclone backend import hasher:subdir md5 /path/to/sum.md5
`,
}, {
    Name: "stickyimport",
    Short: "Perform fast import of a SUM file",
    Long: `Fill hash cache from a SUM file without verifying file fingerprints.
Usage Example:
    rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5
`,
}}

func (f *Fs) dbDump(ctx context.Context, full bool, root string) error {
    if root == "" {
        remoteFs, err := cache.Get(ctx, f.opt.Remote)
        if err != nil {
            return err
        }
        root = fspath.JoinRootPath(remoteFs.Root(), f.Root())
    }
    op := &kvDump{
        full: full,
        root: root,
        path: f.db.Path(),
        fs: f,
    }
    err := f.db.Do(false, op)
    if err == kv.ErrEmpty {
        fs.Infof(op.path, "empty")
        err = nil
    }
    return err
}

func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bool) error {
    var hashType hash.Type
    if err := hashType.Set(hashName); err != nil {
        return err
    }
    if hashType == hash.None {
        return errors.New("please provide a valid hash type")
    }
    if !f.suppHashes.Contains(hashType) {
        return errors.New("unsupported hash type")
    }
    if !f.keepHashes.Contains(hashType) {
        fs.Infof(nil, "Need not import hashes of this type")
        return nil
    }

    _, sumPath, err := fspath.SplitFs(sumRemote)
    if err != nil {
        return err
    }
    sumFs, err := cache.Get(ctx, sumRemote)
    switch err {
    case fs.ErrorIsFile:
        // ok
    case nil:
        return fmt.Errorf("not a file: %s", sumRemote)
    default:
        return err
    }

    sumObj, err := sumFs.NewObject(ctx, path.Base(sumPath))
    if err != nil {
        return fmt.Errorf("cannot open sum file: %w", err)
    }
    hashes, err := operations.ParseSumFile(ctx, sumObj)
    if err != nil {
        return fmt.Errorf("failed to parse sum file: %w", err)
    }

    if sticky {
        rootPath := f.Fs.Root()
        for remote, hashVal := range hashes {
            key := path.Join(rootPath, remote)
            hashSums := operations.HashSums{hashName: hashVal}
            if err := f.putRawHashes(ctx, key, anyFingerprint, hashSums); err != nil {
                fs.Errorf(nil, "%s: failed to import: %v", remote, err)
            }
        }
        fs.Infof(nil, "Summary: %d checksum(s) imported", len(hashes))
        return nil
    }

    const longImportThreshold = 100
    if len(hashes) > longImportThreshold {
        fs.Infof(nil, "Importing %d checksums. Please wait...", len(hashes))
    }

    doneCount := 0
    err = operations.ListFn(ctx, f, func(obj fs.Object) {
        remote := obj.Remote()
        hash := hashes[remote]
        hashes[remote] = "" // mark as handled
        o, ok := obj.(*Object)
        if ok && hash != "" {
            if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
                fs.Errorf(nil, "%s: failed to import: %v", remote, err)
            }
            accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
            doneCount++
        }
    })
    if err != nil {
        fs.Errorf(nil, "Import failed: %v", err)
    }
    skipCount := 0
    for remote, emptyOrDone := range hashes {
        if emptyOrDone != "" {
            fs.Infof(nil, "Skip vanished object: %s", remote)
            skipCount++
        }
    }
    fs.Infof(nil, "Summary: %d imported, %d skipped", doneCount, skipCount)
    return err
}
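The Command dispatcher above is reached through rclone's generic backend-command mechanism, so the commands are invoked exactly as the help texts show, for example:

    rclone backend dump hasher:
    rclone backend import hasher:subdir md5 /path/to/sum.md5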
@@ -1,508 +0,0 @@
// Package hasher implements a checksum handling overlay backend
package hasher

import (
    "context"
    "encoding/gob"
    "errors"
    "fmt"
    "io"
    "path"
    "strings"
    "sync"
    "time"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/cache"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/fspath"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/kv"
)

// Register with Fs
func init() {
    fs.Register(&fs.RegInfo{
        Name: "hasher",
        Description: "Better checksums for other remotes",
        NewFs: NewFs,
        CommandHelp: commandHelp,
        Options: []fs.Option{{
            Name: "remote",
            Required: true,
            Help: "Remote to cache checksums for (e.g. myRemote:path).",
        }, {
            Name: "hashes",
            Default: fs.CommaSepList{"md5", "sha1"},
            Advanced: false,
            Help: "Comma separated list of supported checksum types.",
        }, {
            Name: "max_age",
            Advanced: false,
            Default: fs.DurationOff,
            Help: "Maximum time to keep checksums in cache (0 = no cache, off = cache forever).",
        }, {
            Name: "auto_size",
            Advanced: true,
            Default: fs.SizeSuffix(0),
            Help: "Auto-update checksum for files smaller than this size (disabled by default).",
        }},
    })
}
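A minimal config sketch for the options registered above (the section name and base path are illustrative; the option values are the defaults shown in the registration):

    [hashsum]
    type = hasher
    remote = /mnt/data
    hashes = md5,sha1
    max_age = off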
// Options defines the configuration for this backend
type Options struct {
    Remote string `config:"remote"`
    Hashes fs.CommaSepList `config:"hashes"`
    AutoSize fs.SizeSuffix `config:"auto_size"`
    MaxAge fs.Duration `config:"max_age"`
}

// Fs represents a wrapped fs.Fs
type Fs struct {
    fs.Fs
    name string
    root string
    wrapper fs.Fs
    features *fs.Features
    opt *Options
    db *kv.DB
    // fingerprinting
    fpTime bool // true if using time in fingerprints
    fpHash hash.Type // hash type to use in fingerprints or None
    // hash types triaged by groups
    suppHashes hash.Set // all supported checksum types
    passHashes hash.Set // passed directly to the base without caching
    slowHashes hash.Set // passed to the base and then cached
    autoHashes hash.Set // calculated in-house and cached
    keepHashes hash.Set // checksums to keep in cache (slow + auto)
}

var warnExperimental sync.Once

// NewFs constructs an Fs from the remote:path string
func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs.Fs, error) {
    if !kv.Supported() {
        return nil, errors.New("hasher is not supported on this OS")
    }
    warnExperimental.Do(func() {
        fs.Infof(nil, "Hasher is EXPERIMENTAL!")
    })

    opt := &Options{}
    err := configstruct.Set(cmap, opt)
    if err != nil {
        return nil, err
    }

    if strings.HasPrefix(opt.Remote, fsname+":") {
        return nil, errors.New("can't point remote at itself")
    }
    remotePath := fspath.JoinRootPath(opt.Remote, rpath)
    baseFs, err := cache.Get(ctx, remotePath)
    if err != nil && err != fs.ErrorIsFile {
        return nil, fmt.Errorf("failed to derive base remote %q: %w", opt.Remote, err)
    }

    f := &Fs{
        Fs: baseFs,
        name: fsname,
        root: rpath,
        opt: opt,
    }
    baseFeatures := baseFs.Features()
    f.fpTime = baseFs.Precision() != fs.ModTimeNotSupported

    if baseFeatures.SlowHash {
        f.slowHashes = f.Fs.Hashes()
    } else {
        f.passHashes = f.Fs.Hashes()
        f.fpHash = f.passHashes.GetOne()
    }

    f.suppHashes = f.passHashes
    f.suppHashes.Add(f.slowHashes.Array()...)

    for _, hashName := range opt.Hashes {
        var ht hash.Type
        if err := ht.Set(hashName); err != nil {
            return nil, fmt.Errorf("invalid token %q in hash string %q", hashName, opt.Hashes.String())
        }
        if !f.slowHashes.Contains(ht) {
            f.autoHashes.Add(ht)
        }
        f.keepHashes.Add(ht)
        f.suppHashes.Add(ht)
    }

    fs.Debugf(f, "Groups by usage: cached %s, passed %s, auto %s, slow %s, supported %s",
        f.keepHashes, f.passHashes, f.autoHashes, f.slowHashes, f.suppHashes)

    var nilSet hash.Set
    if f.keepHashes == nilSet {
        return nil, errors.New("configured hash_names have nothing to keep in cache")
    }

    if f.opt.MaxAge > 0 {
        gob.Register(hashRecord{})
        db, err := kv.Start(ctx, "hasher", f.Fs)
        if err != nil {
            return nil, err
        }
        f.db = db
    }

    stubFeatures := &fs.Features{
        CanHaveEmptyDirectories: true,
        IsLocal: true,
        ReadMimeType: true,
        WriteMimeType: true,
    }
    f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)

    cache.PinUntilFinalized(f.Fs, f)
    return f, err
}

//
// Filesystem
//

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string { return f.name }

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string { return f.root }

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features { return f.features }

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set { return f.suppHashes }

// String returns a description of the FS
// The "hasher::" prefix is a distinctive feature.
func (f *Fs) String() string {
    return fmt.Sprintf("hasher::%s:%s", f.name, f.root)
}

// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs { return f.Fs }

// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs { return f.wrapper }

// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) { f.wrapper = wrapper }

// Wrap base entries into hasher entries.
func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries, err error) {
    hashEntries = baseEntries[:0] // work inplace
    for _, entry := range baseEntries {
        switch x := entry.(type) {
        case fs.Object:
            hashEntries = append(hashEntries, f.wrapObject(x, nil))
        default:
            hashEntries = append(hashEntries, entry) // trash in - trash out
        }
    }
    return hashEntries, nil
}

// List the objects and directories in dir into entries.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    if entries, err = f.Fs.List(ctx, dir); err != nil {
        return nil, err
    }
    return f.wrapEntries(entries)
}

// ListR lists the objects and directories recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
    return f.Fs.Features().ListR(ctx, dir, func(baseEntries fs.DirEntries) error {
        hashEntries, err := f.wrapEntries(baseEntries)
        if err != nil {
            return err
        }
        return callback(hashEntries)
    })
}

// Purge a directory
func (f *Fs) Purge(ctx context.Context, dir string) error {
    if do := f.Fs.Features().Purge; do != nil {
        if err := do(ctx, dir); err != nil {
            return err
        }
        err := f.db.Do(true, &kvPurge{
            dir: path.Join(f.Fs.Root(), dir),
        })
        if err != nil {
            fs.Errorf(f, "Failed to purge some hashes: %v", err)
        }
        return nil
    }
    return fs.ErrorCantPurge
}

// PutStream uploads to the remote path with indeterminate size.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    if do := f.Fs.Features().PutStream; do != nil {
        _ = f.pruneHash(src.Remote())
        oResult, err := do(ctx, in, src, options...)
        return f.wrapObject(oResult, err), err
    }
    return nil, errors.New("PutStream not supported")
}

// PutUnchecked uploads the object, allowing duplicates.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    if do := f.Fs.Features().PutUnchecked; do != nil {
        _ = f.pruneHash(src.Remote())
        oResult, err := do(ctx, in, src, options...)
        return f.wrapObject(oResult, err), err
    }
    return nil, errors.New("PutUnchecked not supported")
}

// pruneHash deletes hash for a path
func (f *Fs) pruneHash(remote string) error {
    return f.db.Do(true, &kvPrune{
        key: path.Join(f.Fs.Root(), remote),
    })
}

// CleanUp the trash in the Fs
func (f *Fs) CleanUp(ctx context.Context) error {
    if do := f.Fs.Features().CleanUp; do != nil {
        return do(ctx)
    }
    return errors.New("CleanUp not supported")
}

// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
    if do := f.Fs.Features().About; do != nil {
        return do(ctx)
    }
    return nil, errors.New("About not supported")
}

// ChangeNotify calls the passed function with a path that has had changes.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
    if do := f.Fs.Features().ChangeNotify; do != nil {
        do(ctx, notifyFunc, pollIntervalChan)
    }
}

// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
    if do := f.Fs.Features().UserInfo; do != nil {
        return do(ctx)
    }
    return nil, fs.ErrorNotImplemented
}

// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
    if do := f.Fs.Features().Disconnect; do != nil {
        return do(ctx)
    }
    return fs.ErrorNotImplemented
}

// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
    if do := f.Fs.Features().MergeDirs; do != nil {
        return do(ctx, dirs)
    }
    return errors.New("MergeDirs not supported")
}

// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
    if do := f.Fs.Features().DirCacheFlush; do != nil {
        do()
    }
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
    if do := f.Fs.Features().PublicLink; do != nil {
        return do(ctx, remote, expire, unlink)
    }
    return "", errors.New("PublicLink not supported")
}

// Copy src to this remote using server-side copy operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    do := f.Fs.Features().Copy
    if do == nil {
        return nil, fs.ErrorCantCopy
    }
    o, ok := src.(*Object)
    if !ok {
        return nil, fs.ErrorCantCopy
    }
    oResult, err := do(ctx, o.Object, remote)
    return f.wrapObject(oResult, err), err
}

// Move src to this remote using server-side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    do := f.Fs.Features().Move
    if do == nil {
        return nil, fs.ErrorCantMove
    }
    o, ok := src.(*Object)
    if !ok {
        return nil, fs.ErrorCantMove
    }
    oResult, err := do(ctx, o.Object, remote)
    if err != nil {
        return nil, err
    }
    _ = f.db.Do(true, &kvMove{
        src: path.Join(f.Fs.Root(), src.Remote()),
        dst: path.Join(f.Fs.Root(), remote),
        dir: false,
        fs: f,
    })
    return f.wrapObject(oResult, nil), nil
}

// DirMove moves src, srcRemote to this remote at dstRemote using server-side move operations.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
    do := f.Fs.Features().DirMove
    if do == nil {
        return fs.ErrorCantDirMove
    }
    srcFs, ok := src.(*Fs)
    if !ok {
        return fs.ErrorCantDirMove
    }
    err := do(ctx, srcFs.Fs, srcRemote, dstRemote)
    if err == nil {
        _ = f.db.Do(true, &kvMove{
            src: path.Join(srcFs.Fs.Root(), srcRemote),
            dst: path.Join(f.Fs.Root(), dstRemote),
            dir: true,
            fs: f,
        })
    }
    return err
}

// Shutdown the backend, closing any background tasks and any cached connections.
func (f *Fs) Shutdown(ctx context.Context) (err error) {
    err = f.db.Stop(false)
    if do := f.Fs.Features().Shutdown; do != nil {
        if err2 := do(ctx); err2 != nil {
            err = err2
        }
    }
    return
}

// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
    o, err := f.Fs.NewObject(ctx, remote)
    return f.wrapObject(o, err), err
}

//
// Object
//

// Object wraps a base object and caches its checksums
type Object struct {
    fs.Object
    f *Fs
}

// Wrap base object into hasher object
func (f *Fs) wrapObject(o fs.Object, err error) *Object {
    if err != nil || o == nil {
        return nil
    }
    return &Object{Object: o, f: f}
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info { return o.f }

// UnWrap returns the wrapped Object
func (o *Object) UnWrap() fs.Object { return o.Object }

// Return a string version
func (o *Object) String() string {
    if o == nil {
        return "<nil>"
    }
    return o.Object.String()
}

// ID returns the ID of the Object if possible
func (o *Object) ID() string {
    if doer, ok := o.Object.(fs.IDer); ok {
        return doer.ID()
    }
    return ""
}

// GetTier returns the Tier of the Object if possible
func (o *Object) GetTier() string {
    if doer, ok := o.Object.(fs.GetTierer); ok {
        return doer.GetTier()
    }
    return ""
}

// SetTier set the Tier of the Object if possible
func (o *Object) SetTier(tier string) error {
    if doer, ok := o.Object.(fs.SetTierer); ok {
        return doer.SetTier(tier)
    }
    return errors.New("SetTier not supported")
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
    if doer, ok := o.Object.(fs.MimeTyper); ok {
        return doer.MimeType(ctx)
    }
    return ""
}

// Check the interfaces are satisfied
var (
    _ fs.Fs = (*Fs)(nil)
    _ fs.Purger = (*Fs)(nil)
    _ fs.Copier = (*Fs)(nil)
    _ fs.Mover = (*Fs)(nil)
    _ fs.DirMover = (*Fs)(nil)
    _ fs.Commander = (*Fs)(nil)
    _ fs.PutUncheckeder = (*Fs)(nil)
    _ fs.PutStreamer = (*Fs)(nil)
    _ fs.CleanUpper = (*Fs)(nil)
    _ fs.UnWrapper = (*Fs)(nil)
    _ fs.ListRer = (*Fs)(nil)
    _ fs.Abouter = (*Fs)(nil)
    _ fs.Wrapper = (*Fs)(nil)
    _ fs.MergeDirser = (*Fs)(nil)
    _ fs.DirCacheFlusher = (*Fs)(nil)
    _ fs.ChangeNotifier = (*Fs)(nil)
    _ fs.PublicLinker = (*Fs)(nil)
    _ fs.UserInfoer = (*Fs)(nil)
    _ fs.Disconnecter = (*Fs)(nil)
    _ fs.Shutdowner = (*Fs)(nil)
    _ fs.Object = (*Object)(nil)
    _ fs.ObjectUnWrapper = (*Object)(nil)
    _ fs.IDer = (*Object)(nil)
    _ fs.SetTierer = (*Object)(nil)
    _ fs.GetTierer = (*Object)(nil)
    _ fs.MimeTyper = (*Object)(nil)
)
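The var block above is the usual Go idiom for compile-time interface checks: assigning a typed nil to a blank variable of the interface type fails the build as soon as the type stops satisfying the interface. In miniature:

    // Fails to compile if *Fs no longer implements fs.Abouter.
    var _ fs.Abouter = (*Fs)(nil)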
@@ -1,78 +0,0 @@
package hasher

import (
    "context"
    "fmt"
    "os"
    "testing"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/obscure"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/fstest/fstests"
    "github.com/rclone/rclone/lib/kv"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func putFile(ctx context.Context, t *testing.T, f fs.Fs, name, data string) fs.Object {
    mtime1 := fstest.Time("2001-02-03T04:05:06.499999999Z")
    item := fstest.Item{Path: name, ModTime: mtime1}
    _, o := fstests.PutTestContents(ctx, t, f, &item, data, true)
    require.NotNil(t, o)
    return o
}

func (f *Fs) testUploadFromCrypt(t *testing.T) {
    // make a temporary local remote
    tempRoot, err := fstest.LocalRemote()
    require.NoError(t, err)
    defer func() {
        _ = os.RemoveAll(tempRoot)
    }()

    // make a temporary crypt remote
    ctx := context.Background()
    pass := obscure.MustObscure("crypt")
    remote := fmt.Sprintf(":crypt,remote=%s,password=%s:", tempRoot, pass)
    cryptFs, err := fs.NewFs(ctx, remote)
    require.NoError(t, err)

    // make a test file on the crypt remote
    const dirName = "from_crypt_1"
    const fileName = dirName + "/file_from_crypt_1"
    const longTime = fs.ModTimeNotSupported
    src := putFile(ctx, t, cryptFs, fileName, "doggy froggy")

    // ensure that hash does not exist yet
    _ = f.pruneHash(fileName)
    hashType := f.keepHashes.GetOne()
    hash, err := f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
    assert.Error(t, err)
    assert.Empty(t, hash)

    // upload file to hasher
    in, err := src.Open(ctx)
    require.NoError(t, err)
    dst, err := f.Put(ctx, in, src)
    require.NoError(t, err)
    assert.NotNil(t, dst)

    // check that hash was created
    hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
    assert.NoError(t, err)
    assert.NotEmpty(t, hash)
    //t.Logf("hash is %q", hash)
    _ = operations.Purge(ctx, f, dirName)
}

// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
    if !kv.Supported() {
        t.Skip("hasher is not supported on this OS")
    }
    t.Run("UploadFromCrypt", f.testUploadFromCrypt)
}

var _ fstests.InternalTester = (*Fs)(nil)
@@ -1,38 +0,0 @@
package hasher_test

import (
	"os"
	"path/filepath"
	"testing"

	"github.com/rclone/rclone/backend/hasher"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
	"github.com/rclone/rclone/lib/kv"

	_ "github.com/rclone/rclone/backend/all" // for integration tests
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	if !kv.Supported() {
		t.Skip("hasher is not supported on this OS")
	}
	opt := fstests.Opt{
		RemoteName: *fstest.RemoteName,
		NilObject:  (*hasher.Object)(nil),
		UnimplementableFsMethods: []string{
			"OpenWriterAt",
		},
		UnimplementableObjectMethods: []string{},
	}
	if *fstest.RemoteName == "" {
		tempDir := filepath.Join(os.TempDir(), "rclone-hasher-test")
		opt.ExtraConfig = []fstests.ExtraConfigItem{
			{Name: "TestHasher", Key: "type", Value: "hasher"},
			{Name: "TestHasher", Key: "remote", Value: tempDir},
		}
		opt.RemoteName = "TestHasher:"
	}
	fstests.Run(t, &opt)
}
@@ -1,315 +0,0 @@
package hasher

import (
	"bytes"
	"context"
	"encoding/gob"
	"errors"
	"fmt"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/lib/kv"
)

const (
	timeFormat     = "2006-01-02T15:04:05.000000000-0700"
	anyFingerprint = "*"
)

type hashMap map[hash.Type]string

type hashRecord struct {
	Fp      string // fingerprint
	Hashes  operations.HashSums
	Created time.Time
}

func (r *hashRecord) encode(key string) ([]byte, error) {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(r); err != nil {
		fs.Debugf(key, "hasher encoding %v: %v", r, err)
		return nil, err
	}
	return buf.Bytes(), nil
}

func (r *hashRecord) decode(key string, data []byte) error {
	if err := gob.NewDecoder(bytes.NewBuffer(data)).Decode(r); err != nil {
		fs.Debugf(key, "hasher decoding %q failed: %v", data, err)
		return err
	}
	return nil
}
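As a quick sanity check of the record round trip, a minimal standalone sketch using only the standard library (the record struct below is an illustrative stand-in for hashRecord, not part of the diff):

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"time"
)

// record mirrors the shape of hashRecord for illustration only.
type record struct {
	Fp      string
	Hashes  map[string]string
	Created time.Time
}

func main() {
	in := record{
		Fp:      "12,2001-02-03T04:05:06.000000000+0000,-",
		Hashes:  map[string]string{"md5": "5d41402abc4b2a76b9719d911017c592"},
		Created: time.Now(),
	}
	var buf bytes.Buffer
	// encode to bytes, as hashRecord.encode does
	if err := gob.NewEncoder(&buf).Encode(&in); err != nil {
		panic(err)
	}
	var out record
	// decode back, as hashRecord.decode does
	if err := gob.NewDecoder(bytes.NewBuffer(buf.Bytes())).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Fp, out.Hashes["md5"]) // round-trips intact
}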

// kvPrune: prune a single hash
type kvPrune struct {
	key string
}

func (op *kvPrune) Do(ctx context.Context, b kv.Bucket) error {
	return b.Delete([]byte(op.key))
}

// kvPurge: delete a subtree
type kvPurge struct {
	dir string
}

func (op *kvPurge) Do(ctx context.Context, b kv.Bucket) error {
	dir := op.dir
	if !strings.HasSuffix(dir, "/") {
		dir += "/"
	}
	var items []string
	cur := b.Cursor()
	bkey, _ := cur.Seek([]byte(dir))
	for bkey != nil {
		key := string(bkey)
		if !strings.HasPrefix(key, dir) {
			break
		}
		items = append(items, key[len(dir):])
		bkey, _ = cur.Next()
	}
	nerr := 0
	for _, sub := range items {
		if err := b.Delete([]byte(dir + sub)); err != nil {
			nerr++
		}
	}
	fs.Debugf(dir, "%d hashes purged, %d failed", len(items)-nerr, nerr)
	return nil
}
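The trailing slash appended before the scan is load-bearing: seeking to the bare prefix "dir" would also sweep up keys of a sibling like "dir2/x". A standalone sketch of the same seek-and-scan pattern, with a sorted slice standing in for the bucket cursor:

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	keys := []string{"dir/a", "dir/b", "dir2/c", "other"}
	sort.Strings(keys)
	prefix := "dir"
	if !strings.HasSuffix(prefix, "/") {
		prefix += "/" // without this, "dir2/c" would match too
	}
	// Seek to the first key >= prefix, then scan while the prefix holds,
	// mirroring cur.Seek / cur.Next on the kv bucket.
	for i := sort.SearchStrings(keys, prefix); i < len(keys) && strings.HasPrefix(keys[i], prefix); i++ {
		fmt.Println("would delete:", keys[i])
	}
}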

// kvMove: assign hashes to new path
type kvMove struct {
	src string
	dst string
	dir bool
	fs  *Fs
}

func (op *kvMove) Do(ctx context.Context, b kv.Bucket) error {
	src, dst := op.src, op.dst
	if !op.dir {
		err := moveHash(b, src, dst)
		fs.Debugf(op.fs, "moving cached hash %s to %s (err: %v)", src, dst, err)
		return err
	}

	if !strings.HasSuffix(src, "/") {
		src += "/"
	}
	if !strings.HasSuffix(dst, "/") {
		dst += "/"
	}

	var items []string
	cur := b.Cursor()
	bkey, _ := cur.Seek([]byte(src))
	for bkey != nil {
		key := string(bkey)
		if !strings.HasPrefix(key, src) {
			break
		}
		items = append(items, key[len(src):])
		bkey, _ = cur.Next()
	}

	nerr := 0
	for _, suffix := range items {
		srcKey, dstKey := src+suffix, dst+suffix
		err := moveHash(b, srcKey, dstKey)
		fs.Debugf(op.fs, "Rename cache record %s -> %s (err: %v)", srcKey, dstKey, err)
		if err != nil {
			nerr++
		}
	}
	fs.Debugf(op.fs, "%d hashes moved, %d failed", len(items)-nerr, nerr)
	return nil
}

func moveHash(b kv.Bucket, src, dst string) error {
	data := b.Get([]byte(src))
	err := b.Delete([]byte(src))
	if err != nil || len(data) == 0 {
		return err
	}
	return b.Put([]byte(dst), data)
}

// kvGet: get single hash from database
type kvGet struct {
	key  string
	fp   string
	hash string
	val  string
	age  time.Duration
}

func (op *kvGet) Do(ctx context.Context, b kv.Bucket) error {
	data := b.Get([]byte(op.key))
	if len(data) == 0 {
		return errors.New("no record")
	}
	var r hashRecord
	if err := r.decode(op.key, data); err != nil {
		return errors.New("invalid record")
	}
	if !(r.Fp == anyFingerprint || op.fp == anyFingerprint || r.Fp == op.fp) {
		return errors.New("fingerprint changed")
	}
	if time.Since(r.Created) > op.age {
		return errors.New("record timed out")
	}
	if r.Hashes != nil {
		op.val = r.Hashes[op.hash]
	}
	return nil
}

// kvPut: set hashes for an object by key
type kvPut struct {
	key    string
	fp     string
	hashes operations.HashSums
	age    time.Duration
}

func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) {
	data := b.Get([]byte(op.key))
	var r hashRecord
	if len(data) > 0 {
		err = r.decode(op.key, data)
		if err != nil || r.Fp != op.fp || time.Since(r.Created) > op.age {
			r.Hashes = nil
		}
	}
	if len(r.Hashes) == 0 {
		r.Created = time.Now()
		r.Hashes = operations.HashSums{}
		r.Fp = op.fp
	}

	for hashType, hashVal := range op.hashes {
		r.Hashes[hashType] = hashVal
	}
	if data, err = r.encode(op.key); err != nil {
		return fmt.Errorf("marshal failed: %w", err)
	}
	if err = b.Put([]byte(op.key), data); err != nil {
		return fmt.Errorf("put failed: %w", err)
	}
	return err
}

// kvDump: dump the database.
// Note: a long dump can cause concurrent operations to fail.
type kvDump struct {
	full  bool
	root  string
	path  string
	fs    *Fs
	num   int
	total int
}

func (op *kvDump) Do(ctx context.Context, b kv.Bucket) error {
	f, baseRoot, dbPath := op.fs, op.root, op.path

	if op.full {
		total := 0
		num := 0
		_ = b.ForEach(func(bkey, data []byte) error {
			total++
			key := string(bkey)
			include := (baseRoot == "" || key == baseRoot || strings.HasPrefix(key, baseRoot+"/"))
			var r hashRecord
			if err := r.decode(key, data); err != nil {
				fs.Errorf(nil, "%s: invalid record: %v", key, err)
				return nil
			}
			fmt.Println(f.dumpLine(&r, key, include, nil))
			if include {
				num++
			}
			return nil
		})
		fs.Infof(dbPath, "%d records out of %d", num, total)
		op.num, op.total = num, total // for unit tests
		return nil
	}

	num := 0
	cur := b.Cursor()
	var bkey, data []byte
	if baseRoot != "" {
		bkey, data = cur.Seek([]byte(baseRoot))
	} else {
		bkey, data = cur.First()
	}
	for bkey != nil {
		key := string(bkey)
		if !(baseRoot == "" || key == baseRoot || strings.HasPrefix(key, baseRoot+"/")) {
			break
		}
		var r hashRecord
		if err := r.decode(key, data); err != nil {
			fs.Errorf(nil, "%s: invalid record: %v", key, err)
			bkey, data = cur.Next() // advance past the bad record or this loop would never terminate
			continue
		}
		if key = strings.TrimPrefix(key[len(baseRoot):], "/"); key == "" {
			key = "/"
		}
		fmt.Println(f.dumpLine(&r, key, true, nil))
		num++
		bkey, data = cur.Next()
	}
	fs.Infof(dbPath, "%d records", num)
	op.num = num // for unit tests
	return nil
}

func (f *Fs) dumpLine(r *hashRecord, path string, include bool, err error) string {
	var status string
	switch {
	case !include:
		status = "ext"
	case err != nil:
		status = "bad"
	case r.Fp == anyFingerprint:
		status = "stk"
	default:
		status = "ok "
	}

	var hashes []string
	for _, hashType := range f.keepHashes.Array() {
		hashName := hashType.String()
		hashVal := r.Hashes[hashName]
		if hashVal == "" || err != nil {
			hashVal = "-"
		}
		hashVal = fmt.Sprintf("%-*s", hash.Width(hashType, false), hashVal)
		hashes = append(hashes, hashName+":"+hashVal)
	}
	hashesStr := strings.Join(hashes, " ")

	age := time.Since(r.Created).Round(time.Second)
	if age > 24*time.Hour {
		age = age.Round(time.Hour)
	}
	if err != nil {
		age = 0
	}
	ageStr := age.String()
	if strings.HasSuffix(ageStr, "h0m0s") {
		ageStr = strings.TrimSuffix(ageStr, "0m0s")
	}

	return fmt.Sprintf("%s %s %9s %s", status, hashesStr, ageStr, path)
}
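For orientation, a line from this formatter comes out roughly like the following (hypothetical record with only MD5 kept; the value is padded to the hash width and the age right-aligned to nine characters):

ok  md5:5d41402abc4b2a76b9719d911017c592   2h35m0s docs/file.txt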

@@ -1,305 +0,0 @@
package hasher

import (
	"context"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"path"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
)

// obtain hash for an object
func (o *Object) getHash(ctx context.Context, hashType hash.Type) (string, error) {
	maxAge := time.Duration(o.f.opt.MaxAge)
	if maxAge <= 0 {
		return "", nil
	}
	fp := o.fingerprint(ctx)
	if fp == "" {
		return "", errors.New("fingerprint failed")
	}
	return o.f.getRawHash(ctx, hashType, o.Remote(), fp, maxAge)
}

// obtain hash for a path
func (f *Fs) getRawHash(ctx context.Context, hashType hash.Type, remote, fp string, age time.Duration) (string, error) {
	key := path.Join(f.Fs.Root(), remote)
	op := &kvGet{
		key:  key,
		fp:   fp,
		hash: hashType.String(),
		age:  age,
	}
	err := f.db.Do(false, op)
	return op.val, err
}

// put new hashes for an object
func (o *Object) putHashes(ctx context.Context, rawHashes hashMap) error {
	if o.f.opt.MaxAge <= 0 {
		return nil
	}
	fp := o.fingerprint(ctx)
	if fp == "" {
		return nil
	}
	key := path.Join(o.f.Fs.Root(), o.Remote())
	hashes := operations.HashSums{}
	for hashType, hashVal := range rawHashes {
		hashes[hashType.String()] = hashVal
	}
	return o.f.putRawHashes(ctx, key, fp, hashes)
}

// set hashes for a path without any validation
func (f *Fs) putRawHashes(ctx context.Context, key, fp string, hashes operations.HashSums) error {
	return f.db.Do(true, &kvPut{
		key:    key,
		fp:     fp,
		hashes: hashes,
		age:    time.Duration(f.opt.MaxAge),
	})
}

// Hash returns the selected checksum of the file or "" if unavailable.
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (hashVal string, err error) {
	f := o.f
	if f.passHashes.Contains(hashType) {
		fs.Debugf(o, "pass %s", hashType)
		return o.Object.Hash(ctx, hashType)
	}
	if !f.suppHashes.Contains(hashType) {
		fs.Debugf(o, "unsupp %s", hashType)
		return "", hash.ErrUnsupported
	}
	if hashVal, err = o.getHash(ctx, hashType); err != nil {
		fs.Debugf(o, "getHash: %v", err)
		err = nil
		hashVal = ""
	}
	if hashVal != "" {
		fs.Debugf(o, "cached %s = %q", hashType, hashVal)
		return hashVal, nil
	}
	if f.slowHashes.Contains(hashType) {
		fs.Debugf(o, "slow %s", hashType)
		hashVal, err = o.Object.Hash(ctx, hashType)
		if err == nil && hashVal != "" && f.keepHashes.Contains(hashType) {
			if err = o.putHashes(ctx, hashMap{hashType: hashVal}); err != nil {
				fs.Debugf(o, "putHashes: %v", err)
				err = nil
			}
		}
		return hashVal, err
	}
	if f.autoHashes.Contains(hashType) && o.Size() < int64(f.opt.AutoSize) {
		_ = o.updateHashes(ctx)
		if hashVal, err = o.getHash(ctx, hashType); err != nil {
			fs.Debugf(o, "auto %s = %q (%v)", hashType, hashVal, err)
			err = nil
		}
	}
	return hashVal, err
}

// updateHashes performs an implicit "rclone hashsum --download" and updates the cache.
func (o *Object) updateHashes(ctx context.Context) error {
	r, err := o.Open(ctx)
	if err != nil {
		fs.Infof(o, "update failed (open): %v", err)
		return err
	}
	defer func() {
		_ = r.Close()
	}()
	if _, err = io.Copy(ioutil.Discard, r); err != nil {
		fs.Infof(o, "update failed (copy): %v", err)
		return err
	}
	return nil
}

// Update the object with the given data, time and size.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	_ = o.f.pruneHash(src.Remote())
	return o.Object.Update(ctx, in, src, options...)
}

// Remove an object.
func (o *Object) Remove(ctx context.Context) error {
	_ = o.f.pruneHash(o.Remote())
	return o.Object.Remove(ctx)
}

// SetModTime sets the modification time of the file.
// It also prunes the cache entry when the modtime changes so that
// touching a file will trigger checksum recalculation even
// on backends that don't provide modTime with fingerprint.
func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
	if mtime != o.Object.ModTime(ctx) {
		_ = o.f.pruneHash(o.Remote())
	}
	return o.Object.SetModTime(ctx, mtime)
}

// Open opens the file for read.
// Full reads will also update object hashes.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (r io.ReadCloser, err error) {
	size := o.Size()
	var offset, limit int64 = 0, -1
	for _, option := range options {
		switch opt := option.(type) {
		case *fs.SeekOption:
			offset = opt.Offset
		case *fs.RangeOption:
			offset, limit = opt.Decode(size)
		}
	}
	if offset < 0 {
		return nil, errors.New("invalid offset")
	}
	if limit < 0 {
		limit = size - offset
	}
	if r, err = o.Object.Open(ctx, options...); err != nil {
		return nil, err
	}
	if offset != 0 || limit < size {
		// It's a partial read
		return r, err
	}
	return o.f.newHashingReader(ctx, r, func(sums hashMap) {
		if err := o.putHashes(ctx, sums); err != nil {
			fs.Infof(o, "auto hashing error: %v", err)
		}
	})
}

// Put data into the remote path with the given modTime and size
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	var (
		o      *Object
		common hash.Set
		rehash bool
		hashes hashMap
	)
	if fsrc := src.Fs(); fsrc != nil {
		common = fsrc.Hashes().Overlap(f.keepHashes)
		// Rehash if the source does not have all required hashes or hashing is slow
		rehash = fsrc.Features().SlowHash || common != f.keepHashes
	}

	wrapIn := in
	if rehash {
		r, err := f.newHashingReader(ctx, in, func(sums hashMap) {
			hashes = sums
		})
		fs.Debugf(src, "Rehash in-flight due to incomplete or slow source set %v (err: %v)", common, err)
		if err == nil {
			wrapIn = r
		} else {
			rehash = false
		}
	}

	_ = f.pruneHash(src.Remote())
	oResult, err := f.Fs.Put(ctx, wrapIn, src, options...)
	o = f.wrapObject(oResult, err)
	if o == nil {
		return nil, err
	}

	if !rehash {
		hashes = hashMap{}
		for _, ht := range common.Array() {
			if h, e := src.Hash(ctx, ht); e == nil && h != "" {
				hashes[ht] = h
			}
		}
	}
	if len(hashes) > 0 {
		err := o.putHashes(ctx, hashes)
		fs.Debugf(o, "Applied %d source hashes, err: %v", len(hashes), err)
	}
	return o, err
}
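The rehash decision above is plain set arithmetic on hash.Set. A hedged sketch of the same check in isolation (the two sets and the SlowHash flag are illustrative stand-ins for f.keepHashes and the source's features):

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/hash"
)

func main() {
	keepHashes := hash.NewHashSet(hash.MD5, hash.SHA1) // what the hasher wants to store
	srcHashes := hash.NewHashSet(hash.MD5)             // what the source backend offers
	slowHash := false                                  // stand-in for fsrc.Features().SlowHash

	common := srcHashes.Overlap(keepHashes)
	rehash := slowHash || common != keepHashes // SHA1 is missing here, so re-hash in flight
	fmt.Println("common:", common, "rehash:", rehash)
}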

type hashingReader struct {
	rd     io.Reader
	hasher *hash.MultiHasher
	fun    func(hashMap)
}

func (f *Fs) newHashingReader(ctx context.Context, rd io.Reader, fun func(hashMap)) (*hashingReader, error) {
	hasher, err := hash.NewMultiHasherTypes(f.keepHashes)
	if err != nil {
		return nil, err
	}
	hr := &hashingReader{
		rd:     rd,
		hasher: hasher,
		fun:    fun,
	}
	return hr, nil
}

func (r *hashingReader) Read(p []byte) (n int, err error) {
	n, err = r.rd.Read(p)
	if err != nil && err != io.EOF {
		r.hasher = nil
	}
	if r.hasher != nil {
		if _, errHash := r.hasher.Write(p[:n]); errHash != nil {
			r.hasher = nil
			err = errHash
		}
	}
	if err == io.EOF && r.hasher != nil {
		r.fun(r.hasher.Sums())
		r.hasher = nil
	}
	return
}
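The same hash-as-you-read idea, reduced to the standard library for illustration (md5 and TeeReader stand in for the MultiHasher plumbing above):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	src := strings.NewReader("doggy froggy")
	h := md5.New()
	// TeeReader feeds every byte read from src into the hash,
	// so a full read leaves the digest ready, just like hashingReader.
	if _, err := io.Copy(ioutil.Discard, io.TeeReader(src, h)); err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}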

func (r *hashingReader) Close() error {
	if rc, ok := r.rd.(io.ReadCloser); ok {
		return rc.Close()
	}
	return nil
}

// Return the object fingerprint or an empty string in case of errors
//
// Note that we can't use the generic `fs.Fingerprint` here because
// this fingerprint is used to pick _derived hashes_ that are slow
// to calculate or completely unsupported by the base remote.
//
// The hasher fingerprint must be based on `fsHash`, the first _fast_
// hash supported _by the underlying remote_ (if there is one),
// while `fs.Fingerprint` would select a hash _produced by hasher_,
// creating an unresolvable fingerprint loop.
func (o *Object) fingerprint(ctx context.Context) string {
	size := o.Object.Size()
	timeStr := "-"
	if o.f.fpTime {
		timeStr = o.Object.ModTime(ctx).UTC().Format(timeFormat)
		if timeStr == "" {
			return ""
		}
	}
	hashStr := "-"
	if o.f.fpHash != hash.None {
		var err error
		hashStr, err = o.Object.Hash(ctx, o.f.fpHash)
		if hashStr == "" || err != nil {
			return ""
		}
	}
	return fmt.Sprintf("%d,%s,%s", size, timeStr, hashStr)
}
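Concretely, the fingerprint is just "size,modtime,hash" with "-" for disabled parts. A standalone sketch with made-up values:

package main

import (
	"fmt"
	"time"
)

const timeFormat = "2006-01-02T15:04:05.000000000-0700"

func main() {
	size := int64(12)
	mtime := time.Date(2001, 2, 3, 4, 5, 6, 0, time.UTC)
	hashStr := "5d41402abc4b2a76b9719d911017c592" // hypothetical MD5 from the base remote
	fmt.Printf("%d,%s,%s\n", size, mtime.UTC().Format(timeFormat), hashStr)
	// Output: 12,2001-02-03T04:05:06.000000000+0000,5d41402abc4b2a76b9719d911017c592
}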

@@ -1,4 +1,3 @@
//go:build !plan9
// +build !plan9

package hdfs

@@ -263,98 +262,6 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
	return f.client.RemoveAll(realpath)
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// Get the real paths from the remote specs:
	sourcePath := srcObj.fs.realpath(srcObj.remote)
	targetPath := f.realpath(remote)
	fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath)

	// Make sure the target folder exists:
	dirname := path.Dir(targetPath)
	err := f.client.MkdirAll(dirname, 0755)
	if err != nil {
		return nil, err
	}

	// Do the move
	// Note that the underlying HDFS library hard-codes Overwrite=True, but this is expected rclone behaviour.
	err = f.client.Rename(sourcePath, targetPath)
	if err != nil {
		return nil, err
	}

	// Look up the resulting object
	info, err := f.client.Stat(targetPath)
	if err != nil {
		return nil, err
	}

	// And return it:
	return &Object{
		fs:      f,
		remote:  remote,
		size:    info.Size(),
		modTime: info.ModTime(),
	}, nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If the destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
	srcFs, ok := src.(*Fs)
	if !ok {
		return fs.ErrorCantDirMove
	}

	// Get the real paths from the remote specs:
	sourcePath := srcFs.realpath(srcRemote)
	targetPath := f.realpath(dstRemote)
	fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath)

	// Check if the destination exists:
	info, err := f.client.Stat(targetPath)
	if err == nil {
		fs.Debugf(f, "target directory already exists, IsDir = [%t]", info.IsDir())
		return fs.ErrorDirExists
	}

	// Make sure the target's parent folder exists:
	dirname := path.Dir(targetPath)
	err = f.client.MkdirAll(dirname, 0755)
	if err != nil {
		return err
	}

	// Do the move
	err = f.client.Rename(sourcePath, targetPath)
	if err != nil {
		return err
	}

	return nil
}

// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	info, err := f.client.StatFs()

@@ -410,6 +317,4 @@ var (
	_ fs.Purger      = (*Fs)(nil)
	_ fs.PutStreamer = (*Fs)(nil)
	_ fs.Abouter     = (*Fs)(nil)
	_ fs.Mover       = (*Fs)(nil)
	_ fs.DirMover    = (*Fs)(nil)
)

@@ -1,4 +1,3 @@
//go:build !plan9
// +build !plan9

package hdfs
@@ -19,28 +18,35 @@ func init() {
	NewFs: NewFs,
	Options: []fs.Option{{
		Name: "namenode",
		Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
		Help: "hadoop name node and port",
		Required: true,
		Examples: []fs.OptionExample{{
			Value: "namenode:8020",
			Help:  "Connect to host namenode at port 8020",
		}},
	}, {
		Name: "username",
		Help: "Hadoop user name.",
		Help: "hadoop user name",
		Required: false,
		Examples: []fs.OptionExample{{
			Value: "root",
			Help:  "Connect to hdfs as root.",
			Help:  "Connect to hdfs as root",
		}},
	}, {
		Name: "service_principal_name",
		Help: `Kerberos service principal name for the namenode.
		Help: `Kerberos service principal name for the namenode

Enables KERBEROS authentication. Specifies the Service Principal Name
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
(<SERVICE>/<FQDN>) for the namenode.`,
		Required: false,
		Examples: []fs.OptionExample{{
			Value: "hdfs/namenode.hadoop.docker",
			Help:  "Namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.",
		}},
		Advanced: true,
	}, {
		Name: "data_transfer_protection",
		Help: `Kerberos data transfer protection: authentication|integrity|privacy.
		Help: `Kerberos data transfer protection: authentication|integrity|privacy

Specifies whether or not authentication, data signature integrity
checks, and wire encryption are required when communicating with the

@@ -1,6 +1,5 @@
// Test HDFS filesystem interface

//go:build !plan9
// +build !plan9

package hdfs_test

@@ -1,7 +1,6 @@
// Build for hdfs for unsupported platforms to stop go complaining
// about "no buildable Go source files "

//go:build plan9
// +build plan9

package hdfs

@@ -1,4 +1,3 @@
//go:build !plan9
// +build !plan9

package hdfs
@@ -6,8 +6,6 @@ package http

import (
	"context"
	"errors"
	"fmt"
	"io"
	"mime"
	"net/http"
@@ -18,6 +16,7 @@ import (
	"sync"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
@@ -39,18 +38,25 @@ func init() {
	NewFs: NewFs,
	Options: []fs.Option{{
		Name: "url",
		Help: "URL of http host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
		Help: "URL of http host to connect to",
		Required: true,
		Examples: []fs.OptionExample{{
			Value: "https://example.com",
			Help:  "Connect to example.com",
		}, {
			Value: "https://user:pass@example.com",
			Help:  "Connect to example.com using a username and password",
		}},
	}, {
		Name: "headers",
		Help: `Set HTTP headers for all transactions.
		Help: `Set HTTP headers for all transactions

Use this to set additional HTTP headers for all transactions.
Use this to set additional HTTP headers for all transactions

The input format is a comma separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.

For example, to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.

You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
`,
@@ -58,7 +64,7 @@ You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'
	Advanced: true,
}, {
	Name: "no_slash",
	Help: `Set this if the site doesn't end directories with /.
	Help: `Set this if the site doesn't end directories with /

Use this if your target website does not use / on the end of
directories.
@@ -74,7 +80,7 @@ directories.`,
	Advanced: true,
}, {
	Name: "no_head",
	Help: `Don't use HEAD requests to find file sizes in dir listing.
	Help: `Don't use HEAD requests to find file sizes in dir listing

If your site is being very slow to load then you can try this option.
Normally rclone does a HEAD request for each potential file in a
@@ -133,7 +139,7 @@ func statusError(res *http.Response, err error) error {
	}
	if res.StatusCode < 200 || res.StatusCode > 299 {
		_ = res.Body.Close()
		return fmt.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
		return errors.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
	}
	return nil
}
@@ -378,15 +384,15 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
	URL := f.url(dir)
	u, err := url.Parse(URL)
	if err != nil {
		return nil, fmt.Errorf("failed to readDir: %w", err)
		return nil, errors.Wrap(err, "failed to readDir")
	}
	if !strings.HasSuffix(URL, "/") {
		return nil, fmt.Errorf("internal error: readDir URL %q didn't end in /", URL)
		return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
	}
	// Do the request
	req, err := http.NewRequestWithContext(ctx, "GET", URL, nil)
	if err != nil {
		return nil, fmt.Errorf("readDir failed: %w", err)
		return nil, errors.Wrap(err, "readDir failed")
	}
	f.addHeaders(req)
	res, err := f.httpClient.Do(req)
@@ -398,7 +404,7 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
	}
	err = statusError(res, err)
	if err != nil {
		return nil, fmt.Errorf("failed to readDir: %w", err)
		return nil, errors.Wrap(err, "failed to readDir")
	}

	contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
@@ -406,10 +412,10 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
	case "text/html":
		names, err = parse(u, res.Body)
		if err != nil {
			return nil, fmt.Errorf("readDir: %w", err)
			return nil, errors.Wrap(err, "readDir")
		}
	default:
		return nil, fmt.Errorf("Can't parse content type %q", contentType)
		return nil, errors.Errorf("Can't parse content type %q", contentType)
	}
	return names, nil
}
@@ -429,7 +435,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	}
	names, err := f.readDir(ctx, dir)
	if err != nil {
		return nil, fmt.Errorf("error listing %q: %w", dir, err)
		return nil, errors.Wrapf(err, "error listing %q", dir)
	}
	var (
		entriesMu sync.Mutex // to protect entries
@@ -541,7 +547,7 @@ func (o *Object) stat(ctx context.Context) error {
	url := o.url()
	req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
	if err != nil {
		return fmt.Errorf("stat failed: %w", err)
		return errors.Wrap(err, "stat failed")
	}
	o.fs.addHeaders(req)
	res, err := o.fs.httpClient.Do(req)
@@ -550,7 +556,7 @@ func (o *Object) stat(ctx context.Context) error {
	}
	err = statusError(res, err)
	if err != nil {
		return fmt.Errorf("failed to stat: %w", err)
		return errors.Wrap(err, "failed to stat")
	}
	t, err := http.ParseTime(res.Header.Get("Last-Modified"))
	if err != nil {
@@ -563,7 +569,7 @@ func (o *Object) stat(ctx context.Context) error {
	if o.fs.opt.NoSlash {
		mediaType, _, err := mime.ParseMediaType(o.contentType)
		if err != nil {
			return fmt.Errorf("failed to parse Content-Type: %q: %w", o.contentType, err)
			return errors.Wrapf(err, "failed to parse Content-Type: %q", o.contentType)
		}
		if mediaType == "text/html" {
			return fs.ErrorNotAFile
@@ -589,7 +595,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	url := o.url()
	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
	if err != nil {
		return nil, fmt.Errorf("Open failed: %w", err)
		return nil, errors.Wrap(err, "Open failed")
	}

	// Add optional headers
@@ -602,7 +608,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	res, err := o.fs.httpClient.Do(req)
	err = statusError(res, err)
	if err != nil {
		return nil, fmt.Errorf("Open failed: %w", err)
		return nil, errors.Wrap(err, "Open failed")
	}
	return res.Body, nil
}
@@ -47,7 +47,7 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
	ts := httptest.NewServer(handler)

	// Configure the remote
	configfile.Install()
	configfile.LoadConfig(context.Background())
	// fs.Config.LogLevel = fs.LogLevelDebug
	// fs.Config.DumpHeaders = true
	// fs.Config.DumpBodies = true

@@ -9,14 +9,15 @@ package hubic
import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"strings"
	"time"

	swiftLib "github.com/ncw/swift/v2"
	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/swift"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
@@ -55,10 +56,11 @@ func init() {
	Name:        "hubic",
	Description: "Hubic",
	NewFs:       NewFs,
	Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
		return oauthutil.ConfigOut("", &oauthutil.Options{
			OAuth2Config: oauthConfig,
		})
	Config: func(ctx context.Context, name string, m configmap.Mapper) {
		err := oauthutil.Config(ctx, "hubic", name, m, oauthConfig, nil)
		if err != nil {
			log.Fatalf("Failed to configure token: %v", err)
		}
	},
	Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
})
@@ -120,7 +122,7 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		body, _ := ioutil.ReadAll(resp.Body)
		bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
		return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
		return errors.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
	}
	decoder := json.NewDecoder(resp.Body)
	var result credentials
@@ -146,7 +148,7 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to configure Hubic: %w", err)
		return nil, errors.Wrap(err, "failed to configure Hubic")
	}

	f := &Fs{
@@ -163,7 +165,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	}
	err = c.Authenticate(ctx)
	if err != nil {
		return nil, fmt.Errorf("error authenticating swift connection: %w", err)
		return nil, errors.Wrap(err, "error authenticating swift connection")
	}

	// Parse config into swift.Options struct

@@ -2,9 +2,10 @@ package api

import (
	"encoding/xml"
	"errors"
	"fmt"
	"time"

	"github.com/pkg/errors"
)

const (
@@ -367,7 +368,6 @@ type JottaFile struct {
	XMLName         xml.Name
	Name            string `xml:"name,attr"`
	Deleted         Flag   `xml:"deleted,attr"`
	PublicURI       string `xml:"publicURI"`
	PublicSharePath string `xml:"publicSharePath"`
	State           string `xml:"currentRevision>state"`
	CreatedAt       Time   `xml:"currentRevision>created"`

File diff suppressed because it is too large
@@ -32,29 +32,29 @@ func init() {
	NewFs: NewFs,
	Options: []fs.Option{{
		Name: "endpoint",
		Help: "The Koofr API endpoint to use.",
		Help: "The Koofr API endpoint to use",
		Default:  "https://app.koofr.net",
		Required: true,
		Advanced: true,
	}, {
		Name: "mountid",
		Help: "Mount ID of the mount to use.\n\nIf omitted, the primary mount is used.",
		Help: "Mount ID of the mount to use. If omitted, the primary mount is used.",
		Required: false,
		Default:  "",
		Advanced: true,
	}, {
		Name: "setmtime",
		Help: "Does the backend support setting modification time.\n\nSet this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
		Help: "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
		Default:  true,
		Required: true,
		Advanced: true,
	}, {
		Name: "user",
		Help: "Your Koofr user name.",
		Help: "Your Koofr user name",
		Required: true,
	}, {
		Name: "password",
		Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
		Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
		IsPassword: true,
		Required:   true,
	}, {
@@ -344,7 +344,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (obj fs.Object, err e
	return nil, translateErrorsObject(err)
}
if info.Type == "dir" {
	return nil, fs.ErrorIsDir
	return nil, fs.ErrorNotAFile
}
return &Object{
	fs: f,
@@ -534,7 +534,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
	return nil
}

// About reports space usage (with a MiB precision)
// About reports space usage (with a MB precision)
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	mount, err := f.client.MountsDetails(f.mountID)
	if err != nil {
@@ -608,25 +608,5 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
	if err != nil {
		return "", translateErrorsDir(err)
	}

	// The URL returned by the API looks like the following:
	//
	//   https://app.koofr.net/links/35d9fb92-74a3-4930-b4ed-57f123bfb1a6
	//
	// The direct URL looks like the following:
	//
	//   https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F
	//
	// I am not sure about the meaning of the "path" parameter; in my experiments
	// it is always "%2F", and omitting it or putting any other value
	// results in 404.
	//
	// There is one more quirk: a direct link to a file in / returns that file,
	// while a direct link to a file elsewhere in the hierarchy returns a zip
	// archive with one member.
	link := linkData.URL
	link = strings.ReplaceAll(link, "/links", "/content/links")
	link += "/files/get?path=%2F"

	return link, nil
	return linkData.ShortURL, nil
}
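The removed link rewrite is plain string surgery; applied to the sample URL from the comment it behaves like this standalone sketch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	link := "https://app.koofr.net/links/35d9fb92-74a3-4930-b4ed-57f123bfb1a6"
	link = strings.ReplaceAll(link, "/links", "/content/links") // switch to the content endpoint
	link += "/files/get?path=%2F"                               // the mandatory path parameter
	fmt.Println(link)
	// https://app.koofr.net/content/links/35d9fb92-74a3-4930-b4ed-57f123bfb1a6/files/get?path=%2F
}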

@@ -1,14 +1,13 @@
//go:build darwin || dragonfly || freebsd || linux
// +build darwin dragonfly freebsd linux

package local

import (
	"context"
	"fmt"
	"os"
	"syscall"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
)

@@ -20,7 +19,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	if os.IsNotExist(err) {
		return nil, fs.ErrorDirNotFound
	}
	return nil, fmt.Errorf("failed to read disk usage: %w", err)
	return nil, errors.Wrap(err, "failed to read disk usage")
}
bs := int64(s.Bsize) // nolint: unconvert
usage := &fs.Usage{

@@ -1,14 +1,13 @@
//go:build windows
// +build windows

package local

import (
	"context"
	"fmt"
	"syscall"
	"unsafe"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
)

@@ -24,7 +23,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
)
if e1 != syscall.Errno(0) {
	return nil, fmt.Errorf("failed to read disk usage: %w", e1)
	return nil, errors.Wrap(e1, "failed to read disk usage")
}
usage := &fs.Usage{
	Total: fs.NewUsageValue(total), // quota of bytes that can be used

11 backend/local/encode_darwin.go Normal file
@@ -0,0 +1,11 @@
//+build darwin

package local

import "github.com/rclone/rclone/lib/encoder"

// This is the encoding used by the local backend for macOS
//
// macOS can't store invalid UTF-8; it converts it into %XX encoding
const defaultEnc = (encoder.Base |
	encoder.EncodeInvalidUtf8)

8 backend/local/encode_other.go Normal file
@@ -0,0 +1,8 @@
//+build !windows,!darwin

package local

import "github.com/rclone/rclone/lib/encoder"

// This is the encoding used by the local backend for non-Windows platforms
const defaultEnc = encoder.Base
@@ -1,9 +1,10 @@
//go:build windows
// +build windows
//+build windows

package encoder
package local

// OS is the encoding used by the local backend for windows platforms
import "github.com/rclone/rclone/lib/encoder"

// This is the encoding used by the local backend for windows platforms
//
// List of replaced characters:
//   < (less than)     -> '＜' // FULLWIDTH LESS-THAN SIGN
@@ -23,10 +24,10 @@ package encoder
// Also encode invalid UTF-8 bytes as Go can't convert them to UTF-16.
//
// https://docs.microsoft.com/de-de/windows/desktop/FileIO/naming-a-file#naming-conventions
const OS = (Base |
	EncodeWin |
	EncodeBackSlash |
	EncodeCtl |
	EncodeRightSpace |
	EncodeRightPeriod |
	EncodeInvalidUtf8)
const defaultEnc = (encoder.Base |
	encoder.EncodeWin |
	encoder.EncodeBackSlash |
	encoder.EncodeCtl |
	encoder.EncodeRightSpace |
	encoder.EncodeRightPeriod |
	encoder.EncodeInvalidUtf8)

@@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
//+build !linux

package local

@@ -1,5 +1,4 @@
//go:build linux
// +build linux
//+build linux

package local

@@ -1,4 +1,3 @@
//go:build windows || plan9 || js
// +build windows plan9 js

package local

@@ -1,4 +1,3 @@
//go:build !windows && !plan9 && !js
// +build !windows,!plan9,!js

package local

@@ -4,7 +4,6 @@ package local
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -17,6 +16,7 @@ import (
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
@@ -27,7 +27,6 @@ import (
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/file"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
// Constants
|
||||
@@ -44,11 +43,11 @@ func init() {
|
||||
CommandHelp: commandHelp,
|
||||
Options: []fs.Option{{
|
||||
Name: "nounc",
|
||||
Help: "Disable UNC (long path names) conversion on Windows.",
|
||||
Help: "Disable UNC (long path names) conversion on Windows",
|
||||
Advanced: runtime.GOOS != "windows",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "true",
|
||||
Help: "Disables long file names.",
|
||||
Help: "Disables long file names",
|
||||
}},
|
||||
}, {
|
||||
Name: "copy_links",
|
||||
@@ -59,7 +58,7 @@ func init() {
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "links",
|
||||
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
|
||||
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "l",
|
||||
@@ -67,7 +66,6 @@ func init() {
|
||||
}, {
|
||||
Name: "skip_links",
|
||||
Help: `Don't warn about skipped symlinks.
|
||||
|
||||
This flag disables warning messages on skipped symlinks or junction
|
||||
points, as you explicitly acknowledge that they should be skipped.`,
|
||||
Default: false,
|
||||
@@ -75,39 +73,30 @@ points, as you explicitly acknowledge that they should be skipped.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "zero_size_links",
|
||||
Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
|
||||
Help: `Assume the Stat size of links is zero (and read them instead)
|
||||
|
||||
Rclone used to use the Stat size of links as the link size, but this fails in quite a few places:
|
||||
On some virtual filesystems (such ash LucidLink), reading a link size via a Stat call always returns 0.
|
||||
However, on unix it reads as the length of the text in the link. This may cause errors like this when
|
||||
syncing:
|
||||
|
||||
- Windows
|
||||
- On some virtual filesystems (such ash LucidLink)
|
||||
- Android
|
||||
Failed to copy: corrupted on transfer: sizes differ 0 vs 13
|
||||
|
||||
So rclone now always reads the link.
|
||||
`,
|
||||
Setting this flag causes rclone to read the link and use that as the size of the link
|
||||
instead of 0 which in most cases fixes the problem.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "unicode_normalization",
|
||||
Help: `Apply unicode NFC normalization to paths and filenames.
|
||||
Name: "no_unicode_normalization",
|
||||
Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
|
||||
This flag can be used to normalize file names into unicode NFC form
|
||||
that are read from the local filesystem.
|
||||
|
||||
Rclone does not normally touch the encoding of file names it reads from
|
||||
the file system.
|
||||
|
||||
This can be useful when using macOS as it normally provides decomposed (NFD)
|
||||
unicode which in some language (eg Korean) doesn't display properly on
|
||||
some OSes.
|
||||
|
||||
Note that rclone compares filenames with unicode normalization in the sync
|
||||
routine so this flag shouldn't normally be used.`,
|
||||
This flag is deprecated now. Rclone no longer normalizes unicode file
|
||||
names, but it compares them with unicode normalization in the sync
|
||||
routine instead.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_check_updated",
|
||||
Help: `Don't check to see if the files change during upload.
|
||||
Help: `Don't check to see if the files change during upload
|
||||
|
||||
Normally rclone checks the size and modification time of files as they
|
||||
are being uploaded and aborts with a message which starts "can't copy
|
||||
@@ -153,7 +142,7 @@ to override the default choice.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "case_insensitive",
|
||||
Help: `Force the filesystem to report itself as case insensitive.
|
||||
Help: `Force the filesystem to report itself as case insensitive
|
||||
|
||||
Normally the local backend declares itself as case insensitive on
|
||||
Windows/macOS and case sensitive for everything else. Use this flag
|
||||
@@ -162,7 +151,7 @@ to override the default choice.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_preallocate",
|
||||
Help: `Disable preallocation of disk space for transferred files.
|
||||
Help: `Disable preallocation of disk space for transferred files
|
||||
|
||||
Preallocation of disk space helps prevent filesystem fragmentation.
|
||||
However, some virtual filesystem layers (such as Google Drive File
|
||||
@@ -173,7 +162,7 @@ Use this flag to disable preallocation.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_sparse",
|
||||
Help: `Disable sparse files for multi-thread downloads.
|
||||
Help: `Disable sparse files for multi-thread downloads
|
||||
|
||||
On Windows platforms rclone will make sparse files when doing
|
||||
multi-thread downloads. This avoids long pauses on large files where
|
||||
@@ -183,7 +172,7 @@ cause disk fragmentation and can be slow to work with.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_set_modtime",
|
||||
Help: `Disable setting modtime.
|
||||
Help: `Disable setting modtime
|
||||
|
||||
Normally rclone updates modification time of files after they are done
|
||||
uploading. This can cause permissions issues on Linux platforms when
|
||||
@@ -196,7 +185,7 @@ enabled, rclone will no longer update the modtime after copying a file.`,
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
Default: encoder.OS,
|
||||
Default: defaultEnc,
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
@@ -207,7 +196,8 @@ type Options struct {
|
||||
FollowSymlinks bool `config:"copy_links"`
|
||||
TranslateSymlinks bool `config:"links"`
|
||||
SkipSymlinks bool `config:"skip_links"`
|
||||
UTFNorm bool `config:"unicode_normalization"`
|
||||
ZeroSizeLinks bool `config:"zero_size_links"`
|
||||
NoUTFNorm bool `config:"no_unicode_normalization"`
|
||||
NoCheckUpdated bool `config:"no_check_updated"`
|
||||
NoUNC bool `config:"nounc"`
|
||||
OneFileSystem bool `config:"one_file_system"`
|
||||
@@ -266,6 +256,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return nil, errLinksAndCopyLinks
|
||||
}
|
||||
|
||||
if opt.NoUTFNorm {
|
||||
fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
@@ -402,7 +396,7 @@ func (f *Fs) newObjectWithInfo(remote string, info os.FileInfo) (fs.Object, erro
|
||||
|
||||
}
|
||||
if o.mode.IsDir() {
|
||||
return nil, fs.ErrorIsDir
|
||||
return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
@@ -432,7 +426,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
fd, err := os.Open(fsDirPath)
|
||||
if err != nil {
|
||||
isPerm := os.IsPermission(err)
|
||||
err = fmt.Errorf("failed to open directory %q: %w", dir, err)
|
||||
err = errors.Wrapf(err, "failed to open directory %q", dir)
|
||||
fs.Errorf(dir, "%v", err)
|
||||
if isPerm {
|
||||
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
|
||||
@@ -443,7 +437,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
defer func() {
|
||||
cerr := fd.Close()
|
||||
if cerr != nil && err == nil {
|
||||
err = fmt.Errorf("failed to close directory %q:: %w", dir, cerr)
|
||||
err = errors.Wrapf(cerr, "failed to close directory %q:", dir)
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -468,12 +462,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
for _, name := range names {
|
||||
namepath := filepath.Join(fsDirPath, name)
|
||||
fi, fierr := os.Lstat(namepath)
|
||||
if os.IsNotExist(fierr) {
|
||||
// skip entry removed by a concurrent goroutine
|
||||
continue
|
||||
}
|
||||
if fierr != nil {
|
||||
err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
|
||||
err = errors.Wrapf(err, "failed to read directory %q", namepath)
|
||||
fs.Errorf(dir, "%v", fierr)
|
||||
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
|
||||
continue
|
||||
@@ -483,7 +473,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read directory entry: %w", err)
|
||||
return nil, errors.Wrap(err, "failed to read directory entry")
|
||||
}
|
||||
|
||||
for _, fi := range fis {
|
||||
@@ -496,7 +486,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
fi, err = os.Stat(localPath)
|
||||
if os.IsNotExist(err) || isCircularSymlinkError(err) {
|
||||
// Skip bad symlinks and circular symlinks
|
||||
err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
|
||||
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
|
||||
fs.Errorf(newRemote, "Listing error: %v", err)
|
||||
err = accounting.Stats(ctx).Error(err)
|
||||
continue
|
||||
@@ -532,9 +522,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
}
|
||||
|
||||
func (f *Fs) cleanRemote(dir, filename string) (remote string) {
|
||||
if f.opt.UTFNorm {
|
||||
filename = norm.NFC.String(filename)
|
||||
}
|
||||
remote = path.Join(dir, f.opt.Enc.ToStandardName(filename))
|
||||
|
||||
if !utf8.ValidString(filename) {
|
||||
@@ -570,8 +557,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
||||
|
||||
// Mkdir creates the directory if it doesn't exist
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
|
||||
localPath := f.localPath(dir)
|
||||
err := file.MkdirAll(localPath, 0777)
|
||||
err := os.MkdirAll(localPath, 0777)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -672,7 +660,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
return err
|
||||
}
|
||||
if !fi.Mode().IsDir() {
|
||||
return fmt.Errorf("can't purge non directory: %q", dir)
|
||||
return errors.Errorf("can't purge non directory: %q", dir)
|
||||
}
|
||||
return os.RemoveAll(dir)
|
||||
}
|
||||
@@ -765,7 +753,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
|
||||
// Create parent of destination
|
||||
dstParentPath := filepath.Dir(dstPath)
|
||||
err = file.MkdirAll(dstParentPath, 0777)
|
||||
err = os.MkdirAll(dstParentPath, 0777)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -866,12 +854,12 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
|
||||
err := o.lstat()
|
||||
var changed bool
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
if os.IsNotExist(errors.Cause(err)) {
|
||||
// If file not found then we assume any accumulated
|
||||
// hashes are OK - this will error on Open
|
||||
changed = true
|
||||
} else {
|
||||
return "", fmt.Errorf("hash: failed to stat: %w", err)
|
||||
return "", errors.Wrap(err, "hash: failed to stat")
|
||||
}
|
||||
} else {
|
||||
o.fs.objectMetaMu.RLock()
|
||||
@@ -900,16 +888,16 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
         in = readers.NewLimitedReadCloser(in, o.size)
     }
     if err != nil {
-        return "", fmt.Errorf("hash: failed to open: %w", err)
+        return "", errors.Wrap(err, "hash: failed to open")
     }
     var hashes map[hash.Type]string
     hashes, err = hash.StreamTypes(in, hash.NewHashSet(r))
     closeErr := in.Close()
     if err != nil {
-        return "", fmt.Errorf("hash: failed to read: %w", err)
+        return "", errors.Wrap(err, "hash: failed to read")
     }
     if closeErr != nil {
-        return "", fmt.Errorf("hash: failed to close: %w", closeErr)
+        return "", errors.Wrap(closeErr, "hash: failed to close")
     }
     hashValue = hashes[r]
     o.fs.objectMetaMu.Lock()
@@ -990,17 +978,17 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
         // Check if file has the same size and modTime
         fi, err := file.fd.Stat()
         if err != nil {
-            return 0, fmt.Errorf("can't read status of source file while transferring: %w", err)
+            return 0, errors.Wrap(err, "can't read status of source file while transferring")
         }
         file.o.fs.objectMetaMu.RLock()
         oldtime := file.o.modTime
         oldsize := file.o.size
         file.o.fs.objectMetaMu.RUnlock()
         if oldsize != fi.Size() {
-            return 0, fserrors.NoLowLevelRetryError(fmt.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size()))
+            return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size()))
         }
         if !oldtime.Equal(fi.ModTime()) {
-            return 0, fserrors.NoLowLevelRetryError(fmt.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime()))
+            return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime()))
         }
     }
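`localOpenFile.Read` re-stats the source on every read and fails without low-level retries if the size or modTime drifts, which catches files modified mid-transfer. A minimal sketch of the same guard under invented names (`stableReader` is not rclone's type):

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// stableReader is a hypothetical wrapper that fails a Read if the
// underlying file's size or modification time changes while it is
// being copied, so the caller never ships a torn file.
type stableReader struct {
	fd      *os.File
	size    int64
	modTime time.Time
}

func (r *stableReader) Read(p []byte) (int, error) {
	fi, err := r.fd.Stat()
	if err != nil {
		return 0, fmt.Errorf("can't read status of source file while transferring: %w", err)
	}
	if fi.Size() != r.size {
		return 0, fmt.Errorf("source file is being updated (size changed from %d to %d)", r.size, fi.Size())
	}
	if !fi.ModTime().Equal(r.modTime) {
		return 0, fmt.Errorf("source file is being updated (mod time changed from %v to %v)", r.modTime, fi.ModTime())
	}
	return r.fd.Read(p)
}
```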
@@ -1099,7 +1087,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 // mkdirAll makes all the directories needed to store the object
 func (o *Object) mkdirAll() error {
     dir := filepath.Dir(o.path)
-    return file.MkdirAll(dir, 0777)
+    return os.MkdirAll(dir, 0777)
 }

 type nopWriterCloser struct {
@@ -1279,13 +1267,9 @@ func (o *Object) setMetadata(info os.FileInfo) {
     o.modTime = info.ModTime()
     o.mode = info.Mode()
     o.fs.objectMetaMu.Unlock()
-    // Read the size of the link.
-    //
-    // The value in info.Size() is not always correct
-    // - Windows links read as 0 size
-    // - Some virtual filesystems (such as LucidLink) links read as 0 size
-    // - Android - some versions the links are larger than readlink suggests
-    if o.translatedLink {
+    // On Windows links read as 0 size so set the correct size here
+    // Optionally, users can turn this feature on with the zero_size_links flag
+    if (runtime.GOOS == "windows" || o.fs.opt.ZeroSizeLinks) && o.translatedLink {
         linkdst, err := os.Readlink(o.path)
         if err != nil {
             fs.Errorf(o, "Failed to read link size: %v", err)
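Both sides of this hunk fix up the size of a translated symlink by reading the link target, because `info.Size()` lies on the platforms listed in the removed comment; the object's size becomes the byte length of the target string. A sketch of just that measurement, assuming a POSIX system and throwaway paths:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Create a symlink whose target string is 9 bytes ("/tmp/dest").
	// The error is ignored here purely for brevity.
	_ = os.Symlink("/tmp/dest", "/tmp/link")

	// lstat's size for a symlink is unreliable on Windows and on some
	// virtual filesystems, so measure the target string instead.
	linkdst, err := os.Readlink("/tmp/link")
	if err != nil {
		fmt.Println("readlink failed:", err)
		return
	}
	fmt.Println(len(linkdst)) // 9 — the size reported for the link object
}
```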
@@ -93,16 +93,16 @@ func TestSymlink(t *testing.T) {
     file2d := fstest.NewItem("symlink.txt", "hello", modTime1)

     // Check with no symlink flags
-    r.CheckLocalItems(t, file1)
-    r.CheckRemoteItems(t)
+    fstest.CheckItems(t, r.Flocal, file1)
+    fstest.CheckItems(t, r.Fremote)

     // Set fs into "-L" mode
     f.opt.FollowSymlinks = true
     f.opt.TranslateSymlinks = false
     f.lstat = os.Stat

-    r.CheckLocalItems(t, file1, file2d)
-    r.CheckRemoteItems(t)
+    fstest.CheckItems(t, r.Flocal, file1, file2d)
+    fstest.CheckItems(t, r.Fremote)

     // Set fs into "-l" mode
     f.opt.FollowSymlinks = false
@@ -111,7 +111,7 @@ func TestSymlink(t *testing.T) {

     fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2}, nil, fs.ModTimeNotSupported)
     if haveLChtimes {
-        r.CheckLocalItems(t, file1, file2)
+        fstest.CheckItems(t, r.Flocal, file1, file2)
     }

     // Create a symlink
@@ -119,7 +119,7 @@ func TestSymlink(t *testing.T) {
     file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
     fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
     if haveLChtimes {
-        r.CheckLocalItems(t, file1, file2, file3)
+        fstest.CheckItems(t, r.Flocal, file1, file2, file3)
     }

     // Check it got the correct contents
@@ -1,6 +1,5 @@
 // Device reading functions

-//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris
 // +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris

 package local
@@ -1,6 +1,5 @@
 // Device reading functions

-//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
 // +build darwin dragonfly freebsd linux netbsd openbsd solaris

 package local
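The two device files show the Go 1.17 build-constraint transition: the `-` side carries the new `//go:build` expression alongside the legacy `// +build` line, while the `+` side predates it and has only the old form. During the transition both lines must agree and sit above the package clause; a generic illustration of how the two syntaxes map (not from this repository):

```go
// In the old syntax a comma means AND, a space means OR;
// the new syntax spells the boolean operators out explicitly.

//go:build (linux && amd64) || darwin
// +build linux,amd64 darwin

package example
```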
Some files were not shown because too many files have changed in this diff.