mirror of
https://github.com/rclone/rclone.git
synced 2025-12-13 23:03:19 +00:00
Compare commits
1 Commits
v1.57.0
...
fix-connec
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
bede3a5d48 |
9
.github/ISSUE_TEMPLATE/Bug.md
vendored
9
.github/ISSUE_TEMPLATE/Bug.md
vendored
@@ -37,6 +37,7 @@ The Rclone Developers
|
|||||||
|
|
||||||
-->
|
-->
|
||||||
|
|
||||||
|
|
||||||
#### The associated forum post URL from `https://forum.rclone.org`
|
#### The associated forum post URL from `https://forum.rclone.org`
|
||||||
|
|
||||||
|
|
||||||
@@ -64,11 +65,3 @@ The Rclone Developers
|
|||||||
#### A log from the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
|
#### A log from the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
<!--- Please keep the note below for others who read your bug report. -->
|
|
||||||
|
|
||||||
#### How to use GitHub
|
|
||||||
|
|
||||||
* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
|
|
||||||
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
|
|
||||||
* Subscribe to receive notifications on status change and new comments.
|
|
||||||
|
|||||||
9
.github/ISSUE_TEMPLATE/Feature.md
vendored
9
.github/ISSUE_TEMPLATE/Feature.md
vendored
@@ -26,6 +26,7 @@ The Rclone Developers
|
|||||||
|
|
||||||
-->
|
-->
|
||||||
|
|
||||||
|
|
||||||
#### The associated forum post URL from `https://forum.rclone.org`
|
#### The associated forum post URL from `https://forum.rclone.org`
|
||||||
|
|
||||||
|
|
||||||
@@ -41,11 +42,3 @@ The Rclone Developers
|
|||||||
#### How do you think rclone should be changed to solve that?
|
#### How do you think rclone should be changed to solve that?
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
<!--- Please keep the note below for others who read your feature request. -->
|
|
||||||
|
|
||||||
#### How to use GitHub
|
|
||||||
|
|
||||||
* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
|
|
||||||
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
|
|
||||||
* Subscribe to receive notifications on status change and new comments.
|
|
||||||
|
|||||||
2
.github/PULL_REQUEST_TEMPLATE.md
vendored
2
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -22,7 +22,7 @@ Link issues and relevant forum posts here.
|
|||||||
|
|
||||||
#### Checklist
|
#### Checklist
|
||||||
|
|
||||||
- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-new-feature-or-bug-fix).
|
- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
|
||||||
- [ ] I have added tests for all changes in this PR if appropriate.
|
- [ ] I have added tests for all changes in this PR if appropriate.
|
||||||
- [ ] I have added documentation for the changes if appropriate.
|
- [ ] I have added documentation for the changes if appropriate.
|
||||||
- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).
|
- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).
|
||||||
|
|||||||
83
.github/workflows/build.yml
vendored
83
.github/workflows/build.yml
vendored
@@ -12,36 +12,29 @@ on:
|
|||||||
tags:
|
tags:
|
||||||
- '*'
|
- '*'
|
||||||
pull_request:
|
pull_request:
|
||||||
workflow_dispatch:
|
|
||||||
inputs:
|
|
||||||
manual:
|
|
||||||
required: true
|
|
||||||
default: true
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
|
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.14', 'go1.15', 'go1.16']
|
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.13', 'go1.14', 'go1.15']
|
||||||
|
|
||||||
include:
|
include:
|
||||||
- job_name: linux
|
- job_name: linux
|
||||||
os: ubuntu-latest
|
os: ubuntu-latest
|
||||||
go: '1.17.x'
|
go: '1.16.x'
|
||||||
gotags: cmount
|
gotags: cmount
|
||||||
build_flags: '-include "^linux/"'
|
build_flags: '-include "^linux/"'
|
||||||
check: true
|
check: true
|
||||||
quicktest: true
|
quicktest: true
|
||||||
racequicktest: true
|
racequicktest: true
|
||||||
librclonetest: true
|
|
||||||
deploy: true
|
deploy: true
|
||||||
|
|
||||||
- job_name: mac_amd64
|
- job_name: mac_amd64
|
||||||
os: macOS-latest
|
os: macOS-latest
|
||||||
go: '1.17.x'
|
go: '1.16.x'
|
||||||
gotags: 'cmount'
|
gotags: 'cmount'
|
||||||
build_flags: '-include "^darwin/amd64" -cgo'
|
build_flags: '-include "^darwin/amd64" -cgo'
|
||||||
quicktest: true
|
quicktest: true
|
||||||
@@ -50,14 +43,14 @@ jobs:
|
|||||||
|
|
||||||
- job_name: mac_arm64
|
- job_name: mac_arm64
|
||||||
os: macOS-latest
|
os: macOS-latest
|
||||||
go: '1.17.x'
|
go: '1.16.x'
|
||||||
gotags: 'cmount'
|
gotags: 'cmount'
|
||||||
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
|
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
|
||||||
deploy: true
|
deploy: true
|
||||||
|
|
||||||
- job_name: windows_amd64
|
- job_name: windows_amd64
|
||||||
os: windows-latest
|
os: windows-latest
|
||||||
go: '1.17.x'
|
go: '1.16.x'
|
||||||
gotags: cmount
|
gotags: cmount
|
||||||
build_flags: '-include "^windows/amd64" -cgo'
|
build_flags: '-include "^windows/amd64" -cgo'
|
||||||
build_args: '-buildmode exe'
|
build_args: '-buildmode exe'
|
||||||
@@ -67,7 +60,7 @@ jobs:
|
|||||||
|
|
||||||
- job_name: windows_386
|
- job_name: windows_386
|
||||||
os: windows-latest
|
os: windows-latest
|
||||||
go: '1.17.x'
|
go: '1.16.x'
|
||||||
gotags: cmount
|
gotags: cmount
|
||||||
goarch: '386'
|
goarch: '386'
|
||||||
cgo: '1'
|
cgo: '1'
|
||||||
@@ -78,11 +71,16 @@ jobs:
|
|||||||
|
|
||||||
- job_name: other_os
|
- job_name: other_os
|
||||||
os: ubuntu-latest
|
os: ubuntu-latest
|
||||||
go: '1.17.x'
|
go: '1.16.x'
|
||||||
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
|
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
|
||||||
compile_all: true
|
compile_all: true
|
||||||
deploy: true
|
deploy: true
|
||||||
|
|
||||||
|
- job_name: go1.13
|
||||||
|
os: ubuntu-latest
|
||||||
|
go: '1.13.x'
|
||||||
|
quicktest: true
|
||||||
|
|
||||||
- job_name: go1.14
|
- job_name: go1.14
|
||||||
os: ubuntu-latest
|
os: ubuntu-latest
|
||||||
go: '1.14.x'
|
go: '1.14.x'
|
||||||
@@ -95,12 +93,6 @@ jobs:
|
|||||||
quicktest: true
|
quicktest: true
|
||||||
racequicktest: true
|
racequicktest: true
|
||||||
|
|
||||||
- job_name: go1.16
|
|
||||||
os: ubuntu-latest
|
|
||||||
go: '1.16.x'
|
|
||||||
quicktest: true
|
|
||||||
racequicktest: true
|
|
||||||
|
|
||||||
name: ${{ matrix.job_name }}
|
name: ${{ matrix.job_name }}
|
||||||
|
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
@@ -195,13 +187,12 @@ jobs:
|
|||||||
make racequicktest
|
make racequicktest
|
||||||
if: matrix.racequicktest
|
if: matrix.racequicktest
|
||||||
|
|
||||||
- name: Run librclone tests
|
- name: Code quality test
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
make -C librclone/ctest test
|
make build_dep
|
||||||
make -C librclone/ctest clean
|
make check
|
||||||
librclone/python/test_rclone.py
|
if: matrix.check
|
||||||
if: matrix.librclonetest
|
|
||||||
|
|
||||||
- name: Compile all architectures test
|
- name: Compile all architectures test
|
||||||
shell: bash
|
shell: bash
|
||||||
@@ -222,24 +213,7 @@ jobs:
|
|||||||
# Deploy binaries if enabled in config && not a PR && not a fork
|
# Deploy binaries if enabled in config && not a PR && not a fork
|
||||||
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
|
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
|
||||||
|
|
||||||
lint:
|
|
||||||
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
|
|
||||||
timeout-minutes: 30
|
|
||||||
name: "lint"
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v2
|
|
||||||
|
|
||||||
- name: Code quality test
|
|
||||||
uses: golangci/golangci-lint-action@v2
|
|
||||||
with:
|
|
||||||
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
|
|
||||||
version: latest
|
|
||||||
|
|
||||||
android:
|
android:
|
||||||
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
|
|
||||||
timeout-minutes: 30
|
timeout-minutes: 30
|
||||||
name: "android-all"
|
name: "android-all"
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
@@ -247,18 +221,16 @@ jobs:
|
|||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v2
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
# Upgrade together with NDK version
|
# Upgrade together with NDK version
|
||||||
- name: Set up Go 1.16
|
- name: Set up Go 1.14
|
||||||
uses: actions/setup-go@v1
|
uses: actions/setup-go@v1
|
||||||
with:
|
with:
|
||||||
go-version: 1.16
|
go-version: 1.14
|
||||||
|
|
||||||
# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
|
# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
|
||||||
- name: Force NDK version
|
- name: Force NDK version
|
||||||
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;22.1.7171670" | grep -v = || true
|
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;21.4.7075529" | grep -v = || true
|
||||||
|
|
||||||
- name: Go module cache
|
- name: Go module cache
|
||||||
uses: actions/cache@v2
|
uses: actions/cache@v2
|
||||||
@@ -277,19 +249,10 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
make
|
make
|
||||||
|
|
||||||
- name: install gomobile
|
|
||||||
run: |
|
|
||||||
go get golang.org/x/mobile/cmd/gobind
|
|
||||||
go get golang.org/x/mobile/cmd/gomobile
|
|
||||||
env PATH=$PATH:~/go/bin gomobile init
|
|
||||||
|
|
||||||
- name: arm-v7a gomobile build
|
|
||||||
run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
|
|
||||||
|
|
||||||
- name: arm-v7a Set environment variables
|
- name: arm-v7a Set environment variables
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
|
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
|
||||||
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
||||||
echo 'GOOS=android' >> $GITHUB_ENV
|
echo 'GOOS=android' >> $GITHUB_ENV
|
||||||
echo 'GOARCH=arm' >> $GITHUB_ENV
|
echo 'GOARCH=arm' >> $GITHUB_ENV
|
||||||
@@ -302,7 +265,7 @@ jobs:
|
|||||||
- name: arm64-v8a Set environment variables
|
- name: arm64-v8a Set environment variables
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
|
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
|
||||||
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
||||||
echo 'GOOS=android' >> $GITHUB_ENV
|
echo 'GOOS=android' >> $GITHUB_ENV
|
||||||
echo 'GOARCH=arm64' >> $GITHUB_ENV
|
echo 'GOARCH=arm64' >> $GITHUB_ENV
|
||||||
@@ -315,7 +278,7 @@ jobs:
|
|||||||
- name: x86 Set environment variables
|
- name: x86 Set environment variables
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
|
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
|
||||||
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
||||||
echo 'GOOS=android' >> $GITHUB_ENV
|
echo 'GOOS=android' >> $GITHUB_ENV
|
||||||
echo 'GOARCH=386' >> $GITHUB_ENV
|
echo 'GOARCH=386' >> $GITHUB_ENV
|
||||||
@@ -328,7 +291,7 @@ jobs:
|
|||||||
- name: x64 Set environment variables
|
- name: x64 Set environment variables
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
|
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
|
||||||
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
||||||
echo 'GOOS=android' >> $GITHUB_ENV
|
echo 'GOOS=android' >> $GITHUB_ENV
|
||||||
echo 'GOARCH=amd64' >> $GITHUB_ENV
|
echo 'GOARCH=amd64' >> $GITHUB_ENV
|
||||||
|
|||||||
@@ -7,7 +7,6 @@ on:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
if: github.repository == 'rclone/rclone'
|
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
name: Build image job
|
name: Build image job
|
||||||
steps:
|
steps:
|
||||||
|
|||||||
@@ -6,7 +6,6 @@ on:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
if: github.repository == 'rclone/rclone'
|
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
name: Build image job
|
name: Build image job
|
||||||
steps:
|
steps:
|
||||||
@@ -32,28 +31,3 @@ jobs:
|
|||||||
publish: true
|
publish: true
|
||||||
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
|
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
|
||||||
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
|
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
|
||||||
|
|
||||||
build_docker_volume_plugin:
|
|
||||||
if: github.repository == 'rclone/rclone'
|
|
||||||
needs: build
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
name: Build docker plugin job
|
|
||||||
steps:
|
|
||||||
- name: Checkout master
|
|
||||||
uses: actions/checkout@v2
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
- name: Build and publish docker plugin
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
VER=${GITHUB_REF#refs/tags/}
|
|
||||||
PLUGIN_USER=rclone
|
|
||||||
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
|
|
||||||
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
|
|
||||||
for PLUGIN_ARCH in amd64 arm64 arm/v7 ;do
|
|
||||||
export PLUGIN_USER PLUGIN_ARCH
|
|
||||||
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
|
|
||||||
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
|
|
||||||
done
|
|
||||||
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
|
|
||||||
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
|
|
||||||
|
|||||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -11,7 +11,3 @@ rclone.iml
|
|||||||
*.log
|
*.log
|
||||||
*.iml
|
*.iml
|
||||||
fuzz-build.zip
|
fuzz-build.zip
|
||||||
*.orig
|
|
||||||
*.rej
|
|
||||||
Thumbs.db
|
|
||||||
__pycache__
|
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ linters:
|
|||||||
- deadcode
|
- deadcode
|
||||||
- errcheck
|
- errcheck
|
||||||
- goimports
|
- goimports
|
||||||
- revive
|
- golint
|
||||||
- ineffassign
|
- ineffassign
|
||||||
- structcheck
|
- structcheck
|
||||||
- varcheck
|
- varcheck
|
||||||
@@ -24,7 +24,3 @@ issues:
|
|||||||
|
|
||||||
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
|
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
|
||||||
max-same-issues: 0
|
max-same-issues: 0
|
||||||
|
|
||||||
run:
|
|
||||||
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
|
||||||
timeout: 10m
|
|
||||||
|
|||||||
184
CONTRIBUTING.md
184
CONTRIBUTING.md
@@ -12,164 +12,95 @@ When filing an issue, please include the following information if
|
|||||||
possible as well as a description of the problem. Make sure you test
|
possible as well as a description of the problem. Make sure you test
|
||||||
with the [latest beta of rclone](https://beta.rclone.org/):
|
with the [latest beta of rclone](https://beta.rclone.org/):
|
||||||
|
|
||||||
* Rclone version (e.g. output from `rclone version`)
|
* Rclone version (e.g. output from `rclone -V`)
|
||||||
* Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
|
* Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
|
||||||
* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
|
* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
|
||||||
* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
|
* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
|
||||||
* if the log contains secrets then edit the file with a text editor first to obscure them
|
* if the log contains secrets then edit the file with a text editor first to obscure them
|
||||||
|
|
||||||
## Submitting a new feature or bug fix ##
|
## Submitting a pull request ##
|
||||||
|
|
||||||
If you find a bug that you'd like to fix, or a new feature that you'd
|
If you find a bug that you'd like to fix, or a new feature that you'd
|
||||||
like to implement then please submit a pull request via GitHub.
|
like to implement then please submit a pull request via GitHub.
|
||||||
|
|
||||||
If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.
|
If it is a big feature then make an issue first so it can be discussed.
|
||||||
|
|
||||||
To prepare your pull request first press the fork button on [rclone's GitHub
|
You'll need a Go environment set up with GOPATH set. See [the Go
|
||||||
|
getting started docs](https://golang.org/doc/install) for more info.
|
||||||
|
|
||||||
|
First in your web browser press the fork button on [rclone's GitHub
|
||||||
page](https://github.com/rclone/rclone).
|
page](https://github.com/rclone/rclone).
|
||||||
|
|
||||||
Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
|
Now in your terminal
|
||||||
|
|
||||||
Next open your terminal, change directory to your preferred folder and initialise your local rclone project:
|
|
||||||
|
|
||||||
git clone https://github.com/rclone/rclone.git
|
git clone https://github.com/rclone/rclone.git
|
||||||
cd rclone
|
cd rclone
|
||||||
git remote rename origin upstream
|
git remote rename origin upstream
|
||||||
# if you have SSH keys setup in your GitHub account:
|
|
||||||
git remote add origin git@github.com:YOURUSER/rclone.git
|
git remote add origin git@github.com:YOURUSER/rclone.git
|
||||||
# otherwise:
|
|
||||||
git remote add origin https://github.com/YOURUSER/rclone.git
|
|
||||||
|
|
||||||
Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.
|
|
||||||
|
|
||||||
Now [install Go](https://golang.org/doc/install) and verify your installation:
|
|
||||||
|
|
||||||
go version
|
|
||||||
|
|
||||||
Great, you can now compile and execute your own version of rclone:
|
|
||||||
|
|
||||||
go build
|
go build
|
||||||
./rclone version
|
|
||||||
|
|
||||||
(Note that you can also replace `go build` with `make`, which will include a
|
Make a branch to add your new feature
|
||||||
more accurate version number in the executable as well as enable you to specify
|
|
||||||
more build options.) Finally make a branch to add your new feature
|
|
||||||
|
|
||||||
git checkout -b my-new-feature
|
git checkout -b my-new-feature
|
||||||
|
|
||||||
And get hacking.
|
And get hacking.
|
||||||
|
|
||||||
You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).
|
When ready - run the unit tests for the code you changed
|
||||||
|
|
||||||
When ready - test the affected functionality and run the unit tests for the code you changed
|
|
||||||
|
|
||||||
cd folder/with/changed/files
|
|
||||||
go test -v
|
go test -v
|
||||||
|
|
||||||
Note that you may need to make a test remote, e.g. `TestSwift` for some
|
Note that you may need to make a test remote, e.g. `TestSwift` for some
|
||||||
of the unit tests.
|
of the unit tests.
|
||||||
|
|
||||||
This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.
|
Note the top level Makefile targets
|
||||||
|
|
||||||
|
* make check
|
||||||
|
* make test
|
||||||
|
|
||||||
|
Both of these will be run by Travis when you make a pull request but
|
||||||
|
you can do this yourself locally too. These require some extra go
|
||||||
|
packages which you can install with
|
||||||
|
|
||||||
|
* make build_dep
|
||||||
|
|
||||||
Make sure you
|
Make sure you
|
||||||
|
|
||||||
* Add [unit tests](#testing) for a new feature.
|
|
||||||
* Add [documentation](#writing-documentation) for a new feature.
|
* Add [documentation](#writing-documentation) for a new feature.
|
||||||
* [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
|
* Follow the [commit message guidelines](#commit-messages).
|
||||||
|
* Add [unit tests](#testing) for a new feature
|
||||||
|
* squash commits down to one per feature
|
||||||
|
* rebase to master with `git rebase master`
|
||||||
|
|
||||||
When you are done with that push your changes to Github:
|
When you are done with that
|
||||||
|
|
||||||
git push -u origin my-new-feature
|
git push -u origin my-new-feature
|
||||||
|
|
||||||
and open the GitHub website to [create your pull
|
Go to the GitHub website and click [Create pull
|
||||||
request](https://help.github.com/articles/creating-a-pull-request/).
|
request](https://help.github.com/articles/creating-a-pull-request/).
|
||||||
|
|
||||||
Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.
|
You patch will get reviewed and you might get asked to fix some stuff.
|
||||||
|
|
||||||
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
|
If so, then make the changes in the same branch, squash the commits (make multiple commits one commit) by running:
|
||||||
|
```
|
||||||
|
git log # See how many commits you want to squash
|
||||||
|
git reset --soft HEAD~2 # This squashes the 2 latest commits together.
|
||||||
|
git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
|
||||||
|
git commit # Add a new commit message.
|
||||||
|
git push --force # Push the squashed commit to your GitHub repo.
|
||||||
|
# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
|
||||||
|
```
|
||||||
|
|
||||||
## Using Git and Github ##
|
## CI for your fork ##
|
||||||
|
|
||||||
### Committing your changes ###
|
|
||||||
|
|
||||||
Follow the guideline for [commit messages](#commit-messages) and then:
|
|
||||||
|
|
||||||
git checkout my-new-feature # To switch to your branch
|
|
||||||
git status # To see the new and changed files
|
|
||||||
git add FILENAME # To select FILENAME for the commit
|
|
||||||
git status # To verify the changes to be committed
|
|
||||||
git commit # To do the commit
|
|
||||||
git log # To verify the commit. Use q to quit the log
|
|
||||||
|
|
||||||
You can modify the message or changes in the latest commit using:
|
|
||||||
|
|
||||||
git commit --amend
|
|
||||||
|
|
||||||
If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
|
|
||||||
|
|
||||||
### Replacing your previously pushed commits ###
|
|
||||||
|
|
||||||
Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
|
|
||||||
|
|
||||||
Your previously pushed commits are replaced by:
|
|
||||||
|
|
||||||
git push --force origin my-new-feature
|
|
||||||
|
|
||||||
### Basing your changes on the latest master ###
|
|
||||||
|
|
||||||
To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
|
|
||||||
|
|
||||||
git checkout master
|
|
||||||
git fetch upstream
|
|
||||||
git merge --ff-only
|
|
||||||
git push origin --follow-tags # optional update of your fork in GitHub
|
|
||||||
git checkout my-new-feature
|
|
||||||
git rebase master
|
|
||||||
|
|
||||||
If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
|
|
||||||
|
|
||||||
### Squashing your commits ###
|
|
||||||
|
|
||||||
To combine your commits into one commit:
|
|
||||||
|
|
||||||
git log # To count the commits to squash, e.g. the last 2
|
|
||||||
git reset --soft HEAD~2 # To undo the 2 latest commits
|
|
||||||
git status # To check everything is as expected
|
|
||||||
|
|
||||||
If everything is fine, then make the new combined commit:
|
|
||||||
|
|
||||||
git commit # To commit the undone commits as one
|
|
||||||
|
|
||||||
otherwise, you may roll back using:
|
|
||||||
|
|
||||||
git reflog # To check that HEAD{1} is your previous state
|
|
||||||
git reset --soft 'HEAD@{1}' # To roll back to your previous state
|
|
||||||
|
|
||||||
If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
|
|
||||||
|
|
||||||
Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
|
|
||||||
|
|
||||||
### GitHub Continuous Integration ###
|
|
||||||
|
|
||||||
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
|
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
|
||||||
|
|
||||||
## Testing ##
|
## Testing ##
|
||||||
|
|
||||||
### Quick testing ###
|
|
||||||
|
|
||||||
rclone's tests are run from the go testing framework, so at the top
|
rclone's tests are run from the go testing framework, so at the top
|
||||||
level you can run this to run all the tests.
|
level you can run this to run all the tests.
|
||||||
|
|
||||||
go test -v ./...
|
go test -v ./...
|
||||||
|
|
||||||
You can also use `make`, if supported by your platform
|
|
||||||
|
|
||||||
make quicktest
|
|
||||||
|
|
||||||
The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
|
|
||||||
|
|
||||||
### Backend testing ###
|
|
||||||
|
|
||||||
rclone contains a mixture of unit tests and integration tests.
|
rclone contains a mixture of unit tests and integration tests.
|
||||||
Because it is difficult (and in some respects pointless) to test cloud
|
Because it is difficult (and in some respects pointless) to test cloud
|
||||||
storage systems by mocking all their interfaces, rclone unit tests can
|
storage systems by mocking all their interfaces, rclone unit tests can
|
||||||
@@ -203,19 +134,12 @@ project root:
|
|||||||
go install github.com/rclone/rclone/fstest/test_all
|
go install github.com/rclone/rclone/fstest/test_all
|
||||||
test_all -backend drive
|
test_all -backend drive
|
||||||
|
|
||||||
### Full integration testing ###
|
|
||||||
|
|
||||||
If you want to run all the integration tests against all the remotes,
|
If you want to run all the integration tests against all the remotes,
|
||||||
then change into the project root and run
|
then change into the project root and run
|
||||||
|
|
||||||
make check
|
|
||||||
make test
|
make test
|
||||||
|
|
||||||
The commands may require some extra go packages which you can install with
|
This command is run daily on the integration test server. You can
|
||||||
|
|
||||||
make build_dep
|
|
||||||
|
|
||||||
The full integration tests are run daily on the integration test server. You can
|
|
||||||
find the results at https://pub.rclone.org/integration-tests/
|
find the results at https://pub.rclone.org/integration-tests/
|
||||||
|
|
||||||
## Code Organisation ##
|
## Code Organisation ##
|
||||||
@@ -230,7 +154,6 @@ with modules beneath.
|
|||||||
* cmd - the rclone commands
|
* cmd - the rclone commands
|
||||||
* all - import this to load all the commands
|
* all - import this to load all the commands
|
||||||
* ...commands
|
* ...commands
|
||||||
* cmdtest - end-to-end tests of commands, flags, environment variables,...
|
|
||||||
* docs - the documentation and website
|
* docs - the documentation and website
|
||||||
* content - adjust these docs only - everything else is autogenerated
|
* content - adjust these docs only - everything else is autogenerated
|
||||||
* command - these are auto generated - edit the corresponding .go file
|
* command - these are auto generated - edit the corresponding .go file
|
||||||
@@ -275,28 +198,9 @@ If you add a new general flag (not for a backend), then document it in
|
|||||||
alphabetical order.
|
alphabetical order.
|
||||||
|
|
||||||
If you add a new backend option/flag, then it should be documented in
|
If you add a new backend option/flag, then it should be documented in
|
||||||
the source file in the `Help:` field.
|
the source file in the `Help:` field. The first line of this is used
|
||||||
|
for the flag help, the remainder is shown to the user in `rclone
|
||||||
* Start with the most important information about the option,
|
config` and is added to the docs with `make backenddocs`.
|
||||||
as a single sentence on a single line.
|
|
||||||
* This text will be used for the command-line flag help.
|
|
||||||
* It will be combined with other information, such as any default value,
|
|
||||||
and the result will look odd if not written as a single sentence.
|
|
||||||
* It should end with a period/full stop character, which will be shown
|
|
||||||
in docs but automatically removed when producing the flag help.
|
|
||||||
* Try to keep it below 80 characters, to reduce text wrapping in the terminal.
|
|
||||||
* More details can be added in a new paragraph, after an empty line (`"\n\n"`).
|
|
||||||
* Like with docs generated from Markdown, a single line break is ignored
|
|
||||||
and two line breaks creates a new paragraph.
|
|
||||||
* This text will be shown to the user in `rclone config`
|
|
||||||
and in the docs (where it will be added by `make backenddocs`,
|
|
||||||
normally run some time before next release).
|
|
||||||
* To create options of enumeration type use the `Examples:` field.
|
|
||||||
* Each example value have their own `Help:` field, but they are treated
|
|
||||||
a bit different than the main option help text. They will be shown
|
|
||||||
as an unordered list, therefore a single line break is enough to
|
|
||||||
create a new list item. Also, for enumeration texts like name of
|
|
||||||
countries, it looks better without an ending period/full stop character.
|
|
||||||
|
|
||||||
The only documentation you need to edit are the `docs/content/*.md`
|
The only documentation you need to edit are the `docs/content/*.md`
|
||||||
files. The `MANUAL.*`, `rclone.1`, web site, etc. are all auto generated
|
files. The `MANUAL.*`, `rclone.1`, web site, etc. are all auto generated
|
||||||
@@ -305,9 +209,7 @@ website` targets in the Makefile if you are interested in how. You
|
|||||||
don't need to run these when adding a feature.
|
don't need to run these when adding a feature.
|
||||||
|
|
||||||
Documentation for rclone sub commands is with their code, e.g.
|
Documentation for rclone sub commands is with their code, e.g.
|
||||||
`cmd/ls/ls.go`. Write flag help strings as a single sentence on a single
|
`cmd/ls/ls.go`.
|
||||||
line, without a period/full stop character at the end, as it will be
|
|
||||||
combined unmodified with other information (such as any default value).
|
|
||||||
|
|
||||||
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
|
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
|
||||||
for small changes in the docs which makes it very easy.
|
for small changes in the docs which makes it very easy.
|
||||||
|
|||||||
7170
MANUAL.html
generated
7170
MANUAL.html
generated
File diff suppressed because it is too large
Load Diff
8977
MANUAL.txt
generated
8977
MANUAL.txt
generated
File diff suppressed because it is too large
Load Diff
30
Makefile
30
Makefile
@@ -256,33 +256,3 @@ startstable:
|
|||||||
|
|
||||||
winzip:
|
winzip:
|
||||||
zip -9 rclone-$(TAG).zip rclone.exe
|
zip -9 rclone-$(TAG).zip rclone.exe
|
||||||
|
|
||||||
# docker volume plugin
|
|
||||||
PLUGIN_USER ?= rclone
|
|
||||||
PLUGIN_TAG ?= latest
|
|
||||||
PLUGIN_BASE_TAG ?= latest
|
|
||||||
PLUGIN_ARCH ?= amd64
|
|
||||||
PLUGIN_IMAGE := $(PLUGIN_USER)/docker-volume-rclone:$(PLUGIN_TAG)
|
|
||||||
PLUGIN_BASE := $(PLUGIN_USER)/rclone:$(PLUGIN_BASE_TAG)
|
|
||||||
PLUGIN_BUILD_DIR := ./build/docker-plugin
|
|
||||||
PLUGIN_CONTRIB_DIR := ./contrib/docker-plugin/managed
|
|
||||||
|
|
||||||
docker-plugin-create:
|
|
||||||
docker buildx inspect |grep -q /${PLUGIN_ARCH} || \
|
|
||||||
docker run --rm --privileged tonistiigi/binfmt --install all
|
|
||||||
rm -rf ${PLUGIN_BUILD_DIR}
|
|
||||||
docker buildx build \
|
|
||||||
--no-cache --pull \
|
|
||||||
--build-arg BASE_IMAGE=${PLUGIN_BASE} \
|
|
||||||
--platform linux/${PLUGIN_ARCH} \
|
|
||||||
--output ${PLUGIN_BUILD_DIR}/rootfs \
|
|
||||||
${PLUGIN_CONTRIB_DIR}
|
|
||||||
cp ${PLUGIN_CONTRIB_DIR}/config.json ${PLUGIN_BUILD_DIR}
|
|
||||||
docker plugin rm --force ${PLUGIN_IMAGE} 2>/dev/null || true
|
|
||||||
docker plugin create ${PLUGIN_IMAGE} ${PLUGIN_BUILD_DIR}
|
|
||||||
|
|
||||||
docker-plugin-push:
|
|
||||||
docker plugin push ${PLUGIN_IMAGE}
|
|
||||||
docker plugin rm ${PLUGIN_IMAGE}
|
|
||||||
|
|
||||||
docker-plugin: docker-plugin-create docker-plugin-push
|
|
||||||
|
|||||||
@@ -32,6 +32,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
|
|||||||
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
|
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
|
||||||
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
|
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
|
||||||
* FTP [:page_facing_up:](https://rclone.org/ftp/)
|
* FTP [:page_facing_up:](https://rclone.org/ftp/)
|
||||||
|
* GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
|
||||||
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
|
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
|
||||||
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
|
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
|
||||||
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
|
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
|
||||||
@@ -61,7 +62,6 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
|
|||||||
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
|
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
|
||||||
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
|
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
|
||||||
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
|
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
|
||||||
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
|
|
||||||
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
|
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
|
||||||
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
|
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
|
||||||
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
||||||
@@ -87,6 +87,7 @@ Please see [the full list of all storage providers and their features](https://r
|
|||||||
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
|
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
|
||||||
* Optional transparent compression ([Compress](https://rclone.org/compress/))
|
* Optional transparent compression ([Compress](https://rclone.org/compress/))
|
||||||
* Optional encryption ([Crypt](https://rclone.org/crypt/))
|
* Optional encryption ([Crypt](https://rclone.org/crypt/))
|
||||||
|
* Optional cache ([Cache](https://rclone.org/cache/))
|
||||||
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
|
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
|
||||||
* Multi-threaded downloads to local disk
|
* Multi-threaded downloads to local disk
|
||||||
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
|
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ func init() {
|
|||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "remote",
|
Name: "remote",
|
||||||
Help: "Remote or path to alias.\n\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
|
Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
|
||||||
Required: true,
|
Required: true,
|
||||||
}},
|
}},
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func prepare(t *testing.T, root string) {
|
func prepare(t *testing.T, root string) {
|
||||||
configfile.Install()
|
configfile.LoadConfig(context.Background())
|
||||||
|
|
||||||
// Configure the remote
|
// Configure the remote
|
||||||
config.FileSet(remoteName, "type", "alias")
|
config.FileSet(remoteName, "type", "alias")
|
||||||
|
|||||||
@@ -18,7 +18,6 @@ import (
|
|||||||
_ "github.com/rclone/rclone/backend/ftp"
|
_ "github.com/rclone/rclone/backend/ftp"
|
||||||
_ "github.com/rclone/rclone/backend/googlecloudstorage"
|
_ "github.com/rclone/rclone/backend/googlecloudstorage"
|
||||||
_ "github.com/rclone/rclone/backend/googlephotos"
|
_ "github.com/rclone/rclone/backend/googlephotos"
|
||||||
_ "github.com/rclone/rclone/backend/hasher"
|
|
||||||
_ "github.com/rclone/rclone/backend/hdfs"
|
_ "github.com/rclone/rclone/backend/hdfs"
|
||||||
_ "github.com/rclone/rclone/backend/http"
|
_ "github.com/rclone/rclone/backend/http"
|
||||||
_ "github.com/rclone/rclone/backend/hubic"
|
_ "github.com/rclone/rclone/backend/hubic"
|
||||||
@@ -38,12 +37,10 @@ import (
|
|||||||
_ "github.com/rclone/rclone/backend/seafile"
|
_ "github.com/rclone/rclone/backend/seafile"
|
||||||
_ "github.com/rclone/rclone/backend/sftp"
|
_ "github.com/rclone/rclone/backend/sftp"
|
||||||
_ "github.com/rclone/rclone/backend/sharefile"
|
_ "github.com/rclone/rclone/backend/sharefile"
|
||||||
_ "github.com/rclone/rclone/backend/sia"
|
|
||||||
_ "github.com/rclone/rclone/backend/sugarsync"
|
_ "github.com/rclone/rclone/backend/sugarsync"
|
||||||
_ "github.com/rclone/rclone/backend/swift"
|
_ "github.com/rclone/rclone/backend/swift"
|
||||||
_ "github.com/rclone/rclone/backend/tardigrade"
|
_ "github.com/rclone/rclone/backend/tardigrade"
|
||||||
_ "github.com/rclone/rclone/backend/union"
|
_ "github.com/rclone/rclone/backend/union"
|
||||||
_ "github.com/rclone/rclone/backend/uptobox"
|
|
||||||
_ "github.com/rclone/rclone/backend/webdav"
|
_ "github.com/rclone/rclone/backend/webdav"
|
||||||
_ "github.com/rclone/rclone/backend/yandex"
|
_ "github.com/rclone/rclone/backend/yandex"
|
||||||
_ "github.com/rclone/rclone/backend/zoho"
|
_ "github.com/rclone/rclone/backend/zoho"
|
||||||
|
|||||||
@@ -16,6 +16,7 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
"path"
|
"path"
|
||||||
"strings"
|
"strings"
|
||||||
@@ -69,10 +70,11 @@ func init() {
|
|||||||
Prefix: "acd",
|
Prefix: "acd",
|
||||||
Description: "Amazon Drive",
|
Description: "Amazon Drive",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
Config: func(ctx context.Context, name string, m configmap.Mapper) {
|
||||||
return oauthutil.ConfigOut("", &oauthutil.Options{
|
err := oauthutil.Config(ctx, "amazon cloud drive", name, m, acdConfig, nil)
|
||||||
OAuth2Config: acdConfig,
|
if err != nil {
|
||||||
})
|
log.Fatalf("Failed to configure token: %v", err)
|
||||||
|
}
|
||||||
},
|
},
|
||||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||||
Name: "checkpoint",
|
Name: "checkpoint",
|
||||||
@@ -81,16 +83,16 @@ func init() {
|
|||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "upload_wait_per_gb",
|
Name: "upload_wait_per_gb",
|
||||||
Help: `Additional time per GiB to wait after a failed complete upload to see if it appears.
|
Help: `Additional time per GB to wait after a failed complete upload to see if it appears.
|
||||||
|
|
||||||
Sometimes Amazon Drive gives an error when a file has been fully
|
Sometimes Amazon Drive gives an error when a file has been fully
|
||||||
uploaded but the file appears anyway after a little while. This
|
uploaded but the file appears anyway after a little while. This
|
||||||
happens sometimes for files over 1 GiB in size and nearly every time for
|
happens sometimes for files over 1GB in size and nearly every time for
|
||||||
files bigger than 10 GiB. This parameter controls the time rclone waits
|
files bigger than 10GB. This parameter controls the time rclone waits
|
||||||
for the file to appear.
|
for the file to appear.
|
||||||
|
|
||||||
The default value for this parameter is 3 minutes per GiB, so by
|
The default value for this parameter is 3 minutes per GB, so by
|
||||||
default it will wait 3 minutes for every GiB uploaded to see if the
|
default it will wait 3 minutes for every GB uploaded to see if the
|
||||||
file appears.
|
file appears.
|
||||||
|
|
||||||
You can disable this feature by setting it to 0. This may cause
|
You can disable this feature by setting it to 0. This may cause
|
||||||
@@ -110,7 +112,7 @@ in this situation.`,
|
|||||||
|
|
||||||
Files this size or more will be downloaded via their "tempLink". This
|
Files this size or more will be downloaded via their "tempLink". This
|
||||||
is to work around a problem with Amazon Drive which blocks downloads
|
is to work around a problem with Amazon Drive which blocks downloads
|
||||||
of files bigger than about 10 GiB. The default for this is 9 GiB which
|
of files bigger than about 10GB. The default for this is 9GB which
|
||||||
shouldn't need to be changed.
|
shouldn't need to be changed.
|
||||||
|
|
||||||
To download files above this threshold, rclone requests a "tempLink"
|
To download files above this threshold, rclone requests a "tempLink"
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
// Test AmazonCloudDrive filesystem interface
|
// Test AmazonCloudDrive filesystem interface
|
||||||
|
|
||||||
//go:build acd
|
|
||||||
// +build acd
|
// +build acd
|
||||||
|
|
||||||
package amazonclouddrive_test
|
package amazonclouddrive_test
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
|
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
|
||||||
|
|
||||||
//go:build !plan9 && !solaris && !js
|
// +build !plan9,!solaris,!js,go1.14
|
||||||
// +build !plan9,!solaris,!js
|
|
||||||
|
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|
||||||
@@ -16,7 +15,6 @@ import (
|
|||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"path"
|
"path"
|
||||||
"strconv"
|
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
@@ -49,8 +47,8 @@ const (
|
|||||||
timeFormatIn = time.RFC3339
|
timeFormatIn = time.RFC3339
|
||||||
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
|
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
|
||||||
storageDefaultBaseURL = "blob.core.windows.net"
|
storageDefaultBaseURL = "blob.core.windows.net"
|
||||||
defaultChunkSize = 4 * fs.Mebi
|
defaultChunkSize = 4 * fs.MebiByte
|
||||||
maxChunkSize = 100 * fs.Mebi
|
maxChunkSize = 100 * fs.MebiByte
|
||||||
uploadConcurrency = 4
|
uploadConcurrency = 4
|
||||||
defaultAccessTier = azblob.AccessTierNone
|
defaultAccessTier = azblob.AccessTierNone
|
||||||
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
|
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
|
||||||
@@ -75,29 +73,30 @@ func init() {
|
|||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "account",
|
Name: "account",
|
||||||
Help: "Storage Account Name.\n\nLeave blank to use SAS URL or Emulator.",
|
Help: "Storage Account Name (leave blank to use SAS URL or Emulator)",
|
||||||
}, {
|
}, {
|
||||||
Name: "service_principal_file",
|
Name: "service_principal_file",
|
||||||
Help: `Path to file containing credentials for use with a service principal.
|
Help: `Path to file containing credentials for use with a service principal.
|
||||||
|
|
||||||
Leave blank normally. Needed only if you want to use a service principal instead of interactive login.
|
Leave blank normally. Needed only if you want to use a service principal instead of interactive login.
|
||||||
|
|
||||||
$ az ad sp create-for-rbac --name "<name>" \
|
$ az sp create-for-rbac --name "<name>" \
|
||||||
--role "Storage Blob Data Owner" \
|
--role "Storage Blob Data Owner" \
|
||||||
--scopes "/subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>/blobServices/default/containers/<container>" \
|
--scopes "/subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>/blobServices/default/containers/<container>" \
|
||||||
> azure-principal.json
|
> azure-principal.json
|
||||||
|
|
||||||
See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli) and ["Assign an Azure role for access to blob data"](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details.
|
See [Use Azure CLI to assign an Azure role for access to blob and queue data](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli)
|
||||||
|
for more details.
|
||||||
`,
|
`,
|
||||||
}, {
|
}, {
|
||||||
Name: "key",
|
Name: "key",
|
||||||
Help: "Storage Account Key.\n\nLeave blank to use SAS URL or Emulator.",
|
Help: "Storage Account Key (leave blank to use SAS URL or Emulator)",
|
||||||
}, {
|
}, {
|
||||||
Name: "sas_url",
|
Name: "sas_url",
|
||||||
Help: "SAS URL for container level access only.\n\nLeave blank if using account/key or Emulator.",
|
Help: "SAS URL for container level access only\n(leave blank if using account/key or Emulator)",
|
||||||
}, {
|
}, {
|
||||||
Name: "use_msi",
|
Name: "use_msi",
|
||||||
Help: `Use a managed service identity to authenticate (only works in Azure).
|
Help: `Use a managed service identity to authenticate (only works in Azure)
|
||||||
|
|
||||||
When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/)
|
When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/)
|
||||||
to authenticate to Azure Storage instead of a SAS token or account key.
|
to authenticate to Azure Storage instead of a SAS token or account key.
|
||||||
@@ -110,31 +109,31 @@ msi_client_id, or msi_mi_res_id parameters.`,
|
|||||||
Default: false,
|
Default: false,
|
||||||
}, {
|
}, {
|
||||||
Name: "msi_object_id",
|
Name: "msi_object_id",
|
||||||
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
|
Help: "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_mi_res_id specified.",
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "msi_client_id",
|
Name: "msi_client_id",
|
||||||
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
|
Help: "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_object_id or msi_mi_res_id specified.",
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "msi_mi_res_id",
|
Name: "msi_mi_res_id",
|
||||||
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
|
Help: "Azure resource ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_object_id specified.",
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "use_emulator",
|
Name: "use_emulator",
|
||||||
Help: "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
|
Help: "Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)",
|
||||||
Default: false,
|
Default: false,
|
||||||
}, {
|
}, {
|
||||||
Name: "endpoint",
|
Name: "endpoint",
|
||||||
Help: "Endpoint for the service.\n\nLeave blank normally.",
|
Help: "Endpoint for the service\nLeave blank normally.",
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "upload_cutoff",
|
Name: "upload_cutoff",
|
||||||
Help: "Cutoff for switching to chunked upload (<= 256 MiB) (deprecated).",
|
Help: "Cutoff for switching to chunked upload (<= 256MB). (Deprecated)",
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "chunk_size",
|
Name: "chunk_size",
|
||||||
Help: `Upload chunk size (<= 100 MiB).
|
Help: `Upload chunk size (<= 100MB).
|
||||||
|
|
||||||
Note that this is stored in memory and there may be up to
|
Note that this is stored in memory and there may be up to
|
||||||
"--transfers" chunks stored at once in memory.`,
|
"--transfers" chunks stored at once in memory.`,
|
||||||
@@ -201,7 +200,6 @@ to start uploading.`,
|
|||||||
Default: memoryPoolFlushTime,
|
Default: memoryPoolFlushTime,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
Help: `How often internal memory buffer pools will be flushed.
|
Help: `How often internal memory buffer pools will be flushed.
|
||||||
|
|
||||||
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
|
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
|
||||||
This option controls how often unused buffers will be removed from the pool.`,
|
This option controls how often unused buffers will be removed from the pool.`,
|
||||||
}, {
|
}, {
|
||||||
@@ -221,12 +219,12 @@ This option controls how often unused buffers will be removed from the pool.`,
|
|||||||
encoder.EncodeRightPeriod),
|
encoder.EncodeRightPeriod),
|
||||||
}, {
|
}, {
|
||||||
Name: "public_access",
|
Name: "public_access",
|
||||||
Help: "Public access level of a container: blob or container.",
|
Help: "Public access level of a container: blob, container.",
|
||||||
Default: string(azblob.PublicAccessNone),
|
Default: string(azblob.PublicAccessNone),
|
||||||
Examples: []fs.OptionExample{
|
Examples: []fs.OptionExample{
|
||||||
{
|
{
|
||||||
Value: string(azblob.PublicAccessNone),
|
Value: string(azblob.PublicAccessNone),
|
||||||
Help: "The container and its blobs can be accessed only with an authorized request.\nIt's a default value.",
|
Help: "The container and its blobs can be accessed only with an authorized request. It's a default value",
|
||||||
}, {
|
}, {
|
||||||
Value: string(azblob.PublicAccessBlob),
|
Value: string(azblob.PublicAccessBlob),
|
||||||
Help: "Blob data within this container can be read via anonymous request.",
|
Help: "Blob data within this container can be read via anonymous request.",
|
||||||
@@ -236,11 +234,6 @@ This option controls how often unused buffers will be removed from the pool.`,
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
|
||||||
Name: "no_head_object",
|
|
||||||
Help: `If set, do not do HEAD before GET when getting objects.`,
|
|
||||||
Default: false,
|
|
||||||
Advanced: true,
|
|
||||||
}},
|
}},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -266,7 +259,6 @@ type Options struct {
|
|||||||
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
|
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
Enc encoder.MultiEncoder `config:"encoding"`
|
||||||
PublicAccess string `config:"public_access"`
|
PublicAccess string `config:"public_access"`
|
||||||
NoHeadObject bool `config:"no_head_object"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a remote azure server
|
// Fs represents a remote azure server
|
||||||
@@ -412,7 +404,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||||
const minChunkSize = fs.SizeSuffixBase
|
const minChunkSize = fs.Byte
|
||||||
if cs < minChunkSize {
|
if cs < minChunkSize {
|
||||||
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
||||||
}
|
}
|
||||||
@@ -765,7 +757,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItemInternal) (fs
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
} else if !o.fs.opt.NoHeadObject {
|
} else {
|
||||||
err := o.readMetaData() // reads info and headers, returning an error
|
err := o.readMetaData() // reads info and headers, returning an error
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -1375,39 +1367,6 @@ func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetProper
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o *Object) decodeMetaDataFromDownloadResponse(info *azblob.DownloadResponse) (err error) {
|
|
||||||
metadata := info.NewMetadata()
|
|
||||||
size := info.ContentLength()
|
|
||||||
if isDirectoryMarker(size, metadata, o.remote) {
|
|
||||||
return fs.ErrorNotAFile
|
|
||||||
}
|
|
||||||
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
|
|
||||||
// this as base64 encoded string.
|
|
||||||
o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
|
|
||||||
o.mimeType = info.ContentType()
|
|
||||||
o.size = size
|
|
||||||
o.modTime = info.LastModified()
|
|
||||||
o.accessTier = o.AccessTier()
|
|
||||||
o.setMetadata(metadata)
|
|
||||||
|
|
||||||
// If it was a Range request, the size is wrong, so correct it
|
|
||||||
if contentRange := info.ContentRange(); contentRange != "" {
|
|
||||||
slash := strings.IndexRune(contentRange, '/')
|
|
||||||
if slash >= 0 {
|
|
||||||
i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
|
|
||||||
if err == nil {
|
|
||||||
o.size = i
|
|
||||||
} else {
|
|
||||||
fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
fs.Debugf(o, "Failed to find length in %q", contentRange)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItemInternal) (err error) {
|
func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItemInternal) (err error) {
|
||||||
metadata := info.Metadata
|
metadata := info.Metadata
|
||||||
size := *info.Properties.ContentLength
|
size := *info.Properties.ContentLength
|
||||||
@@ -1538,10 +1497,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to open for download")
|
return nil, errors.Wrap(err, "failed to open for download")
|
||||||
}
|
}
|
||||||
err = o.decodeMetaDataFromDownloadResponse(downloadResponse)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(err, "failed to decode metadata for download")
|
|
||||||
}
|
|
||||||
in = downloadResponse.Body(azblob.RetryReaderOptions{})
|
in = downloadResponse.Body(azblob.RetryReaderOptions{})
|
||||||
return in, nil
|
return in, nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build !plan9 && !solaris && !js
|
// +build !plan9,!solaris,!js,go1.14
|
||||||
// +build !plan9,!solaris,!js
|
|
||||||
|
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
// Test AzureBlob filesystem interface
|
// Test AzureBlob filesystem interface
|
||||||
|
|
||||||
//go:build !plan9 && !solaris && !js
|
// +build !plan9,!solaris,!js,go1.14
|
||||||
// +build !plan9,!solaris,!js
|
|
||||||
|
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
// Build for azureblob for unsupported platforms to stop go complaining
|
// Build for azureblob for unsupported platforms to stop go complaining
|
||||||
// about "no buildable Go source files "
|
// about "no buildable Go source files "
|
||||||
|
|
||||||
//go:build plan9 || solaris || js
|
// +build plan9 solaris js !go1.14
|
||||||
// +build plan9 solaris js
|
|
||||||
|
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build !plan9 && !solaris && !js
|
// +build !plan9,!solaris,!js,go1.14
|
||||||
// +build !plan9,!solaris,!js
|
|
||||||
|
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build !plan9 && !solaris && !js
|
// +build !plan9,!solaris,!js,go1.14
|
||||||
// +build !plan9,!solaris,!js
|
|
||||||
|
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|
||||||
|
|||||||
@@ -2,11 +2,12 @@ package api
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"path"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs/fserrors"
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
"github.com/rclone/rclone/lib/version"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Error describes a B2 error response
|
// Error describes a B2 error response
|
||||||
@@ -62,17 +63,16 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// HasVersion returns true if it looks like the passed filename has a timestamp on it.
|
const versionFormat = "-v2006-01-02-150405.000"
|
||||||
//
|
|
||||||
// Note that the passed filename's timestamp may still be invalid even if this
|
|
||||||
// function returns true.
|
|
||||||
func HasVersion(remote string) bool {
|
|
||||||
return version.Match(remote)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddVersion adds the timestamp as a version string into the filename passed in.
|
// AddVersion adds the timestamp as a version string into the filename passed in.
|
||||||
func (t Timestamp) AddVersion(remote string) string {
|
func (t Timestamp) AddVersion(remote string) string {
|
||||||
return version.Add(remote, time.Time(t))
|
ext := path.Ext(remote)
|
||||||
|
base := remote[:len(remote)-len(ext)]
|
||||||
|
s := time.Time(t).Format(versionFormat)
|
||||||
|
// Replace the '.' with a '-'
|
||||||
|
s = strings.Replace(s, ".", "-", -1)
|
||||||
|
return base + s + ext
|
||||||
}
|
}
|
||||||
|
|
||||||
// RemoveVersion removes the timestamp from a filename as a version string.
|
// RemoveVersion removes the timestamp from a filename as a version string.
|
||||||
@@ -80,10 +80,25 @@ func (t Timestamp) AddVersion(remote string) string {
|
|||||||
// It returns the new file name and a timestamp, or the old filename
|
// It returns the new file name and a timestamp, or the old filename
|
||||||
// and a zero timestamp.
|
// and a zero timestamp.
|
||||||
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
|
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
|
||||||
time, newRemote := version.Remove(remote)
|
newRemote = remote
|
||||||
t = Timestamp(time)
|
ext := path.Ext(remote)
|
||||||
|
base := remote[:len(remote)-len(ext)]
|
||||||
|
if len(base) < len(versionFormat) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
versionStart := len(base) - len(versionFormat)
|
||||||
|
// Check it ends in -xxx
|
||||||
|
if base[len(base)-4] != '-' {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Replace with .xxx for parsing
|
||||||
|
base = base[:len(base)-4] + "." + base[len(base)-3:]
|
||||||
|
newT, err := time.Parse(versionFormat, base[versionStart:])
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return Timestamp(newT), base[:versionStart] + ext
|
||||||
|
}
|
||||||
|
|
||||||
// IsZero returns true if the timestamp is uninitialized
|
// IsZero returns true if the timestamp is uninitialized
|
||||||
func (t Timestamp) IsZero() bool {
|
func (t Timestamp) IsZero() bool {
|
||||||
|
|||||||
@@ -13,6 +13,7 @@ import (
|
|||||||
var (
|
var (
|
||||||
emptyT api.Timestamp
|
emptyT api.Timestamp
|
||||||
t0 = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
|
t0 = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
|
||||||
|
t0r = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
|
||||||
t1 = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
|
t1 = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -35,6 +36,40 @@ func TestTimestampUnmarshalJSON(t *testing.T) {
|
|||||||
assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
|
assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestTimestampAddVersion(t *testing.T) {
|
||||||
|
for _, test := range []struct {
|
||||||
|
t api.Timestamp
|
||||||
|
in string
|
||||||
|
expected string
|
||||||
|
}{
|
||||||
|
{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
|
||||||
|
{t1, "potato", "potato-v2001-02-03-040506-123"},
|
||||||
|
{t1, "", "-v2001-02-03-040506-123"},
|
||||||
|
} {
|
||||||
|
actual := test.t.AddVersion(test.in)
|
||||||
|
assert.Equal(t, test.expected, actual, test.in)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTimestampRemoveVersion(t *testing.T) {
|
||||||
|
for _, test := range []struct {
|
||||||
|
in string
|
||||||
|
expectedT api.Timestamp
|
||||||
|
expectedRemote string
|
||||||
|
}{
|
||||||
|
{"potato.txt", emptyT, "potato.txt"},
|
||||||
|
{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
|
||||||
|
{"potato-v2001-02-03-040506-123", t1, "potato"},
|
||||||
|
{"-v2001-02-03-040506-123", t1, ""},
|
||||||
|
{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
|
||||||
|
{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
|
||||||
|
} {
|
||||||
|
actualT, actualRemote := api.RemoveVersion(test.in)
|
||||||
|
assert.Equal(t, test.expectedT, actualT, test.in)
|
||||||
|
assert.Equal(t, test.expectedRemote, actualRemote, test.in)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestTimestampIsZero(t *testing.T) {
|
func TestTimestampIsZero(t *testing.T) {
|
||||||
assert.True(t, emptyT.IsZero())
|
assert.True(t, emptyT.IsZero())
|
||||||
assert.False(t, t0.IsZero())
|
assert.False(t, t0.IsZero())
|
||||||
|
|||||||
@@ -54,10 +54,10 @@ const (
|
|||||||
decayConstant = 1 // bigger for slower decay, exponential
|
decayConstant = 1 // bigger for slower decay, exponential
|
||||||
maxParts = 10000
|
maxParts = 10000
|
||||||
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
|
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
|
||||||
minChunkSize = 5 * fs.Mebi
|
minChunkSize = 5 * fs.MebiByte
|
||||||
defaultChunkSize = 96 * fs.Mebi
|
defaultChunkSize = 96 * fs.MebiByte
|
||||||
defaultUploadCutoff = 200 * fs.Mebi
|
defaultUploadCutoff = 200 * fs.MebiByte
|
||||||
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
|
largeFileCopyCutoff = 4 * fs.GibiByte // 5E9 is the max
|
||||||
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
|
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
|
||||||
memoryPoolUseMmap = false
|
memoryPoolUseMmap = false
|
||||||
)
|
)
|
||||||
@@ -75,15 +75,15 @@ func init() {
|
|||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "account",
|
Name: "account",
|
||||||
Help: "Account ID or Application Key ID.",
|
Help: "Account ID or Application Key ID",
|
||||||
Required: true,
|
Required: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "key",
|
Name: "key",
|
||||||
Help: "Application Key.",
|
Help: "Application Key",
|
||||||
Required: true,
|
Required: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "endpoint",
|
Name: "endpoint",
|
||||||
Help: "Endpoint for the service.\n\nLeave blank normally.",
|
Help: "Endpoint for the service.\nLeave blank normally.",
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "test_mode",
|
Name: "test_mode",
|
||||||
@@ -103,7 +103,7 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
|
|||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "versions",
|
Name: "versions",
|
||||||
Help: "Include old versions in directory listings.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
|
Help: "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
|
||||||
Default: false,
|
Default: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
@@ -121,29 +121,27 @@ This value should be set no larger than 4.657 GiB (== 5 GB).`,
|
|||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "copy_cutoff",
|
Name: "copy_cutoff",
|
||||||
Help: `Cutoff for switching to multipart copy.
|
Help: `Cutoff for switching to multipart copy
|
||||||
|
|
||||||
Any files larger than this that need to be server-side copied will be
|
Any files larger than this that need to be server-side copied will be
|
||||||
copied in chunks of this size.
|
copied in chunks of this size.
|
||||||
|
|
||||||
The minimum is 0 and the maximum is 4.6 GiB.`,
|
The minimum is 0 and the maximum is 4.6GB.`,
|
||||||
Default: largeFileCopyCutoff,
|
Default: largeFileCopyCutoff,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "chunk_size",
|
Name: "chunk_size",
|
||||||
Help: `Upload chunk size.
|
Help: `Upload chunk size. Must fit in memory.
|
||||||
|
|
||||||
When uploading large files, chunk the file into this size.
|
When uploading large files, chunk the file into this size. Note that
|
||||||
|
these chunks are buffered in memory and there might a maximum of
|
||||||
Must fit in memory. These chunks are buffered in memory and there
|
"--transfers" chunks in progress at once. 5,000,000 Bytes is the
|
||||||
might a maximum of "--transfers" chunks in progress at once.
|
minimum size.`,
|
||||||
|
|
||||||
5,000,000 Bytes is the minimum size.`,
|
|
||||||
Default: defaultChunkSize,
|
Default: defaultChunkSize,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "disable_checksum",
|
Name: "disable_checksum",
|
||||||
Help: `Disable checksums for large (> upload cutoff) files.
|
Help: `Disable checksums for large (> upload cutoff) files
|
||||||
|
|
||||||
Normally rclone will calculate the SHA1 checksum of the input before
|
Normally rclone will calculate the SHA1 checksum of the input before
|
||||||
uploading it so it can add it to metadata on the object. This is great
|
uploading it so it can add it to metadata on the object. This is great
|
||||||
@@ -1355,7 +1353,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
|
|||||||
}
|
}
|
||||||
var request = api.GetDownloadAuthorizationRequest{
|
var request = api.GetDownloadAuthorizationRequest{
|
||||||
BucketID: bucketID,
|
BucketID: bucketID,
|
||||||
FileNamePrefix: f.opt.Enc.FromStandardPath(path.Join(f.rootDirectory, remote)),
|
FileNamePrefix: f.opt.Enc.FromStandardPath(path.Join(f.root, remote)),
|
||||||
ValidDurationInSeconds: validDurationInSeconds,
|
ValidDurationInSeconds: validDurationInSeconds,
|
||||||
}
|
}
|
||||||
var response api.GetDownloadAuthorizationResponse
|
var response api.GetDownloadAuthorizationResponse
|
||||||
|
|||||||
@@ -231,7 +231,7 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
|
|||||||
// The number of bytes in the file being uploaded. Note that
|
// The number of bytes in the file being uploaded. Note that
|
||||||
// this header is required; you cannot leave it out and just
|
// this header is required; you cannot leave it out and just
|
||||||
// use chunked encoding. The minimum size of every part but
|
// use chunked encoding. The minimum size of every part but
|
||||||
// the last one is 100 MB (100,000,000 bytes)
|
// the last one is 100MB.
|
||||||
//
|
//
|
||||||
// X-Bz-Content-Sha1
|
// X-Bz-Content-Sha1
|
||||||
//
|
//
|
||||||
|
|||||||
@@ -39,7 +39,7 @@ type Error struct {
|
|||||||
Type string `json:"type"`
|
Type string `json:"type"`
|
||||||
Status int `json:"status"`
|
Status int `json:"status"`
|
||||||
Code string `json:"code"`
|
Code string `json:"code"`
|
||||||
ContextInfo json.RawMessage `json:"context_info"`
|
ContextInfo json.RawMessage
|
||||||
HelpURL string `json:"help_url"`
|
HelpURL string `json:"help_url"`
|
||||||
Message string `json:"message"`
|
Message string `json:"message"`
|
||||||
RequestID string `json:"request_id"`
|
RequestID string `json:"request_id"`
|
||||||
@@ -61,7 +61,7 @@ func (e *Error) Error() string {
|
|||||||
var _ error = (*Error)(nil)
|
var _ error = (*Error)(nil)
|
||||||
|
|
||||||
// ItemFields are the fields needed for FileInfo
|
// ItemFields are the fields needed for FileInfo
|
||||||
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"
|
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"
|
||||||
|
|
||||||
// Types of things in Item
|
// Types of things in Item
|
||||||
const (
|
const (
|
||||||
@@ -90,12 +90,6 @@ type Item struct {
|
|||||||
URL string `json:"url,omitempty"`
|
URL string `json:"url,omitempty"`
|
||||||
Access string `json:"access,omitempty"`
|
Access string `json:"access,omitempty"`
|
||||||
} `json:"shared_link"`
|
} `json:"shared_link"`
|
||||||
OwnedBy struct {
|
|
||||||
Type string `json:"type"`
|
|
||||||
ID string `json:"id"`
|
|
||||||
Name string `json:"name"`
|
|
||||||
Login string `json:"login"`
|
|
||||||
} `json:"owned_by"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ModTime returns the modification time of the item
|
// ModTime returns the modification time of the item
|
||||||
@@ -113,7 +107,6 @@ type FolderItems struct {
|
|||||||
Entries []Item `json:"entries"`
|
Entries []Item `json:"entries"`
|
||||||
Offset int `json:"offset"`
|
Offset int `json:"offset"`
|
||||||
Limit int `json:"limit"`
|
Limit int `json:"limit"`
|
||||||
NextMarker *string `json:"next_marker,omitempty"`
|
|
||||||
Order []struct {
|
Order []struct {
|
||||||
By string `json:"by"`
|
By string `json:"by"`
|
||||||
Direction string `json:"direction"`
|
Direction string `json:"direction"`
|
||||||
@@ -139,38 +132,6 @@ type UploadFile struct {
|
|||||||
ContentModifiedAt Time `json:"content_modified_at"`
|
ContentModifiedAt Time `json:"content_modified_at"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PreUploadCheck is the request for upload preflight check
|
|
||||||
type PreUploadCheck struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
Parent Parent `json:"parent"`
|
|
||||||
Size *int64 `json:"size,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// PreUploadCheckResponse is the response from upload preflight check
|
|
||||||
// if successful
|
|
||||||
type PreUploadCheckResponse struct {
|
|
||||||
UploadToken string `json:"upload_token"`
|
|
||||||
UploadURL string `json:"upload_url"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// PreUploadCheckConflict is returned in the ContextInfo error field
|
|
||||||
// from PreUploadCheck when the error code is "item_name_in_use"
|
|
||||||
type PreUploadCheckConflict struct {
|
|
||||||
Conflicts struct {
|
|
||||||
Type string `json:"type"`
|
|
||||||
ID string `json:"id"`
|
|
||||||
FileVersion struct {
|
|
||||||
Type string `json:"type"`
|
|
||||||
ID string `json:"id"`
|
|
||||||
Sha1 string `json:"sha1"`
|
|
||||||
} `json:"file_version"`
|
|
||||||
SequenceID string `json:"sequence_id"`
|
|
||||||
Etag string `json:"etag"`
|
|
||||||
Sha1 string `json:"sha1"`
|
|
||||||
Name string `json:"name"`
|
|
||||||
} `json:"conflicts"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateFileModTime is used in Update File Info
|
// UpdateFileModTime is used in Update File Info
|
||||||
type UpdateFileModTime struct {
|
type UpdateFileModTime struct {
|
||||||
ContentModifiedAt Time `json:"content_modified_at"`
|
ContentModifiedAt Time `json:"content_modified_at"`
|
||||||
|
|||||||
@@ -17,13 +17,12 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"path"
|
"path"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/rclone/rclone/lib/encoder"
|
"github.com/rclone/rclone/lib/encoder"
|
||||||
@@ -58,6 +57,7 @@ const (
|
|||||||
decayConstant = 2 // bigger for slower decay, exponential
|
decayConstant = 2 // bigger for slower decay, exponential
|
||||||
rootURL = "https://api.box.com/2.0"
|
rootURL = "https://api.box.com/2.0"
|
||||||
uploadURL = "https://upload.box.com/api/2.0"
|
uploadURL = "https://upload.box.com/api/2.0"
|
||||||
|
listChunks = 1000 // chunk size to read directory listings
|
||||||
minUploadCutoff = 50000000 // upload cutoff can be no lower than this
|
minUploadCutoff = 50000000 // upload cutoff can be no lower than this
|
||||||
defaultUploadCutoff = 50 * 1024 * 1024
|
defaultUploadCutoff = 50 * 1024 * 1024
|
||||||
tokenURL = "https://api.box.com/oauth2/token"
|
tokenURL = "https://api.box.com/oauth2/token"
|
||||||
@@ -84,7 +84,7 @@ func init() {
|
|||||||
Name: "box",
|
Name: "box",
|
||||||
Description: "Box",
|
Description: "Box",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
Config: func(ctx context.Context, name string, m configmap.Mapper) {
|
||||||
jsonFile, ok := m.Get("box_config_file")
|
jsonFile, ok := m.Get("box_config_file")
|
||||||
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
|
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
|
||||||
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
|
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
|
||||||
@@ -93,15 +93,15 @@ func init() {
|
|||||||
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
|
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
|
||||||
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
|
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to configure token with jwt authentication")
|
log.Fatalf("Failed to configure token with jwt authentication: %v", err)
|
||||||
}
|
}
|
||||||
// Else, if not using an access token, use oauth2
|
// Else, if not using an access token, use oauth2
|
||||||
} else if boxAccessToken == "" || !boxAccessTokenOk {
|
} else if boxAccessToken == "" || !boxAccessTokenOk {
|
||||||
return oauthutil.ConfigOut("", &oauthutil.Options{
|
err = oauthutil.Config(ctx, "box", name, m, oauthConfig, nil)
|
||||||
OAuth2Config: oauthConfig,
|
if err != nil {
|
||||||
})
|
log.Fatalf("Failed to configure token with oauth authentication: %v", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return nil, nil
|
|
||||||
},
|
},
|
||||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||||
Name: "root_folder_id",
|
Name: "root_folder_id",
|
||||||
@@ -110,23 +110,23 @@ func init() {
|
|||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "box_config_file",
|
Name: "box_config_file",
|
||||||
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
|
Help: "Box App config.json location\nLeave blank normally." + env.ShellExpandHelp,
|
||||||
}, {
|
}, {
|
||||||
Name: "access_token",
|
Name: "access_token",
|
||||||
Help: "Box App Primary Access Token\n\nLeave blank normally.",
|
Help: "Box App Primary Access Token\nLeave blank normally.",
|
||||||
}, {
|
}, {
|
||||||
Name: "box_sub_type",
|
Name: "box_sub_type",
|
||||||
Default: "user",
|
Default: "user",
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "user",
|
Value: "user",
|
||||||
Help: "Rclone should act on behalf of a user.",
|
Help: "Rclone should act on behalf of a user",
|
||||||
}, {
|
}, {
|
||||||
Value: "enterprise",
|
Value: "enterprise",
|
||||||
Help: "Rclone should act on behalf of a service account.",
|
Help: "Rclone should act on behalf of a service account",
|
||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "upload_cutoff",
|
Name: "upload_cutoff",
|
||||||
Help: "Cutoff for switching to multipart upload (>= 50 MiB).",
|
Help: "Cutoff for switching to multipart upload (>= 50MB).",
|
||||||
Default: fs.SizeSuffix(defaultUploadCutoff),
|
Default: fs.SizeSuffix(defaultUploadCutoff),
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
@@ -134,16 +134,6 @@ func init() {
|
|||||||
Help: "Max number of times to try committing a multipart file.",
|
Help: "Max number of times to try committing a multipart file.",
|
||||||
Default: 100,
|
Default: 100,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
|
||||||
Name: "list_chunk",
|
|
||||||
Default: 1000,
|
|
||||||
Help: "Size of listing chunk 1-1000.",
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "owned_by",
|
|
||||||
Default: "",
|
|
||||||
Help: "Only show items owned by the login (email address) passed in.",
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
}, {
|
||||||
Name: config.ConfigEncoding,
|
Name: config.ConfigEncoding,
|
||||||
Help: config.ConfigEncodingHelp,
|
Help: config.ConfigEncodingHelp,
|
||||||
@@ -167,15 +157,15 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
|
|||||||
jsonFile = env.ShellExpand(jsonFile)
|
jsonFile = env.ShellExpand(jsonFile)
|
||||||
boxConfig, err := getBoxConfig(jsonFile)
|
boxConfig, err := getBoxConfig(jsonFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "get box config")
|
log.Fatalf("Failed to configure token: %v", err)
|
||||||
}
|
}
|
||||||
privateKey, err := getDecryptedPrivateKey(boxConfig)
|
privateKey, err := getDecryptedPrivateKey(boxConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "get decrypted private key")
|
log.Fatalf("Failed to configure token: %v", err)
|
||||||
}
|
}
|
||||||
claims, err := getClaims(boxConfig, boxSubType)
|
claims, err := getClaims(boxConfig, boxSubType)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "get claims")
|
log.Fatalf("Failed to configure token: %v", err)
|
||||||
}
|
}
|
||||||
signingHeaders := getSigningHeaders(boxConfig)
|
signingHeaders := getSigningHeaders(boxConfig)
|
||||||
queryParams := getQueryParams(boxConfig)
|
queryParams := getQueryParams(boxConfig)
|
||||||
@@ -258,8 +248,6 @@ type Options struct {
|
|||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
Enc encoder.MultiEncoder `config:"encoding"`
|
||||||
RootFolderID string `config:"root_folder_id"`
|
RootFolderID string `config:"root_folder_id"`
|
||||||
AccessToken string `config:"access_token"`
|
AccessToken string `config:"access_token"`
|
||||||
ListChunk int `config:"list_chunk"`
|
|
||||||
OwnedBy string `config:"owned_by"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a remote box
|
// Fs represents a remote box
|
||||||
@@ -339,13 +327,6 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
|
|||||||
authRetry = true
|
authRetry = true
|
||||||
fs.Debugf(nil, "Should retry: %v", err)
|
fs.Debugf(nil, "Should retry: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Box API errors which should be retries
|
|
||||||
if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "operation_blocked_temporary" {
|
|
||||||
fs.Debugf(nil, "Retrying API error %v", err)
|
|
||||||
return true, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -360,7 +341,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
found, err := f.listAll(ctx, directoryID, false, true, true, func(item *api.Item) bool {
|
found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
|
||||||
if strings.EqualFold(item.Name, leaf) {
|
if strings.EqualFold(item.Name, leaf) {
|
||||||
info = item
|
info = item
|
||||||
return true
|
return true
|
||||||
@@ -535,7 +516,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
|||||||
// FindLeaf finds a directory of name leaf in the folder with ID pathID
|
// FindLeaf finds a directory of name leaf in the folder with ID pathID
|
||||||
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
|
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
|
||||||
// Find the leaf in pathID
|
// Find the leaf in pathID
|
||||||
found, err = f.listAll(ctx, pathID, true, false, true, func(item *api.Item) bool {
|
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
|
||||||
if strings.EqualFold(item.Name, leaf) {
|
if strings.EqualFold(item.Name, leaf) {
|
||||||
pathIDOut = item.ID
|
pathIDOut = item.ID
|
||||||
return true
|
return true
|
||||||
@@ -591,20 +572,17 @@ type listAllFn func(*api.Item) bool
|
|||||||
// Lists the directory required calling the user function on each item found
|
// Lists the directory required calling the user function on each item found
|
||||||
//
|
//
|
||||||
// If the user fn ever returns true then it early exits with found = true
|
// If the user fn ever returns true then it early exits with found = true
|
||||||
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, activeOnly bool, fn listAllFn) (found bool, err error) {
|
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "GET",
|
Method: "GET",
|
||||||
Path: "/folders/" + dirID + "/items",
|
Path: "/folders/" + dirID + "/items",
|
||||||
Parameters: fieldsValue(),
|
Parameters: fieldsValue(),
|
||||||
}
|
}
|
||||||
opts.Parameters.Set("limit", strconv.Itoa(f.opt.ListChunk))
|
opts.Parameters.Set("limit", strconv.Itoa(listChunks))
|
||||||
opts.Parameters.Set("usemarker", "true")
|
offset := 0
|
||||||
var marker *string
|
|
||||||
OUTER:
|
OUTER:
|
||||||
for {
|
for {
|
||||||
if marker != nil {
|
opts.Parameters.Set("offset", strconv.Itoa(offset))
|
||||||
opts.Parameters.Set("marker", *marker)
|
|
||||||
}
|
|
||||||
|
|
||||||
var result api.FolderItems
|
var result api.FolderItems
|
||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
@@ -629,10 +607,7 @@ OUTER:
|
|||||||
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
|
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if activeOnly && item.ItemStatus != api.ItemStatusActive {
|
if item.ItemStatus != api.ItemStatusActive {
|
||||||
continue
|
|
||||||
}
|
|
||||||
if f.opt.OwnedBy != "" && f.opt.OwnedBy != item.OwnedBy.Login {
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
item.Name = f.opt.Enc.ToStandardName(item.Name)
|
item.Name = f.opt.Enc.ToStandardName(item.Name)
|
||||||
@@ -641,8 +616,8 @@ OUTER:
|
|||||||
break OUTER
|
break OUTER
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
marker = result.NextMarker
|
offset += result.Limit
|
||||||
if marker == nil {
|
if offset >= result.TotalCount {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -664,7 +639,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var iErr error
|
var iErr error
|
||||||
_, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool {
|
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
|
||||||
remote := path.Join(dir, info.Name)
|
remote := path.Join(dir, info.Name)
|
||||||
if info.Type == api.ItemTypeFolder {
|
if info.Type == api.ItemTypeFolder {
|
||||||
// cache the directory ID for later lookups
|
// cache the directory ID for later lookups
|
||||||
@@ -711,80 +686,22 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
|
|||||||
return o, leaf, directoryID, nil
|
return o, leaf, directoryID, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// preUploadCheck checks to see if a file can be uploaded
|
|
||||||
//
|
|
||||||
// It returns "", nil if the file is good to go
|
|
||||||
// It returns "ID", nil if the file must be updated
|
|
||||||
func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (ID string, err error) {
|
|
||||||
check := api.PreUploadCheck{
|
|
||||||
Name: f.opt.Enc.FromStandardName(leaf),
|
|
||||||
Parent: api.Parent{
|
|
||||||
ID: directoryID,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
if size >= 0 {
|
|
||||||
check.Size = &size
|
|
||||||
}
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "OPTIONS",
|
|
||||||
Path: "/files/content/",
|
|
||||||
}
|
|
||||||
var result api.PreUploadCheckResponse
|
|
||||||
var resp *http.Response
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
resp, err = f.srv.CallJSON(ctx, &opts, &check, &result)
|
|
||||||
return shouldRetry(ctx, resp, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "item_name_in_use" {
|
|
||||||
var conflict api.PreUploadCheckConflict
|
|
||||||
err = json.Unmarshal(apiErr.ContextInfo, &conflict)
|
|
||||||
if err != nil {
|
|
||||||
return "", errors.Wrap(err, "pre-upload check: JSON decode failed")
|
|
||||||
}
|
|
||||||
if conflict.Conflicts.Type != api.ItemTypeFile {
|
|
||||||
return "", errors.Wrap(err, "pre-upload check: can't overwrite non file with file")
|
|
||||||
}
|
|
||||||
return conflict.Conflicts.ID, nil
|
|
||||||
}
|
|
||||||
return "", errors.Wrap(err, "pre-upload check")
|
|
||||||
}
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put the object
|
// Put the object
|
||||||
//
|
//
|
||||||
// Copy the reader in to the new object which is returned
|
// Copy the reader in to the new object which is returned
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
// If directory doesn't exist, file doesn't exist so can upload
|
existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
|
||||||
remote := src.Remote()
|
switch err {
|
||||||
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
|
case nil:
|
||||||
if err != nil {
|
return existingObj, existingObj.Update(ctx, in, src, options...)
|
||||||
if err == fs.ErrorDirNotFound {
|
case fs.ErrorObjectNotFound:
|
||||||
return f.PutUnchecked(ctx, in, src, options...)
|
// Not found so create it
|
||||||
}
|
return f.PutUnchecked(ctx, in, src)
|
||||||
|
default:
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Preflight check the upload, which returns the ID if the
|
|
||||||
// object already exists
|
|
||||||
ID, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if ID == "" {
|
|
||||||
return f.PutUnchecked(ctx, in, src, options...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If object exists then create a skeleton one with just id
|
|
||||||
o := &Object{
|
|
||||||
fs: f,
|
|
||||||
remote: remote,
|
|
||||||
id: ID,
|
|
||||||
}
|
|
||||||
return o, o.Update(ctx, in, src, options...)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||||
@@ -1118,36 +1035,45 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
|
|||||||
|
|
||||||
// CleanUp empties the trash
|
// CleanUp empties the trash
|
||||||
func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
||||||
var (
|
opts := rest.Opts{
|
||||||
deleteErrors = int64(0)
|
Method: "GET",
|
||||||
concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
|
Path: "/folders/trash/items",
|
||||||
wg sync.WaitGroup
|
Parameters: url.Values{
|
||||||
)
|
"fields": []string{"type", "id"},
|
||||||
_, err = f.listAll(ctx, "trash", false, false, false, func(item *api.Item) bool {
|
},
|
||||||
|
}
|
||||||
|
opts.Parameters.Set("limit", strconv.Itoa(listChunks))
|
||||||
|
offset := 0
|
||||||
|
for {
|
||||||
|
opts.Parameters.Set("offset", strconv.Itoa(offset))
|
||||||
|
|
||||||
|
var result api.FolderItems
|
||||||
|
var resp *http.Response
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||||
|
return shouldRetry(ctx, resp, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "couldn't list trash")
|
||||||
|
}
|
||||||
|
for i := range result.Entries {
|
||||||
|
item := &result.Entries[i]
|
||||||
if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
|
if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
|
||||||
wg.Add(1)
|
|
||||||
concurrencyControl <- struct{}{}
|
|
||||||
go func() {
|
|
||||||
defer func() {
|
|
||||||
<-concurrencyControl
|
|
||||||
wg.Done()
|
|
||||||
}()
|
|
||||||
err := f.deletePermanently(ctx, item.Type, item.ID)
|
err := f.deletePermanently(ctx, item.Type, item.ID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
|
return errors.Wrap(err, "failed to delete file")
|
||||||
atomic.AddInt64(&deleteErrors, 1)
|
|
||||||
}
|
}
|
||||||
}()
|
|
||||||
} else {
|
} else {
|
||||||
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
|
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
return false
|
|
||||||
})
|
|
||||||
wg.Wait()
|
|
||||||
if deleteErrors != 0 {
|
|
||||||
return errors.Errorf("failed to delete %d trash items", deleteErrors)
|
|
||||||
}
|
}
|
||||||
return err
|
offset += result.Limit
|
||||||
|
if offset >= result.TotalCount {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// DirCacheFlush resets the directory cache - used in testing as an
|
// DirCacheFlush resets the directory cache - used in testing as an
|
||||||
@@ -1201,9 +1127,6 @@ func (o *Object) Size() int64 {
|
|||||||
|
|
||||||
// setMetaData sets the metadata from info
|
// setMetaData sets the metadata from info
|
||||||
func (o *Object) setMetaData(info *api.Item) (err error) {
|
func (o *Object) setMetaData(info *api.Item) (err error) {
|
||||||
if info.Type == api.ItemTypeFolder {
|
|
||||||
return fs.ErrorIsDir
|
|
||||||
}
|
|
||||||
if info.Type != api.ItemTypeFile {
|
if info.Type != api.ItemTypeFile {
|
||||||
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
|
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
|
||||||
}
|
}
|
||||||
@@ -1305,7 +1228,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
|||||||
|
|
||||||
// upload does a single non-multipart upload
|
// upload does a single non-multipart upload
|
||||||
//
|
//
|
||||||
// This is recommended for less than 50 MiB of content
|
// This is recommended for less than 50 MB of content
|
||||||
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
|
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
|
||||||
upload := api.UploadFile{
|
upload := api.UploadFile{
|
||||||
Name: o.fs.opt.Enc.FromStandardName(leaf),
|
Name: o.fs.opt.Enc.FromStandardName(leaf),
|
||||||
|
|||||||
52
backend/cache/cache.go
vendored
52
backend/cache/cache.go
vendored
@@ -1,4 +1,3 @@
|
|||||||
//go:build !plan9 && !js
|
|
||||||
// +build !plan9,!js
|
// +build !plan9,!js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
@@ -69,26 +68,26 @@ func init() {
|
|||||||
CommandHelp: commandHelp,
|
CommandHelp: commandHelp,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "remote",
|
Name: "remote",
|
||||||
Help: "Remote to cache.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
Help: "Remote to cache.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
||||||
Required: true,
|
Required: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "plex_url",
|
Name: "plex_url",
|
||||||
Help: "The URL of the Plex server.",
|
Help: "The URL of the Plex server",
|
||||||
}, {
|
}, {
|
||||||
Name: "plex_username",
|
Name: "plex_username",
|
||||||
Help: "The username of the Plex user.",
|
Help: "The username of the Plex user",
|
||||||
}, {
|
}, {
|
||||||
Name: "plex_password",
|
Name: "plex_password",
|
||||||
Help: "The password of the Plex user.",
|
Help: "The password of the Plex user",
|
||||||
IsPassword: true,
|
IsPassword: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "plex_token",
|
Name: "plex_token",
|
||||||
Help: "The plex token for authentication - auto set normally.",
|
Help: "The plex token for authentication - auto set normally",
|
||||||
Hide: fs.OptionHideBoth,
|
Hide: fs.OptionHideBoth,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "plex_insecure",
|
Name: "plex_insecure",
|
||||||
Help: "Skip all certificate verification when connecting to the Plex server.",
|
Help: "Skip all certificate verification when connecting to the Plex server",
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "chunk_size",
|
Name: "chunk_size",
|
||||||
@@ -99,14 +98,14 @@ changed, any downloaded chunks will be invalid and cache-chunk-path
|
|||||||
will need to be cleared or unexpected EOF errors will occur.`,
|
will need to be cleared or unexpected EOF errors will occur.`,
|
||||||
Default: DefCacheChunkSize,
|
Default: DefCacheChunkSize,
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "1M",
|
Value: "1m",
|
||||||
Help: "1 MiB",
|
Help: "1MB",
|
||||||
}, {
|
}, {
|
||||||
Value: "5M",
|
Value: "5M",
|
||||||
Help: "5 MiB",
|
Help: "5 MB",
|
||||||
}, {
|
}, {
|
||||||
Value: "10M",
|
Value: "10M",
|
||||||
Help: "10 MiB",
|
Help: "10 MB",
|
||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "info_age",
|
Name: "info_age",
|
||||||
@@ -133,22 +132,22 @@ oldest chunks until it goes under this value.`,
|
|||||||
Default: DefCacheTotalChunkSize,
|
Default: DefCacheTotalChunkSize,
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "500M",
|
Value: "500M",
|
||||||
Help: "500 MiB",
|
Help: "500 MB",
|
||||||
}, {
|
}, {
|
||||||
Value: "1G",
|
Value: "1G",
|
||||||
Help: "1 GiB",
|
Help: "1 GB",
|
||||||
}, {
|
}, {
|
||||||
Value: "10G",
|
Value: "10G",
|
||||||
Help: "10 GiB",
|
Help: "10 GB",
|
||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "db_path",
|
Name: "db_path",
|
||||||
Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
|
Default: filepath.Join(config.CacheDir, "cache-backend"),
|
||||||
Help: "Directory to store file structure metadata DB.\n\nThe remote name is used as the DB file name.",
|
Help: "Directory to store file structure metadata DB.\nThe remote name is used as the DB file name.",
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "chunk_path",
|
Name: "chunk_path",
|
||||||
Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
|
Default: filepath.Join(config.CacheDir, "cache-backend"),
|
||||||
Help: `Directory to cache chunk files.
|
Help: `Directory to cache chunk files.
|
||||||
|
|
||||||
Path to where partial file data (chunks) are stored locally. The remote
|
Path to where partial file data (chunks) are stored locally. The remote
|
||||||
@@ -168,7 +167,6 @@ then "--cache-chunk-path" will use the same path as "--cache-db-path".`,
|
|||||||
Name: "chunk_clean_interval",
|
Name: "chunk_clean_interval",
|
||||||
Default: DefCacheChunkCleanInterval,
|
Default: DefCacheChunkCleanInterval,
|
||||||
Help: `How often should the cache perform cleanups of the chunk storage.
|
Help: `How often should the cache perform cleanups of the chunk storage.
|
||||||
|
|
||||||
The default value should be ok for most people. If you find that the
|
The default value should be ok for most people. If you find that the
|
||||||
cache goes over "cache-chunk-total-size" too often then try to lower
|
cache goes over "cache-chunk-total-size" too often then try to lower
|
||||||
this value to force it to perform cleanups more often.`,
|
this value to force it to perform cleanups more often.`,
|
||||||
@@ -222,7 +220,7 @@ available on the local machine.`,
|
|||||||
}, {
|
}, {
|
||||||
Name: "rps",
|
Name: "rps",
|
||||||
Default: int(DefCacheRps),
|
Default: int(DefCacheRps),
|
||||||
Help: `Limits the number of requests per second to the source FS (-1 to disable).
|
Help: `Limits the number of requests per second to the source FS (-1 to disable)
|
||||||
|
|
||||||
This setting places a hard limit on the number of requests per second
|
This setting places a hard limit on the number of requests per second
|
||||||
that cache will be doing to the cloud provider remote and try to
|
that cache will be doing to the cloud provider remote and try to
|
||||||
@@ -243,7 +241,7 @@ still pass.`,
|
|||||||
}, {
|
}, {
|
||||||
Name: "writes",
|
Name: "writes",
|
||||||
Default: DefCacheWrites,
|
Default: DefCacheWrites,
|
||||||
Help: `Cache file data on writes through the FS.
|
Help: `Cache file data on writes through the FS
|
||||||
|
|
||||||
If you need to read files immediately after you upload them through
|
If you need to read files immediately after you upload them through
|
||||||
cache you can enable this flag to have their data stored in the
|
cache you can enable this flag to have their data stored in the
|
||||||
@@ -264,7 +262,7 @@ provider`,
|
|||||||
}, {
|
}, {
|
||||||
Name: "tmp_wait_time",
|
Name: "tmp_wait_time",
|
||||||
Default: DefCacheTmpWaitTime,
|
Default: DefCacheTmpWaitTime,
|
||||||
Help: `How long should files be stored in local cache before being uploaded.
|
Help: `How long should files be stored in local cache before being uploaded
|
||||||
|
|
||||||
This is the duration that a file must wait in the temporary location
|
This is the duration that a file must wait in the temporary location
|
||||||
_cache-tmp-upload-path_ before it is selected for upload.
|
_cache-tmp-upload-path_ before it is selected for upload.
|
||||||
@@ -275,7 +273,7 @@ to start the upload if a queue formed for this purpose.`,
|
|||||||
}, {
|
}, {
|
||||||
Name: "db_wait_time",
|
Name: "db_wait_time",
|
||||||
Default: DefCacheDbWaitTime,
|
Default: DefCacheDbWaitTime,
|
||||||
Help: `How long to wait for the DB to be available - 0 is unlimited.
|
Help: `How long to wait for the DB to be available - 0 is unlimited
|
||||||
|
|
||||||
Only one process can have the DB open at any one time, so rclone waits
|
Only one process can have the DB open at any one time, so rclone waits
|
||||||
for this duration for the DB to become available before it gives an
|
for this duration for the DB to become available before it gives an
|
||||||
@@ -341,14 +339,8 @@ func parseRootPath(path string) (string, error) {
|
|||||||
return strings.Trim(path, "/"), nil
|
return strings.Trim(path, "/"), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var warnDeprecated sync.Once
|
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path, container:path
|
// NewFs constructs an Fs from the path, container:path
|
||||||
func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
warnDeprecated.Do(func() {
|
|
||||||
fs.Logf(nil, "WARNING: Cache backend is deprecated and may be removed in future. Please use VFS instead.")
|
|
||||||
})
|
|
||||||
|
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err := configstruct.Set(m, opt)
|
err := configstruct.Set(m, opt)
|
||||||
@@ -422,8 +414,8 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
|
|||||||
dbPath := f.opt.DbPath
|
dbPath := f.opt.DbPath
|
||||||
chunkPath := f.opt.ChunkPath
|
chunkPath := f.opt.ChunkPath
|
||||||
// if the dbPath is non default but the chunk path is default, we overwrite the last to follow the same one as dbPath
|
// if the dbPath is non default but the chunk path is default, we overwrite the last to follow the same one as dbPath
|
||||||
if dbPath != filepath.Join(config.GetCacheDir(), "cache-backend") &&
|
if dbPath != filepath.Join(config.CacheDir, "cache-backend") &&
|
||||||
chunkPath == filepath.Join(config.GetCacheDir(), "cache-backend") {
|
chunkPath == filepath.Join(config.CacheDir, "cache-backend") {
|
||||||
chunkPath = dbPath
|
chunkPath = dbPath
|
||||||
}
|
}
|
||||||
if filepath.Ext(dbPath) != "" {
|
if filepath.Ext(dbPath) != "" {
|
||||||
|
|||||||
19
backend/cache/cache_internal_test.go
vendored
19
backend/cache/cache_internal_test.go
vendored
@@ -1,5 +1,5 @@
|
|||||||
//go:build !plan9 && !js && !race
|
// +build !plan9,!js
|
||||||
// +build !plan9,!js,!race
|
// +build !race
|
||||||
|
|
||||||
package cache_test
|
package cache_test
|
||||||
|
|
||||||
@@ -16,7 +16,6 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
|
||||||
"runtime/debug"
|
"runtime/debug"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
@@ -294,9 +293,6 @@ func TestInternalCachedWrittenContentMatches(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestInternalDoubleWrittenContentMatches(t *testing.T) {
|
func TestInternalDoubleWrittenContentMatches(t *testing.T) {
|
||||||
if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
|
|
||||||
t.Skip("Skip test on windows/386")
|
|
||||||
}
|
|
||||||
id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
|
id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
|
||||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
@@ -685,9 +681,6 @@ func TestInternalCacheWrites(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestInternalMaxChunkSizeRespected(t *testing.T) {
|
func TestInternalMaxChunkSizeRespected(t *testing.T) {
|
||||||
if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
|
|
||||||
t.Skip("Skip test on windows/386")
|
|
||||||
}
|
|
||||||
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
|
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
|
||||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
|
||||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
@@ -843,7 +836,7 @@ func newRun() *run {
|
|||||||
if uploadDir == "" {
|
if uploadDir == "" {
|
||||||
r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
|
r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Sprintf("Failed to create temp dir: %v", err))
|
log.Fatalf("Failed to create temp dir: %v", err)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
r.tmpUploadDir = uploadDir
|
r.tmpUploadDir = uploadDir
|
||||||
@@ -926,9 +919,9 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
runInstance.rootIsCrypt = rootIsCrypt
|
runInstance.rootIsCrypt = rootIsCrypt
|
||||||
runInstance.dbPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote+".db")
|
runInstance.dbPath = filepath.Join(config.CacheDir, "cache-backend", cacheRemote+".db")
|
||||||
runInstance.chunkPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote)
|
runInstance.chunkPath = filepath.Join(config.CacheDir, "cache-backend", cacheRemote)
|
||||||
runInstance.vfsCachePath = filepath.Join(config.GetCacheDir(), "vfs", remote)
|
runInstance.vfsCachePath = filepath.Join(config.CacheDir, "vfs", remote)
|
||||||
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
|
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
|||||||
4
backend/cache/cache_test.go
vendored
4
backend/cache/cache_test.go
vendored
@@ -1,7 +1,7 @@
|
|||||||
// Test Cache filesystem interface
|
// Test Cache filesystem interface
|
||||||
|
|
||||||
//go:build !plan9 && !js && !race
|
// +build !plan9,!js
|
||||||
// +build !plan9,!js,!race
|
// +build !race
|
||||||
|
|
||||||
package cache_test
|
package cache_test
|
||||||
|
|
||||||
|
|||||||
1
backend/cache/cache_unsupported.go
vendored
1
backend/cache/cache_unsupported.go
vendored
@@ -1,7 +1,6 @@
|
|||||||
// Build for cache for unsupported platforms to stop go complaining
|
// Build for cache for unsupported platforms to stop go complaining
|
||||||
// about "no buildable Go source files "
|
// about "no buildable Go source files "
|
||||||
|
|
||||||
//go:build plan9 || js
|
|
||||||
// +build plan9 js
|
// +build plan9 js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|||||||
4
backend/cache/cache_upload_test.go
vendored
4
backend/cache/cache_upload_test.go
vendored
@@ -1,5 +1,5 @@
|
|||||||
//go:build !plan9 && !js && !race
|
// +build !plan9,!js
|
||||||
// +build !plan9,!js,!race
|
// +build !race
|
||||||
|
|
||||||
package cache_test
|
package cache_test
|
||||||
|
|
||||||
|
|||||||
1
backend/cache/directory.go
vendored
1
backend/cache/directory.go
vendored
@@ -1,4 +1,3 @@
|
|||||||
//go:build !plan9 && !js
|
|
||||||
// +build !plan9,!js
|
// +build !plan9,!js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|||||||
1
backend/cache/handle.go
vendored
1
backend/cache/handle.go
vendored
@@ -1,4 +1,3 @@
|
|||||||
//go:build !plan9 && !js
|
|
||||||
// +build !plan9,!js
|
// +build !plan9,!js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|||||||
1
backend/cache/object.go
vendored
1
backend/cache/object.go
vendored
@@ -1,4 +1,3 @@
|
|||||||
//go:build !plan9 && !js
|
|
||||||
// +build !plan9,!js
|
// +build !plan9,!js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|||||||
1
backend/cache/plex.go
vendored
1
backend/cache/plex.go
vendored
@@ -1,4 +1,3 @@
|
|||||||
//go:build !plan9 && !js
|
|
||||||
// +build !plan9,!js
|
// +build !plan9,!js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|||||||
1
backend/cache/storage_memory.go
vendored
1
backend/cache/storage_memory.go
vendored
@@ -1,4 +1,3 @@
|
|||||||
//go:build !plan9 && !js
|
|
||||||
// +build !plan9,!js
|
// +build !plan9,!js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|||||||
1
backend/cache/storage_persistent.go
vendored
1
backend/cache/storage_persistent.go
vendored
@@ -1,4 +1,3 @@
|
|||||||
//go:build !plan9 && !js
|
|
||||||
// +build !plan9,!js
|
// +build !plan9,!js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|||||||
@@ -150,13 +150,12 @@ func init() {
|
|||||||
Name: "remote",
|
Name: "remote",
|
||||||
Required: true,
|
Required: true,
|
||||||
Help: `Remote to chunk/unchunk.
|
Help: `Remote to chunk/unchunk.
|
||||||
|
|
||||||
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
|
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
|
||||||
"myremote:bucket" or maybe "myremote:" (not recommended).`,
|
"myremote:bucket" or maybe "myremote:" (not recommended).`,
|
||||||
}, {
|
}, {
|
||||||
Name: "chunk_size",
|
Name: "chunk_size",
|
||||||
Advanced: false,
|
Advanced: false,
|
||||||
Default: fs.SizeSuffix(2147483648), // 2 GiB
|
Default: fs.SizeSuffix(2147483648), // 2GB
|
||||||
Help: `Files larger than chunk size will be split in chunks.`,
|
Help: `Files larger than chunk size will be split in chunks.`,
|
||||||
}, {
|
}, {
|
||||||
Name: "name_format",
|
Name: "name_format",
|
||||||
@@ -164,7 +163,6 @@ Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
|
|||||||
Hide: fs.OptionHideCommandLine,
|
Hide: fs.OptionHideCommandLine,
|
||||||
Default: `*.rclone_chunk.###`,
|
Default: `*.rclone_chunk.###`,
|
||||||
Help: `String format of chunk file names.
|
Help: `String format of chunk file names.
|
||||||
|
|
||||||
The two placeholders are: base file name (*) and chunk number (#...).
|
The two placeholders are: base file name (*) and chunk number (#...).
|
||||||
There must be one and only one asterisk and one or more consecutive hash characters.
|
There must be one and only one asterisk and one or more consecutive hash characters.
|
||||||
If chunk number has less digits than the number of hashes, it is left-padded by zeros.
|
If chunk number has less digits than the number of hashes, it is left-padded by zeros.
|
||||||
@@ -176,57 +174,48 @@ Possible chunk files are ignored if their name does not match given format.`,
|
|||||||
Hide: fs.OptionHideCommandLine,
|
Hide: fs.OptionHideCommandLine,
|
||||||
Default: 1,
|
Default: 1,
|
||||||
Help: `Minimum valid chunk number. Usually 0 or 1.
|
Help: `Minimum valid chunk number. Usually 0 or 1.
|
||||||
|
|
||||||
By default chunk numbers start from 1.`,
|
By default chunk numbers start from 1.`,
|
||||||
}, {
|
}, {
|
||||||
Name: "meta_format",
|
Name: "meta_format",
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
Hide: fs.OptionHideCommandLine,
|
Hide: fs.OptionHideCommandLine,
|
||||||
Default: "simplejson",
|
Default: "simplejson",
|
||||||
Help: `Format of the metadata object or "none".
|
Help: `Format of the metadata object or "none". By default "simplejson".
|
||||||
|
|
||||||
By default "simplejson".
|
|
||||||
Metadata is a small JSON file named after the composite file.`,
|
Metadata is a small JSON file named after the composite file.`,
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "none",
|
Value: "none",
|
||||||
Help: `Do not use metadata files at all.
|
Help: `Do not use metadata files at all. Requires hash type "none".`,
|
||||||
Requires hash type "none".`,
|
|
||||||
}, {
|
}, {
|
||||||
Value: "simplejson",
|
Value: "simplejson",
|
||||||
Help: `Simple JSON supports hash sums and chunk validation.
|
Help: `Simple JSON supports hash sums and chunk validation.
|
||||||
|
|
||||||
It has the following fields: ver, size, nchunks, md5, sha1.`,
|
It has the following fields: ver, size, nchunks, md5, sha1.`,
|
||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "hash_type",
|
Name: "hash_type",
|
||||||
Advanced: false,
|
Advanced: false,
|
||||||
Default: "md5",
|
Default: "md5",
|
||||||
Help: `Choose how chunker handles hash sums.
|
Help: `Choose how chunker handles hash sums. All modes but "none" require metadata.`,
|
||||||
|
|
||||||
All modes but "none" require metadata.`,
|
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "none",
|
Value: "none",
|
||||||
Help: `Pass any hash supported by wrapped remote for non-chunked files.
|
Help: `Pass any hash supported by wrapped remote for non-chunked files, return nothing otherwise`,
|
||||||
Return nothing otherwise.`,
|
|
||||||
}, {
|
}, {
|
||||||
Value: "md5",
|
Value: "md5",
|
||||||
Help: `MD5 for composite files.`,
|
Help: `MD5 for composite files`,
|
||||||
}, {
|
}, {
|
||||||
Value: "sha1",
|
Value: "sha1",
|
||||||
Help: `SHA1 for composite files.`,
|
Help: `SHA1 for composite files`,
|
||||||
}, {
|
}, {
|
||||||
Value: "md5all",
|
Value: "md5all",
|
||||||
Help: `MD5 for all files.`,
|
Help: `MD5 for all files`,
|
||||||
}, {
|
}, {
|
||||||
Value: "sha1all",
|
Value: "sha1all",
|
||||||
Help: `SHA1 for all files.`,
|
Help: `SHA1 for all files`,
|
||||||
}, {
|
}, {
|
||||||
Value: "md5quick",
|
Value: "md5quick",
|
||||||
Help: `Copying a file to chunker will request MD5 from the source.
|
Help: `Copying a file to chunker will request MD5 from the source falling back to SHA1 if unsupported`,
|
||||||
Falling back to SHA1 if unsupported.`,
|
|
||||||
}, {
|
}, {
|
||||||
Value: "sha1quick",
|
Value: "sha1quick",
|
||||||
Help: `Similar to "md5quick" but prefers SHA1 over MD5.`,
|
Help: `Similar to "md5quick" but prefers SHA1 over MD5`,
|
||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "fail_hard",
|
Name: "fail_hard",
|
||||||
@@ -443,10 +432,10 @@ func (f *Fs) setHashType(hashType string) error {
|
|||||||
f.hashFallback = true
|
f.hashFallback = true
|
||||||
case "md5all":
|
case "md5all":
|
||||||
f.useMD5 = true
|
f.useMD5 = true
|
||||||
f.hashAll = !f.base.Hashes().Contains(hash.MD5) || f.base.Features().SlowHash
|
f.hashAll = !f.base.Hashes().Contains(hash.MD5)
|
||||||
case "sha1all":
|
case "sha1all":
|
||||||
f.useSHA1 = true
|
f.useSHA1 = true
|
||||||
f.hashAll = !f.base.Hashes().Contains(hash.SHA1) || f.base.Features().SlowHash
|
f.hashAll = !f.base.Hashes().Contains(hash.SHA1)
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("unsupported hash type '%s'", hashType)
|
return fmt.Errorf("unsupported hash type '%s'", hashType)
|
||||||
}
|
}
|
||||||
@@ -823,7 +812,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
|
|||||||
tempEntries = append(tempEntries, wrapDir)
|
tempEntries = append(tempEntries, wrapDir)
|
||||||
default:
|
default:
|
||||||
if f.opt.FailHard {
|
if f.opt.FailHard {
|
||||||
return nil, fmt.Errorf("unknown object type %T", entry)
|
return nil, fmt.Errorf("Unknown object type %T", entry)
|
||||||
}
|
}
|
||||||
fs.Debugf(f, "unknown object type %T", entry)
|
fs.Debugf(f, "unknown object type %T", entry)
|
||||||
}
|
}
|
||||||
@@ -1110,7 +1099,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
|
|||||||
|
|
||||||
switch o.f.opt.MetaFormat {
|
switch o.f.opt.MetaFormat {
|
||||||
case "simplejson":
|
case "simplejson":
|
||||||
if len(data) > maxMetadataSizeWritten {
|
if data != nil && len(data) > maxMetadataSizeWritten {
|
||||||
return "", nil // this was likely not a metadata object, return empty xactID but don't throw error
|
return "", nil // this was likely not a metadata object, return empty xactID but don't throw error
|
||||||
}
|
}
|
||||||
var metadata metaSimpleJSON
|
var metadata metaSimpleJSON
|
||||||
@@ -1225,7 +1214,7 @@ func (f *Fs) put(
|
|||||||
// and skips the "EOF" read. Hence, switch to next limit here.
|
// and skips the "EOF" read. Hence, switch to next limit here.
|
||||||
if !(c.chunkLimit == 0 || c.chunkLimit == c.chunkSize || c.sizeTotal == -1 || c.done) {
|
if !(c.chunkLimit == 0 || c.chunkLimit == c.chunkSize || c.sizeTotal == -1 || c.done) {
|
||||||
silentlyRemove(ctx, chunk)
|
silentlyRemove(ctx, chunk)
|
||||||
return nil, fmt.Errorf("destination ignored %d data bytes", c.chunkLimit)
|
return nil, fmt.Errorf("Destination ignored %d data bytes", c.chunkLimit)
|
||||||
}
|
}
|
||||||
c.chunkLimit = c.chunkSize
|
c.chunkLimit = c.chunkSize
|
||||||
|
|
||||||
@@ -1234,7 +1223,7 @@ func (f *Fs) put(
|
|||||||
|
|
||||||
// Validate uploaded size
|
// Validate uploaded size
|
||||||
if c.sizeTotal != -1 && c.readCount != c.sizeTotal {
|
if c.sizeTotal != -1 && c.readCount != c.sizeTotal {
|
||||||
return nil, fmt.Errorf("incorrect upload size %d != %d", c.readCount, c.sizeTotal)
|
return nil, fmt.Errorf("Incorrect upload size %d != %d", c.readCount, c.sizeTotal)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check for input that looks like valid metadata
|
// Check for input that looks like valid metadata
|
||||||
@@ -1271,7 +1260,7 @@ func (f *Fs) put(
|
|||||||
sizeTotal += chunk.Size()
|
sizeTotal += chunk.Size()
|
||||||
}
|
}
|
||||||
if sizeTotal != c.readCount {
|
if sizeTotal != c.readCount {
|
||||||
return nil, fmt.Errorf("incorrect chunks size %d != %d", sizeTotal, c.readCount)
|
return nil, fmt.Errorf("Incorrect chunks size %d != %d", sizeTotal, c.readCount)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If previous object was chunked, remove its chunks
|
// If previous object was chunked, remove its chunks
|
||||||
@@ -1459,7 +1448,7 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
|
|||||||
c.accountBytes(size)
|
c.accountBytes(size)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
const bufLen = 1048576 // 1 MiB
|
const bufLen = 1048576 // 1MB
|
||||||
buf := make([]byte, bufLen)
|
buf := make([]byte, bufLen)
|
||||||
for size > 0 {
|
for size > 0 {
|
||||||
n := size
|
n := size
|
||||||
@@ -2451,7 +2440,7 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1,
|
|||||||
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
|
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
|
||||||
// Be strict about JSON format
|
// Be strict about JSON format
|
||||||
// to reduce possibility that a random small file resembles metadata.
|
// to reduce possibility that a random small file resembles metadata.
|
||||||
if len(data) > maxMetadataSizeWritten {
|
if data != nil && len(data) > maxMetadataSizeWritten {
|
||||||
return nil, false, ErrMetaTooBig
|
return nil, false, ErrMetaTooBig
|
||||||
}
|
}
|
||||||
if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
|
if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
|
||||||
|
|||||||
@@ -12,8 +12,6 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
|
||||||
"github.com/rclone/rclone/fs/fspath"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
"github.com/rclone/rclone/fs/object"
|
"github.com/rclone/rclone/fs/object"
|
||||||
"github.com/rclone/rclone/fs/operations"
|
"github.com/rclone/rclone/fs/operations"
|
||||||
@@ -35,35 +33,11 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
|
|||||||
fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
|
fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
|
||||||
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
|
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
|
||||||
Path: fmt.Sprintf("chunker-upload-%dk", kilobytes),
|
Path: fmt.Sprintf("chunker-upload-%dk", kilobytes),
|
||||||
Size: int64(kilobytes) * int64(fs.Kibi),
|
Size: int64(kilobytes) * int64(fs.KibiByte),
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
type settings map[string]interface{}
|
|
||||||
|
|
||||||
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
|
|
||||||
fsName := strings.Split(f.Name(), "{")[0] // strip off hash
|
|
||||||
configMap := configmap.Simple{}
|
|
||||||
for key, val := range opts {
|
|
||||||
configMap[key] = fmt.Sprintf("%v", val)
|
|
||||||
}
|
|
||||||
rpath := fspath.JoinRootPath(f.Root(), path)
|
|
||||||
remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), rpath)
|
|
||||||
fixFs, err := fs.NewFs(ctx, remote)
|
|
||||||
require.NoError(t, err)
|
|
||||||
return fixFs
|
|
||||||
}
|
|
||||||
|
|
||||||
var mtime1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
|
|
||||||
|
|
||||||
func testPutFile(ctx context.Context, t *testing.T, f fs.Fs, name, contents, message string, check bool) fs.Object {
|
|
||||||
item := fstest.Item{Path: name, ModTime: mtime1}
|
|
||||||
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
|
|
||||||
assert.NotNil(t, obj, message)
|
|
||||||
return obj
|
|
||||||
}
|
|
||||||
|
|
||||||
// test chunk name parser
|
// test chunk name parser
|
||||||
func testChunkNameFormat(t *testing.T, f *Fs) {
|
func testChunkNameFormat(t *testing.T, f *Fs) {
|
||||||
saveOpt := f.opt
|
saveOpt := f.opt
|
||||||
@@ -643,13 +617,22 @@ func testMetadataInput(t *testing.T, f *Fs) {
|
|||||||
}()
|
}()
|
||||||
f.opt.FailHard = false
|
f.opt.FailHard = false
|
||||||
|
|
||||||
|
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||||
|
|
||||||
|
putFile := func(f fs.Fs, name, contents, message string, check bool) fs.Object {
|
||||||
|
item := fstest.Item{Path: name, ModTime: modTime}
|
||||||
|
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
|
||||||
|
assert.NotNil(t, obj, message)
|
||||||
|
return obj
|
||||||
|
}
|
||||||
|
|
||||||
runSubtest := func(contents, name string) {
|
runSubtest := func(contents, name string) {
|
||||||
description := fmt.Sprintf("file with %s metadata", name)
|
description := fmt.Sprintf("file with %s metadata", name)
|
||||||
filename := path.Join(dir, name)
|
filename := path.Join(dir, name)
|
||||||
require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")
|
require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")
|
||||||
|
|
||||||
part := testPutFile(ctx, t, f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
|
part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
|
||||||
_ = testPutFile(ctx, t, f, filename, contents, "upload "+description, false)
|
_ = putFile(f, filename, contents, "upload "+description, false)
|
||||||
|
|
||||||
obj, err := f.NewObject(ctx, filename)
|
obj, err := f.NewObject(ctx, filename)
|
||||||
assert.NoError(t, err, "access "+description)
|
assert.NoError(t, err, "access "+description)
|
||||||
@@ -695,7 +678,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
|
|||||||
|
|
||||||
// Test that chunker refuses to change on objects with future/unknown metadata
|
// Test that chunker refuses to change on objects with future/unknown metadata
|
||||||
func testFutureProof(t *testing.T, f *Fs) {
|
func testFutureProof(t *testing.T, f *Fs) {
|
||||||
if !f.useMeta {
|
if f.opt.MetaFormat == "none" {
|
||||||
t.Skip("this test requires metadata support")
|
t.Skip("this test requires metadata support")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -861,44 +844,6 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
|
|||||||
_ = operations.Purge(ctx, f.base, dir)
|
_ = operations.Purge(ctx, f.base, dir)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test that md5all creates metadata even for small files
|
|
||||||
func testMD5AllSlow(t *testing.T, f *Fs) {
|
|
||||||
ctx := context.Background()
|
|
||||||
fsResult := deriveFs(ctx, t, f, "md5all", settings{
|
|
||||||
"chunk_size": "1P",
|
|
||||||
"name_format": "*.#",
|
|
||||||
"hash_type": "md5all",
|
|
||||||
"transactions": "rename",
|
|
||||||
"meta_format": "simplejson",
|
|
||||||
})
|
|
||||||
chunkFs, ok := fsResult.(*Fs)
|
|
||||||
require.True(t, ok, "fs must be a chunker remote")
|
|
||||||
baseFs := chunkFs.base
|
|
||||||
if !baseFs.Features().SlowHash {
|
|
||||||
t.Skipf("this test needs a base fs with slow hash, e.g. local")
|
|
||||||
}
|
|
||||||
|
|
||||||
assert.True(t, chunkFs.useMD5, "must use md5")
|
|
||||||
assert.True(t, chunkFs.hashAll, "must hash all files")
|
|
||||||
|
|
||||||
_ = testPutFile(ctx, t, chunkFs, "file", "-", "error", true)
|
|
||||||
obj, err := chunkFs.NewObject(ctx, "file")
|
|
||||||
require.NoError(t, err)
|
|
||||||
sum, err := obj.Hash(ctx, hash.MD5)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, "336d5ebc5436534e61d16e63ddfca327", sum)
|
|
||||||
|
|
||||||
list, err := baseFs.List(ctx, "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, 2, len(list))
|
|
||||||
_, err = baseFs.NewObject(ctx, "file")
|
|
||||||
assert.NoError(t, err, "metadata must be created")
|
|
||||||
_, err = baseFs.NewObject(ctx, "file.1")
|
|
||||||
assert.NoError(t, err, "first chunk must be created")
|
|
||||||
|
|
||||||
require.NoError(t, operations.Purge(ctx, baseFs, ""))
|
|
||||||
}
|
|
||||||
|
|
||||||
// InternalTest dispatches all internal tests
|
// InternalTest dispatches all internal tests
|
||||||
func (f *Fs) InternalTest(t *testing.T) {
|
func (f *Fs) InternalTest(t *testing.T) {
|
||||||
t.Run("PutLarge", func(t *testing.T) {
|
t.Run("PutLarge", func(t *testing.T) {
|
||||||
@@ -931,9 +876,6 @@ func (f *Fs) InternalTest(t *testing.T) {
|
|||||||
t.Run("ChunkerServerSideMove", func(t *testing.T) {
|
t.Run("ChunkerServerSideMove", func(t *testing.T) {
|
||||||
testChunkerServerSideMove(t, f)
|
testChunkerServerSideMove(t, f)
|
||||||
})
|
})
|
||||||
t.Run("MD5AllSlow", func(t *testing.T) {
|
|
||||||
testMD5AllSlow(t, f)
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ fstests.InternalTester = (*Fs)(nil)
|
var _ fstests.InternalTester = (*Fs)(nil)
|
||||||
|
|||||||
@@ -36,7 +36,7 @@ import (
|
|||||||
// Globals
|
// Globals
|
||||||
const (
|
const (
|
||||||
initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently
|
initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently
|
||||||
maxChunkSize = 8388608 // at 256 KiB and 8 MiB.
|
maxChunkSize = 8388608 // at 256KB and 8 MB.
|
||||||
|
|
||||||
bufferSize = 8388608
|
bufferSize = 8388608
|
||||||
heuristicBytes = 1048576
|
heuristicBytes = 1048576
|
||||||
@@ -53,7 +53,7 @@ const (
|
|||||||
Gzip = 2
|
Gzip = 2
|
||||||
)
|
)
|
||||||
|
|
||||||
var nameRegexp = regexp.MustCompile("^(.+?)\\.([A-Za-z0-9-_]{11})$")
|
var nameRegexp = regexp.MustCompile("^(.+?)\\.([A-Za-z0-9+_]{11})$")
|
||||||
|
|
||||||
// Register with Fs
|
// Register with Fs
|
||||||
func init() {
|
func init() {
|
||||||
@@ -84,11 +84,11 @@ func init() {
|
|||||||
Help: `GZIP compression level (-2 to 9).
|
Help: `GZIP compression level (-2 to 9).
|
||||||
|
|
||||||
Generally -1 (default, equivalent to 5) is recommended.
|
Generally -1 (default, equivalent to 5) is recommended.
|
||||||
Levels 1 to 9 increase compression at the cost of speed. Going past 6
|
Levels 1 to 9 increase compressiong at the cost of speed.. Going past 6
|
||||||
generally offers very little return.
|
generally offers very little return.
|
||||||
|
|
||||||
Level -2 uses Huffmann encoding only. Only use if you know what you
|
Level -2 uses Huffmann encoding only. Only use if you now what you
|
||||||
are doing.
|
are doing
|
||||||
Level 0 turns off compression.`,
|
Level 0 turns off compression.`,
|
||||||
Default: sgzip.DefaultCompression,
|
Default: sgzip.DefaultCompression,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
@@ -98,8 +98,8 @@ Level 0 turns off compression.`,
|
|||||||
In this case the compressed file will need to be cached to determine
|
In this case the compressed file will need to be cached to determine
|
||||||
it's size.
|
it's size.
|
||||||
|
|
||||||
Files smaller than this limit will be cached in RAM, files larger than
|
Files smaller than this limit will be cached in RAM, file larger than
|
||||||
this limit will be cached on disk.`,
|
this limit will be cached on disk`,
|
||||||
Default: fs.SizeSuffix(20 * 1024 * 1024),
|
Default: fs.SizeSuffix(20 * 1024 * 1024),
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}},
|
}},
|
||||||
@@ -1260,7 +1260,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
|
|||||||
return o.Object.Open(ctx, options...)
|
return o.Object.Open(ctx, options...)
|
||||||
}
|
}
|
||||||
// Get offset and limit from OpenOptions, pass the rest to the underlying remote
|
// Get offset and limit from OpenOptions, pass the rest to the underlying remote
|
||||||
var openOptions = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
|
var openOptions []fs.OpenOption = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
|
||||||
var offset, limit int64 = 0, -1
|
var offset, limit int64 = 0, -1
|
||||||
for _, option := range options {
|
for _, option := range options {
|
||||||
switch x := option.(type) {
|
switch x := option.(type) {
|
||||||
|
|||||||
@@ -12,14 +12,12 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/rclone/rclone/backend/crypt/pkcs7"
|
"github.com/rclone/rclone/backend/crypt/pkcs7"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/accounting"
|
"github.com/rclone/rclone/fs/accounting"
|
||||||
"github.com/rclone/rclone/lib/version"
|
|
||||||
"github.com/rfjakob/eme"
|
"github.com/rfjakob/eme"
|
||||||
"golang.org/x/crypto/nacl/secretbox"
|
"golang.org/x/crypto/nacl/secretbox"
|
||||||
"golang.org/x/crypto/scrypt"
|
"golang.org/x/crypto/scrypt"
|
||||||
@@ -444,32 +442,11 @@ func (c *Cipher) encryptFileName(in string) string {
|
|||||||
if !c.dirNameEncrypt && i != (len(segments)-1) {
|
if !c.dirNameEncrypt && i != (len(segments)-1) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Strip version string so that only the non-versioned part
|
|
||||||
// of the file name gets encrypted/obfuscated
|
|
||||||
hasVersion := false
|
|
||||||
var t time.Time
|
|
||||||
if i == (len(segments)-1) && version.Match(segments[i]) {
|
|
||||||
var s string
|
|
||||||
t, s = version.Remove(segments[i])
|
|
||||||
// version.Remove can fail, in which case it returns segments[i]
|
|
||||||
if s != segments[i] {
|
|
||||||
segments[i] = s
|
|
||||||
hasVersion = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.mode == NameEncryptionStandard {
|
if c.mode == NameEncryptionStandard {
|
||||||
segments[i] = c.encryptSegment(segments[i])
|
segments[i] = c.encryptSegment(segments[i])
|
||||||
} else {
|
} else {
|
||||||
segments[i] = c.obfuscateSegment(segments[i])
|
segments[i] = c.obfuscateSegment(segments[i])
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add back a version to the encrypted/obfuscated
|
|
||||||
// file name, if we stripped it off earlier
|
|
||||||
if hasVersion {
|
|
||||||
segments[i] = version.Add(segments[i], t)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return strings.Join(segments, "/")
|
return strings.Join(segments, "/")
|
||||||
}
|
}
|
||||||
@@ -500,21 +477,6 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
|
|||||||
if !c.dirNameEncrypt && i != (len(segments)-1) {
|
if !c.dirNameEncrypt && i != (len(segments)-1) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Strip version string so that only the non-versioned part
|
|
||||||
// of the file name gets decrypted/deobfuscated
|
|
||||||
hasVersion := false
|
|
||||||
var t time.Time
|
|
||||||
if i == (len(segments)-1) && version.Match(segments[i]) {
|
|
||||||
var s string
|
|
||||||
t, s = version.Remove(segments[i])
|
|
||||||
// version.Remove can fail, in which case it returns segments[i]
|
|
||||||
if s != segments[i] {
|
|
||||||
segments[i] = s
|
|
||||||
hasVersion = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.mode == NameEncryptionStandard {
|
if c.mode == NameEncryptionStandard {
|
||||||
segments[i], err = c.decryptSegment(segments[i])
|
segments[i], err = c.decryptSegment(segments[i])
|
||||||
} else {
|
} else {
|
||||||
@@ -524,12 +486,6 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add back a version to the decrypted/deobfuscated
|
|
||||||
// file name, if we stripped it off earlier
|
|
||||||
if hasVersion {
|
|
||||||
segments[i] = version.Add(segments[i], t)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return strings.Join(segments, "/"), nil
|
return strings.Join(segments, "/"), nil
|
||||||
}
|
}
|
||||||
@@ -538,19 +494,11 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
|
|||||||
func (c *Cipher) DecryptFileName(in string) (string, error) {
|
func (c *Cipher) DecryptFileName(in string) (string, error) {
|
||||||
if c.mode == NameEncryptionOff {
|
if c.mode == NameEncryptionOff {
|
||||||
remainingLength := len(in) - len(encryptedSuffix)
|
remainingLength := len(in) - len(encryptedSuffix)
|
||||||
if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
|
if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) {
|
||||||
|
return in[:remainingLength], nil
|
||||||
|
}
|
||||||
return "", ErrorNotAnEncryptedFile
|
return "", ErrorNotAnEncryptedFile
|
||||||
}
|
}
|
||||||
decrypted := in[:remainingLength]
|
|
||||||
if version.Match(decrypted) {
|
|
||||||
_, unversioned := version.Remove(decrypted)
|
|
||||||
if unversioned == "" {
|
|
||||||
return "", ErrorNotAnEncryptedFile
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Leave the version string on, if it was there
|
|
||||||
return decrypted, nil
|
|
||||||
}
|
|
||||||
return c.decryptFileName(in)
|
return c.decryptFileName(in)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -160,29 +160,22 @@ func TestEncryptFileName(t *testing.T) {
|
|||||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
|
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
|
||||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
|
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
|
||||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
|
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
|
||||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
|
|
||||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
|
|
||||||
// Standard mode with directory name encryption off
|
// Standard mode with directory name encryption off
|
||||||
c, _ = newCipher(NameEncryptionStandard, "", "", false)
|
c, _ = newCipher(NameEncryptionStandard, "", "", false)
|
||||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
|
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
|
||||||
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
|
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
|
||||||
assert.Equal(t, "1/12/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
|
assert.Equal(t, "1/12/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
|
||||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
|
|
||||||
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
|
|
||||||
// Now off mode
|
// Now off mode
|
||||||
c, _ = newCipher(NameEncryptionOff, "", "", true)
|
c, _ = newCipher(NameEncryptionOff, "", "", true)
|
||||||
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
|
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
|
||||||
// Obfuscation mode
|
// Obfuscation mode
|
||||||
c, _ = newCipher(NameEncryptionObfuscated, "", "", true)
|
c, _ = newCipher(NameEncryptionObfuscated, "", "", true)
|
||||||
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
|
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
|
||||||
assert.Equal(t, "49.6/99.23/150.890/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
|
|
||||||
assert.Equal(t, "49.6/99.23/150.890/162.uryyB-v2001-02-03-040506-123.GKG", c.EncryptFileName("1/12/123/hello-v2001-02-03-040506-123.txt"))
|
|
||||||
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
|
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
|
||||||
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
|
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
|
||||||
// Obfuscation mode with directory name encryption off
|
// Obfuscation mode with directory name encryption off
|
||||||
c, _ = newCipher(NameEncryptionObfuscated, "", "", false)
|
c, _ = newCipher(NameEncryptionObfuscated, "", "", false)
|
||||||
assert.Equal(t, "1/12/123/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
|
assert.Equal(t, "1/12/123/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
|
||||||
assert.Equal(t, "1/12/123/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
|
|
||||||
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
|
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
|
||||||
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
|
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
|
||||||
}
|
}
|
||||||
@@ -201,19 +194,14 @@ func TestDecryptFileName(t *testing.T) {
|
|||||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
|
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
|
||||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
|
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
|
||||||
{NameEncryptionStandard, false, "1/12/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
|
{NameEncryptionStandard, false, "1/12/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
|
||||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", "1-v2001-02-03-040506-123", nil},
|
|
||||||
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
|
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
|
||||||
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
|
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
|
||||||
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
|
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
|
||||||
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
|
|
||||||
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
|
|
||||||
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
|
|
||||||
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
|
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
|
||||||
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
|
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
|
||||||
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
|
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
|
||||||
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
|
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
|
||||||
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
|
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
|
||||||
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
|
|
||||||
} {
|
} {
|
||||||
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
|
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
|
||||||
actual, actualErr := c.DecryptFileName(test.in)
|
actual, actualErr := c.DecryptFileName(test.in)
|
||||||
|
|||||||
@@ -30,7 +30,7 @@ func init() {
|
|||||||
CommandHelp: commandHelp,
|
CommandHelp: commandHelp,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "remote",
|
Name: "remote",
|
||||||
Help: "Remote to encrypt/decrypt.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
||||||
Required: true,
|
Required: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "filename_encryption",
|
Name: "filename_encryption",
|
||||||
@@ -39,13 +39,13 @@ func init() {
|
|||||||
Examples: []fs.OptionExample{
|
Examples: []fs.OptionExample{
|
||||||
{
|
{
|
||||||
Value: "standard",
|
Value: "standard",
|
||||||
Help: "Encrypt the filenames.\nSee the docs for the details.",
|
Help: "Encrypt the filenames see the docs for the details.",
|
||||||
}, {
|
}, {
|
||||||
Value: "obfuscate",
|
Value: "obfuscate",
|
||||||
Help: "Very simple filename obfuscation.",
|
Help: "Very simple filename obfuscation.",
|
||||||
}, {
|
}, {
|
||||||
Value: "off",
|
Value: "off",
|
||||||
Help: "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
|
Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
@@ -71,7 +71,7 @@ NB If filename_encryption is "off" then this option will do nothing.`,
|
|||||||
Required: true,
|
Required: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "password2",
|
Name: "password2",
|
||||||
Help: "Password or pass phrase for salt.\n\nOptional but recommended.\nShould be different to the previous password.",
|
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
|
||||||
IsPassword: true,
|
IsPassword: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "server_side_across_configs",
|
Name: "server_side_across_configs",
|
||||||
@@ -363,11 +363,7 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
|
|||||||
// put implements Put or PutStream
|
// put implements Put or PutStream
|
||||||
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
|
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
|
||||||
if f.opt.NoDataEncryption {
|
if f.opt.NoDataEncryption {
|
||||||
o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
|
return put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
|
||||||
if err == nil && o != nil {
|
|
||||||
o = f.newObject(o)
|
|
||||||
}
|
|
||||||
return o, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Encrypt the data into wrappedIn
|
// Encrypt the data into wrappedIn
|
||||||
@@ -999,9 +995,6 @@ func (o *ObjectInfo) Size() int64 {
|
|||||||
if size < 0 {
|
if size < 0 {
|
||||||
return size
|
return size
|
||||||
}
|
}
|
||||||
if o.f.opt.NoDataEncryption {
|
|
||||||
return size
|
|
||||||
}
|
|
||||||
return o.f.cipher.EncryptedSize(size)
|
return o.f.cipher.EncryptedSize(size)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -14,6 +14,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
"mime"
|
"mime"
|
||||||
"net/http"
|
"net/http"
|
||||||
"path"
|
"path"
|
||||||
@@ -32,7 +33,6 @@ import (
|
|||||||
"github.com/rclone/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
"github.com/rclone/rclone/fs/config/obscure"
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
"github.com/rclone/rclone/fs/filter"
|
|
||||||
"github.com/rclone/rclone/fs/fserrors"
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
"github.com/rclone/rclone/fs/fshttp"
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
"github.com/rclone/rclone/fs/fspath"
|
"github.com/rclone/rclone/fs/fspath"
|
||||||
@@ -68,8 +68,8 @@ const (
|
|||||||
defaultScope = "drive"
|
defaultScope = "drive"
|
||||||
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
|
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
|
||||||
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
|
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
|
||||||
minChunkSize = fs.SizeSuffix(googleapi.MinUploadChunkSize)
|
minChunkSize = 256 * fs.KibiByte
|
||||||
defaultChunkSize = 8 * fs.Mebi
|
defaultChunkSize = 8 * fs.MebiByte
|
||||||
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
|
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
|
||||||
listRGrouping = 50 // number of IDs to search at once when using ListR
|
listRGrouping = 50 // number of IDs to search at once when using ListR
|
||||||
listRInputBuffer = 1000 // size of input buffer when using ListR
|
listRInputBuffer = 1000 // size of input buffer when using ListR
|
||||||
@@ -183,71 +183,32 @@ func init() {
|
|||||||
Description: "Google Drive",
|
Description: "Google Drive",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
CommandHelp: commandHelp,
|
CommandHelp: commandHelp,
|
||||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
Config: func(ctx context.Context, name string, m configmap.Mapper) {
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err := configstruct.Set(m, opt)
|
err := configstruct.Set(m, opt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "couldn't parse config into struct")
|
fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
switch config.State {
|
|
||||||
case "":
|
|
||||||
// Fill in the scopes
|
// Fill in the scopes
|
||||||
driveConfig.Scopes = driveScopes(opt.Scope)
|
driveConfig.Scopes = driveScopes(opt.Scope)
|
||||||
|
|
||||||
// Set the root_folder_id if using drive.appfolder
|
// Set the root_folder_id if using drive.appfolder
|
||||||
if driveScopesContainsAppFolder(driveConfig.Scopes) {
|
if driveScopesContainsAppFolder(driveConfig.Scopes) {
|
||||||
m.Set("root_folder_id", "appDataFolder")
|
m.Set("root_folder_id", "appDataFolder")
|
||||||
}
|
}
|
||||||
|
|
||||||
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
|
if opt.ServiceAccountFile == "" {
|
||||||
return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
|
err = oauthutil.Config(ctx, "drive", name, m, driveConfig, nil)
|
||||||
OAuth2Config: driveConfig,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return fs.ConfigGoto("teamdrive")
|
|
||||||
case "teamdrive":
|
|
||||||
if opt.TeamDriveID == "" {
|
|
||||||
return fs.ConfigConfirm("teamdrive_ok", false, "config_change_team_drive", "Configure this as a Shared Drive (Team Drive)?\n")
|
|
||||||
}
|
|
||||||
return fs.ConfigConfirm("teamdrive_change", false, "config_change_team_drive", fmt.Sprintf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID))
|
|
||||||
case "teamdrive_ok":
|
|
||||||
if config.Result == "false" {
|
|
||||||
m.Set("team_drive", "")
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return fs.ConfigGoto("teamdrive_config")
|
|
||||||
case "teamdrive_change":
|
|
||||||
if config.Result == "false" {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return fs.ConfigGoto("teamdrive_config")
|
|
||||||
case "teamdrive_config":
|
|
||||||
f, err := newFs(ctx, name, "", m)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to make Fs to list Shared Drives")
|
log.Fatalf("Failed to configure token: %v", err)
|
||||||
}
|
}
|
||||||
teamDrives, err := f.listTeamDrives(ctx)
|
}
|
||||||
|
err = configTeamDrive(ctx, opt, m, name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
log.Fatalf("Failed to configure Shared Drive: %v", err)
|
||||||
}
|
}
|
||||||
if len(teamDrives) == 0 {
|
|
||||||
return fs.ConfigError("", "No Shared Drives found in your account")
|
|
||||||
}
|
|
||||||
return fs.ConfigChoose("teamdrive_final", "config_team_drive", "Shared Drive", len(teamDrives), func(i int) (string, string) {
|
|
||||||
teamDrive := teamDrives[i]
|
|
||||||
return teamDrive.Id, teamDrive.Name
|
|
||||||
})
|
|
||||||
case "teamdrive_final":
|
|
||||||
driveID := config.Result
|
|
||||||
m.Set("team_drive", driveID)
|
|
||||||
m.Set("root_folder_id", "")
|
|
||||||
opt.TeamDriveID = driveID
|
|
||||||
opt.RootFolderID = ""
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("unknown state %q", config.State)
|
|
||||||
},
|
},
|
||||||
Options: append(driveOAuthOptions(), []fs.Option{{
|
Options: append(driveOAuthOptions(), []fs.Option{{
|
||||||
Name: "scope",
|
Name: "scope",
|
||||||
@@ -270,7 +231,7 @@ func init() {
|
|||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "root_folder_id",
|
Name: "root_folder_id",
|
||||||
Help: `ID of the root folder.
|
Help: `ID of the root folder
|
||||||
Leave blank normally.
|
Leave blank normally.
|
||||||
|
|
||||||
Fill in to access "Computers" folders (see docs), or for rclone to use
|
Fill in to access "Computers" folders (see docs), or for rclone to use
|
||||||
@@ -278,15 +239,15 @@ a non root folder as its starting point.
|
|||||||
`,
|
`,
|
||||||
}, {
|
}, {
|
||||||
Name: "service_account_file",
|
Name: "service_account_file",
|
||||||
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
|
Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
|
||||||
}, {
|
}, {
|
||||||
Name: "service_account_credentials",
|
Name: "service_account_credentials",
|
||||||
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||||
Hide: fs.OptionHideConfigurator,
|
Hide: fs.OptionHideConfigurator,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "team_drive",
|
Name: "team_drive",
|
||||||
Help: "ID of the Shared Drive (Team Drive).",
|
Help: "ID of the Shared Drive (Team Drive)",
|
||||||
Hide: fs.OptionHideConfigurator,
|
Hide: fs.OptionHideConfigurator,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
@@ -297,12 +258,12 @@ a non root folder as its starting point.
|
|||||||
}, {
|
}, {
|
||||||
Name: "use_trash",
|
Name: "use_trash",
|
||||||
Default: true,
|
Default: true,
|
||||||
Help: "Send files to the trash instead of deleting permanently.\n\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
|
Help: "Send files to the trash instead of deleting permanently.\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "skip_gdocs",
|
Name: "skip_gdocs",
|
||||||
Default: false,
|
Default: false,
|
||||||
Help: "Skip google documents in all listings.\n\nIf given, gdocs practically become invisible to rclone.",
|
Help: "Skip google documents in all listings.\nIf given, gdocs practically become invisible to rclone.",
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "skip_checksum_gphotos",
|
Name: "skip_checksum_gphotos",
|
||||||
@@ -335,7 +296,7 @@ commands (copy, sync, etc.), and with all other commands too.`,
|
|||||||
}, {
|
}, {
|
||||||
Name: "trashed_only",
|
Name: "trashed_only",
|
||||||
Default: false,
|
Default: false,
|
||||||
Help: "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.",
|
Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "starred_only",
|
Name: "starred_only",
|
||||||
@@ -345,7 +306,7 @@ commands (copy, sync, etc.), and with all other commands too.`,
|
|||||||
}, {
|
}, {
|
||||||
Name: "formats",
|
Name: "formats",
|
||||||
Default: "",
|
Default: "",
|
||||||
Help: "Deprecated: See export_formats.",
|
Help: "Deprecated: see export_formats",
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
Hide: fs.OptionHideConfigurator,
|
Hide: fs.OptionHideConfigurator,
|
||||||
}, {
|
}, {
|
||||||
@@ -361,12 +322,12 @@ commands (copy, sync, etc.), and with all other commands too.`,
|
|||||||
}, {
|
}, {
|
||||||
Name: "allow_import_name_change",
|
Name: "allow_import_name_change",
|
||||||
Default: false,
|
Default: false,
|
||||||
Help: "Allow the filetype to change when uploading Google docs.\n\nE.g. file.doc to file.docx. This will confuse sync and reupload every time.",
|
Help: "Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.",
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "use_created_date",
|
Name: "use_created_date",
|
||||||
Default: false,
|
Default: false,
|
||||||
Help: `Use file created date instead of modified date.
|
Help: `Use file created date instead of modified date.,
|
||||||
|
|
||||||
Useful when downloading data and you want the creation date used in
|
Useful when downloading data and you want the creation date used in
|
||||||
place of the last modified date.
|
place of the last modified date.
|
||||||
@@ -400,7 +361,7 @@ date is used.`,
|
|||||||
}, {
|
}, {
|
||||||
Name: "list_chunk",
|
Name: "list_chunk",
|
||||||
Default: 1000,
|
Default: 1000,
|
||||||
Help: "Size of listing chunk 100-1000, 0 to disable.",
|
Help: "Size of listing chunk 100-1000. 0 to disable.",
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "impersonate",
|
Name: "impersonate",
|
||||||
@@ -410,19 +371,17 @@ date is used.`,
|
|||||||
}, {
|
}, {
|
||||||
Name: "alternate_export",
|
Name: "alternate_export",
|
||||||
Default: false,
|
Default: false,
|
||||||
Help: "Deprecated: No longer needed.",
|
Help: "Deprecated: no longer needed",
|
||||||
Hide: fs.OptionHideBoth,
|
Hide: fs.OptionHideBoth,
|
||||||
}, {
|
}, {
|
||||||
Name: "upload_cutoff",
|
Name: "upload_cutoff",
|
||||||
Default: defaultChunkSize,
|
Default: defaultChunkSize,
|
||||||
Help: "Cutoff for switching to chunked upload.",
|
Help: "Cutoff for switching to chunked upload",
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "chunk_size",
|
Name: "chunk_size",
|
||||||
Default: defaultChunkSize,
|
Default: defaultChunkSize,
|
||||||
Help: `Upload chunk size.
|
Help: `Upload chunk size. Must a power of 2 >= 256k.
|
||||||
|
|
||||||
Must a power of 2 >= 256k.
|
|
||||||
|
|
||||||
Making this larger will improve performance, but note that each chunk
|
Making this larger will improve performance, but note that each chunk
|
||||||
is buffered in memory one per transfer.
|
is buffered in memory one per transfer.
|
||||||
@@ -492,7 +451,7 @@ configurations.`,
|
|||||||
}, {
|
}, {
|
||||||
Name: "disable_http2",
|
Name: "disable_http2",
|
||||||
Default: true,
|
Default: true,
|
||||||
Help: `Disable drive using http2.
|
Help: `Disable drive using http2
|
||||||
|
|
||||||
There is currently an unsolved issue with the google drive backend and
|
There is currently an unsolved issue with the google drive backend and
|
||||||
HTTP/2. HTTP/2 is therefore disabled by default for the drive backend
|
HTTP/2. HTTP/2 is therefore disabled by default for the drive backend
|
||||||
@@ -506,9 +465,9 @@ See: https://github.com/rclone/rclone/issues/3631
|
|||||||
}, {
|
}, {
|
||||||
Name: "stop_on_upload_limit",
|
Name: "stop_on_upload_limit",
|
||||||
Default: false,
|
Default: false,
|
||||||
Help: `Make upload limit errors be fatal.
|
Help: `Make upload limit errors be fatal
|
||||||
|
|
||||||
At the time of writing it is only possible to upload 750 GiB of data to
|
At the time of writing it is only possible to upload 750GB of data to
|
||||||
Google Drive a day (this is an undocumented limit). When this limit is
|
Google Drive a day (this is an undocumented limit). When this limit is
|
||||||
reached Google Drive produces a slightly different error message. When
|
reached Google Drive produces a slightly different error message. When
|
||||||
this flag is set it causes these errors to be fatal. These will stop
|
this flag is set it causes these errors to be fatal. These will stop
|
||||||
@@ -523,9 +482,9 @@ See: https://github.com/rclone/rclone/issues/3857
|
|||||||
}, {
|
}, {
|
||||||
Name: "stop_on_download_limit",
|
Name: "stop_on_download_limit",
|
||||||
Default: false,
|
Default: false,
|
||||||
Help: `Make download limit errors be fatal.
|
Help: `Make download limit errors be fatal
|
||||||
|
|
||||||
At the time of writing it is only possible to download 10 TiB of data from
|
At the time of writing it is only possible to download 10TB of data from
|
||||||
Google Drive a day (this is an undocumented limit). When this limit is
|
Google Drive a day (this is an undocumented limit). When this limit is
|
||||||
reached Google Drive produces a slightly different error message. When
|
reached Google Drive produces a slightly different error message. When
|
||||||
this flag is set it causes these errors to be fatal. These will stop
|
this flag is set it causes these errors to be fatal. These will stop
|
||||||
@@ -537,7 +496,7 @@ Google don't document so it may break in the future.
|
|||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "skip_shortcuts",
|
Name: "skip_shortcuts",
|
||||||
Help: `If set skip shortcut files.
|
Help: `If set skip shortcut files
|
||||||
|
|
||||||
Normally rclone dereferences shortcut files making them appear as if
|
Normally rclone dereferences shortcut files making them appear as if
|
||||||
they are the original file (see [the shortcuts section](#shortcuts)).
|
they are the original file (see [the shortcuts section](#shortcuts)).
|
||||||
@@ -563,7 +522,7 @@ If this flag is set then rclone will ignore shortcut files completely.
|
|||||||
} {
|
} {
|
||||||
for mimeType, extension := range m {
|
for mimeType, extension := range m {
|
||||||
if err := mime.AddExtensionType(extension, mimeType); err != nil {
|
if err := mime.AddExtensionType(extension, mimeType); err != nil {
|
||||||
fs.Errorf("Failed to register MIME type %q: %v", mimeType, err)
|
log.Fatalf("Failed to register MIME type %q: %v", mimeType, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -619,7 +578,6 @@ type Fs struct {
|
|||||||
client *http.Client // authorized client
|
client *http.Client // authorized client
|
||||||
rootFolderID string // the id of the root folder
|
rootFolderID string // the id of the root folder
|
||||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||||
lastQuery string // Last query string to check in unit tests
|
|
||||||
pacer *fs.Pacer // To pace the API calls
|
pacer *fs.Pacer // To pace the API calls
|
||||||
exportExtensions []string // preferred extensions to download docs
|
exportExtensions []string // preferred extensions to download docs
|
||||||
importMimeTypes []string // MIME types to convert to docs
|
importMimeTypes []string // MIME types to convert to docs
|
||||||
@@ -833,31 +791,11 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
|
|||||||
if filesOnly {
|
if filesOnly {
|
||||||
query = append(query, fmt.Sprintf("mimeType!='%s'", driveFolderType))
|
query = append(query, fmt.Sprintf("mimeType!='%s'", driveFolderType))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Constrain query using filter if this remote is a sync/copy/walk source.
|
|
||||||
if fi, use := filter.GetConfig(ctx), filter.GetUseFilter(ctx); fi != nil && use {
|
|
||||||
queryByTime := func(op string, tm time.Time) {
|
|
||||||
if tm.IsZero() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// https://developers.google.com/drive/api/v3/ref-search-terms#operators
|
|
||||||
// Query times use RFC 3339 format, default timezone is UTC
|
|
||||||
timeStr := tm.UTC().Format("2006-01-02T15:04:05")
|
|
||||||
term := fmt.Sprintf("(modifiedTime %s '%s' or mimeType = '%s')", op, timeStr, driveFolderType)
|
|
||||||
query = append(query, term)
|
|
||||||
}
|
|
||||||
queryByTime(">=", fi.ModTimeFrom)
|
|
||||||
queryByTime("<=", fi.ModTimeTo)
|
|
||||||
}
|
|
||||||
|
|
||||||
list := f.svc.Files.List()
|
list := f.svc.Files.List()
|
||||||
queryString := strings.Join(query, " and ")
|
if len(query) > 0 {
|
||||||
if queryString != "" {
|
list.Q(strings.Join(query, " and "))
|
||||||
list.Q(queryString)
|
// fmt.Printf("list Query = %q\n", query)
|
||||||
// fs.Debugf(f, "list query: %q", queryString)
|
|
||||||
}
|
}
|
||||||
f.lastQuery = queryString // for unit tests
|
|
||||||
|
|
||||||
if f.opt.ListChunk > 0 {
|
if f.opt.ListChunk > 0 {
|
||||||
list.PageSize(f.opt.ListChunk)
|
list.PageSize(f.opt.ListChunk)
|
||||||
}
|
}
|
||||||
@@ -1011,6 +949,48 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Figure out if the user wants to use a team drive
|
||||||
|
func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
|
||||||
|
ci := fs.GetConfig(ctx)
|
||||||
|
|
||||||
|
// Stop if we are running non-interactive config
|
||||||
|
if ci.AutoConfirm {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if opt.TeamDriveID == "" {
|
||||||
|
fmt.Printf("Configure this as a Shared Drive (Team Drive)?\n")
|
||||||
|
} else {
|
||||||
|
fmt.Printf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID)
|
||||||
|
}
|
||||||
|
if !config.Confirm(false) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
f, err := newFs(ctx, name, "", m)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "failed to make Fs to list Shared Drives")
|
||||||
|
}
|
||||||
|
fmt.Printf("Fetching Shared Drive list...\n")
|
||||||
|
teamDrives, err := f.listTeamDrives(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if len(teamDrives) == 0 {
|
||||||
|
fmt.Printf("No Shared Drives found in your account")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var driveIDs, driveNames []string
|
||||||
|
for _, teamDrive := range teamDrives {
|
||||||
|
driveIDs = append(driveIDs, teamDrive.Id)
|
||||||
|
driveNames = append(driveNames, teamDrive.Name)
|
||||||
|
}
|
||||||
|
driveID := config.Choose("Enter a Shared Drive ID", driveIDs, driveNames, true)
|
||||||
|
m.Set("team_drive", driveID)
|
||||||
|
m.Set("root_folder_id", "")
|
||||||
|
opt.TeamDriveID = driveID
|
||||||
|
opt.RootFolderID = ""
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// getClient makes an http client according to the options
|
// getClient makes an http client according to the options
|
||||||
func getClient(ctx context.Context, opt *Options) *http.Client {
|
func getClient(ctx context.Context, opt *Options) *http.Client {
|
||||||
t := fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
|
t := fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
|
||||||
@@ -1189,7 +1169,7 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
f.rootFolderID = rootID
|
f.rootFolderID = rootID
|
||||||
fs.Debugf(f, "'root_folder_id = %s' - save this in the config to speed up startup", rootID)
|
fs.Debugf(f, "root_folder_id = %q - save this in the config to speed up startup", rootID)
|
||||||
}
|
}
|
||||||
|
|
||||||
f.dirCache = dircache.New(f.root, f.rootFolderID, f)
|
f.dirCache = dircache.New(f.root, f.rootFolderID, f)
|
||||||
@@ -1352,8 +1332,8 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
|
|||||||
//
|
//
|
||||||
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
|
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
|
||||||
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.File) (fs.Object, error) {
|
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.File) (fs.Object, error) {
|
||||||
// If item has MD5 sum it is a file stored on drive
|
// If item has MD5 sum or a length it is a file stored on drive
|
||||||
if info.Md5Checksum != "" {
|
if info.Md5Checksum != "" || info.Size > 0 {
|
||||||
return f.newRegularObject(remote, info), nil
|
return f.newRegularObject(remote, info), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1376,7 +1356,7 @@ func (f *Fs) newObjectWithExportInfo(
|
|||||||
}
|
}
|
||||||
switch {
|
switch {
|
||||||
case info.MimeType == driveFolderType:
|
case info.MimeType == driveFolderType:
|
||||||
return nil, fs.ErrorIsDir
|
return nil, fs.ErrorNotAFile
|
||||||
case info.MimeType == shortcutMimeType:
|
case info.MimeType == shortcutMimeType:
|
||||||
// We can only get here if f.opt.SkipShortcuts is set
|
// We can only get here if f.opt.SkipShortcuts is set
|
||||||
// and not from a listing. This is unlikely.
|
// and not from a listing. This is unlikely.
|
||||||
@@ -1386,8 +1366,8 @@ func (f *Fs) newObjectWithExportInfo(
|
|||||||
// Pretend a dangling shortcut is a regular object
|
// Pretend a dangling shortcut is a regular object
|
||||||
// It will error if used, but appear in listings so it can be deleted
|
// It will error if used, but appear in listings so it can be deleted
|
||||||
return f.newRegularObject(remote, info), nil
|
return f.newRegularObject(remote, info), nil
|
||||||
case info.Md5Checksum != "":
|
case info.Md5Checksum != "" || info.Size > 0:
|
||||||
// If item has MD5 sum it is a file stored on drive
|
// If item has MD5 sum or a length it is a file stored on drive
|
||||||
return f.newRegularObject(remote, info), nil
|
return f.newRegularObject(remote, info), nil
|
||||||
case f.opt.SkipGdocs:
|
case f.opt.SkipGdocs:
|
||||||
fs.Debugf(remote, "Skipping google document type %q", info.MimeType)
|
fs.Debugf(remote, "Skipping google document type %q", info.MimeType)
|
||||||
@@ -2147,7 +2127,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
|
|||||||
// Don't retry, return a retry error instead
|
// Don't retry, return a retry error instead
|
||||||
err = f.pacer.CallNoRetry(func() (bool, error) {
|
err = f.pacer.CallNoRetry(func() (bool, error) {
|
||||||
info, err = f.svc.Files.Create(createInfo).
|
info, err = f.svc.Files.Create(createInfo).
|
||||||
Media(in, googleapi.ContentType(srcMimeType), googleapi.ChunkSize(0)).
|
Media(in, googleapi.ContentType(srcMimeType)).
|
||||||
Fields(partialFields).
|
Fields(partialFields).
|
||||||
SupportsAllDrives(true).
|
SupportsAllDrives(true).
|
||||||
KeepRevisionForever(f.opt.KeepRevisionForever).
|
KeepRevisionForever(f.opt.KeepRevisionForever).
|
||||||
@@ -2924,7 +2904,7 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
|
|||||||
}
|
}
|
||||||
isDir = true
|
isDir = true
|
||||||
} else if srcObj, err := srcFs.NewObject(ctx, srcPath); err != nil {
|
} else if srcObj, err := srcFs.NewObject(ctx, srcPath); err != nil {
|
||||||
if err != fs.ErrorIsDir {
|
if err != fs.ErrorNotAFile {
|
||||||
return nil, errors.Wrap(err, "can't find source")
|
return nil, errors.Wrap(err, "can't find source")
|
||||||
}
|
}
|
||||||
// source was a directory
|
// source was a directory
|
||||||
@@ -2944,7 +2924,7 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
|
|||||||
if err != fs.ErrorObjectNotFound {
|
if err != fs.ErrorObjectNotFound {
|
||||||
if err == nil {
|
if err == nil {
|
||||||
err = errors.New("existing file")
|
err = errors.New("existing file")
|
||||||
} else if err == fs.ErrorIsDir {
|
} else if err == fs.ErrorNotAFile {
|
||||||
err = errors.New("existing directory")
|
err = errors.New("existing directory")
|
||||||
}
|
}
|
||||||
return nil, errors.Wrap(err, "not overwriting shortcut target")
|
return nil, errors.Wrap(err, "not overwriting shortcut target")
|
||||||
@@ -3164,7 +3144,7 @@ account.
|
|||||||
|
|
||||||
Usage:
|
Usage:
|
||||||
|
|
||||||
rclone backend [-o config] drives drive:
|
rclone backend drives drive:
|
||||||
|
|
||||||
This will return a JSON list of objects like this
|
This will return a JSON list of objects like this
|
||||||
|
|
||||||
@@ -3181,22 +3161,6 @@ This will return a JSON list of objects like this
|
|||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
||||||
With the -o config parameter it will output the list in a format
|
|
||||||
suitable for adding to a config file to make aliases for all the
|
|
||||||
drives found.
|
|
||||||
|
|
||||||
[My Drive]
|
|
||||||
type = alias
|
|
||||||
remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
|
|
||||||
|
|
||||||
[Test Drive]
|
|
||||||
type = alias
|
|
||||||
remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
|
|
||||||
|
|
||||||
Adding this to the rclone config file will cause those team drives to
|
|
||||||
be accessible with the aliases shown. This may require manual editing
|
|
||||||
of the names.
|
|
||||||
|
|
||||||
`,
|
`,
|
||||||
}, {
|
}, {
|
||||||
Name: "untrash",
|
Name: "untrash",
|
||||||
@@ -3308,21 +3272,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
|||||||
}
|
}
|
||||||
return f.makeShortcut(ctx, arg[0], dstFs, arg[1])
|
return f.makeShortcut(ctx, arg[0], dstFs, arg[1])
|
||||||
case "drives":
|
case "drives":
|
||||||
drives, err := f.listTeamDrives(ctx)
|
return f.listTeamDrives(ctx)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if _, ok := opt["config"]; ok {
|
|
||||||
lines := []string{}
|
|
||||||
for _, drive := range drives {
|
|
||||||
lines = append(lines, "")
|
|
||||||
lines = append(lines, fmt.Sprintf("[%s]", drive.Name))
|
|
||||||
lines = append(lines, fmt.Sprintf("type = alias"))
|
|
||||||
lines = append(lines, fmt.Sprintf("remote = %s,team_drive=%s,root_folder_id=:", f.name, drive.Id))
|
|
||||||
}
|
|
||||||
return lines, nil
|
|
||||||
}
|
|
||||||
return drives, nil
|
|
||||||
case "untrash":
|
case "untrash":
|
||||||
dir := ""
|
dir := ""
|
||||||
if len(arg) > 0 {
|
if len(arg) > 0 {
|
||||||
@@ -3670,7 +3620,7 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
|
|||||||
// Don't retry, return a retry error instead
|
// Don't retry, return a retry error instead
|
||||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||||
info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
|
info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
|
||||||
Media(in, googleapi.ContentType(uploadMimeType), googleapi.ChunkSize(0)).
|
Media(in, googleapi.ContentType(uploadMimeType)).
|
||||||
Fields(partialFields).
|
Fields(partialFields).
|
||||||
SupportsAllDrives(true).
|
SupportsAllDrives(true).
|
||||||
KeepRevisionForever(o.fs.opt.KeepRevisionForever).
|
KeepRevisionForever(o.fs.opt.KeepRevisionForever).
|
||||||
|
|||||||
@@ -4,7 +4,6 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"mime"
|
"mime"
|
||||||
@@ -18,10 +17,8 @@ import (
|
|||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
_ "github.com/rclone/rclone/backend/local"
|
_ "github.com/rclone/rclone/backend/local"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/filter"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
"github.com/rclone/rclone/fs/operations"
|
"github.com/rclone/rclone/fs/operations"
|
||||||
"github.com/rclone/rclone/fs/sync"
|
|
||||||
"github.com/rclone/rclone/fstest"
|
"github.com/rclone/rclone/fstest"
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
"github.com/rclone/rclone/lib/random"
|
"github.com/rclone/rclone/lib/random"
|
||||||
@@ -464,81 +461,6 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
|
|
||||||
func (f *Fs) InternalTestAgeQuery(t *testing.T) {
|
|
||||||
opt := &filter.Opt{}
|
|
||||||
err := opt.MaxAge.Set("1h")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
flt, err := filter.NewFilter(opt)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
defCtx := context.Background()
|
|
||||||
fltCtx := filter.ReplaceConfig(defCtx, flt)
|
|
||||||
|
|
||||||
testCtx1 := fltCtx
|
|
||||||
testCtx2 := filter.SetUseFilter(testCtx1, true)
|
|
||||||
testCtx3, testCancel := context.WithCancel(testCtx2)
|
|
||||||
testCtx4 := filter.SetUseFilter(testCtx3, false)
|
|
||||||
testCancel()
|
|
||||||
assert.False(t, filter.GetUseFilter(testCtx1))
|
|
||||||
assert.True(t, filter.GetUseFilter(testCtx2))
|
|
||||||
assert.True(t, filter.GetUseFilter(testCtx3))
|
|
||||||
assert.False(t, filter.GetUseFilter(testCtx4))
|
|
||||||
|
|
||||||
subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), "agequery-testdir")
|
|
||||||
subFsResult, err := fs.NewFs(defCtx, subRemote)
|
|
||||||
require.NoError(t, err)
|
|
||||||
subFs, isDriveFs := subFsResult.(*Fs)
|
|
||||||
require.True(t, isDriveFs)
|
|
||||||
|
|
||||||
tempDir1, err := ioutil.TempDir("", "rclone-drive-agequery1-test")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() {
|
|
||||||
_ = os.RemoveAll(tempDir1)
|
|
||||||
}()
|
|
||||||
tempFs1, err := fs.NewFs(defCtx, tempDir1)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
tempDir2, err := ioutil.TempDir("", "rclone-drive-agequery2-test")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() {
|
|
||||||
_ = os.RemoveAll(tempDir2)
|
|
||||||
}()
|
|
||||||
tempFs2, err := fs.NewFs(defCtx, tempDir2)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
file1 := fstest.Item{ModTime: time.Now(), Path: "agequery.txt"}
|
|
||||||
_, _ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true)
|
|
||||||
|
|
||||||
// validate sync/copy
|
|
||||||
const timeQuery = "(modifiedTime >= '"
|
|
||||||
|
|
||||||
assert.NoError(t, sync.CopyDir(defCtx, subFs, tempFs1, false))
|
|
||||||
assert.NotContains(t, subFs.lastQuery, timeQuery)
|
|
||||||
|
|
||||||
assert.NoError(t, sync.CopyDir(fltCtx, subFs, tempFs1, false))
|
|
||||||
assert.Contains(t, subFs.lastQuery, timeQuery)
|
|
||||||
|
|
||||||
assert.NoError(t, sync.CopyDir(fltCtx, tempFs2, subFs, false))
|
|
||||||
assert.Contains(t, subFs.lastQuery, timeQuery)
|
|
||||||
|
|
||||||
assert.NoError(t, sync.CopyDir(defCtx, tempFs2, subFs, false))
|
|
||||||
assert.NotContains(t, subFs.lastQuery, timeQuery)
|
|
||||||
|
|
||||||
// validate list/walk
|
|
||||||
devNull, errOpen := os.OpenFile(os.DevNull, os.O_WRONLY, 0)
|
|
||||||
require.NoError(t, errOpen)
|
|
||||||
defer func() {
|
|
||||||
_ = devNull.Close()
|
|
||||||
}()
|
|
||||||
|
|
||||||
assert.NoError(t, operations.List(defCtx, subFs, devNull))
|
|
||||||
assert.NotContains(t, subFs.lastQuery, timeQuery)
|
|
||||||
|
|
||||||
assert.NoError(t, operations.List(fltCtx, subFs, devNull))
|
|
||||||
assert.Contains(t, subFs.lastQuery, timeQuery)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) InternalTest(t *testing.T) {
|
func (f *Fs) InternalTest(t *testing.T) {
|
||||||
// These tests all depend on each other so run them as nested tests
|
// These tests all depend on each other so run them as nested tests
|
||||||
t.Run("DocumentImport", func(t *testing.T) {
|
t.Run("DocumentImport", func(t *testing.T) {
|
||||||
@@ -556,7 +478,6 @@ func (f *Fs) InternalTest(t *testing.T) {
|
|||||||
t.Run("Shortcuts", f.InternalTestShortcuts)
|
t.Run("Shortcuts", f.InternalTestShortcuts)
|
||||||
t.Run("UnTrash", f.InternalTestUnTrash)
|
t.Run("UnTrash", f.InternalTestUnTrash)
|
||||||
t.Run("CopyID", f.InternalTestCopyID)
|
t.Run("CopyID", f.InternalTestCopyID)
|
||||||
t.Run("AgeQuery", f.InternalTestAgeQuery)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ fstests.InternalTester = (*Fs)(nil)
|
var _ fstests.InternalTester = (*Fs)(nil)
|
||||||
|
|||||||
@@ -1,358 +0,0 @@
|
|||||||
// This file contains the implementation of the sync batcher for uploads
|
|
||||||
//
|
|
||||||
// Dropbox rules say you can start as many batches as you want, but
|
|
||||||
// you may only have one batch being committed and must wait for the
|
|
||||||
// batch to be finished before committing another.
|
|
||||||
|
|
||||||
package dropbox
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
|
|
||||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/fserrors"
|
|
||||||
"github.com/rclone/rclone/lib/atexit"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
maxBatchSize = 1000 // max size the batch can be
|
|
||||||
defaultTimeoutSync = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
|
|
||||||
defaultTimeoutAsync = 10 * time.Second // kick off the batch if nothing added for this long (ssync)
|
|
||||||
defaultBatchSizeAsync = 100 // default batch size if async
|
|
||||||
)
|
|
||||||
|
|
||||||
// batcher holds info about the current items waiting for upload
|
|
||||||
type batcher struct {
|
|
||||||
f *Fs // Fs this batch is part of
|
|
||||||
mode string // configured batch mode
|
|
||||||
size int // maximum size for batch
|
|
||||||
timeout time.Duration // idle timeout for batch
|
|
||||||
async bool // whether we are using async batching
|
|
||||||
in chan batcherRequest // incoming items to batch
|
|
||||||
closed chan struct{} // close to indicate batcher shut down
|
|
||||||
atexit atexit.FnHandle // atexit handle
|
|
||||||
shutOnce sync.Once // make sure we shutdown once only
|
|
||||||
wg sync.WaitGroup // wait for shutdown
|
|
||||||
}
|
|
||||||
|
|
||||||
// batcherRequest holds an incoming request with a place for a reply
|
|
||||||
type batcherRequest struct {
|
|
||||||
commitInfo *files.UploadSessionFinishArg
|
|
||||||
result chan<- batcherResponse
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return true if batcherRequest is the quit request
|
|
||||||
func (br *batcherRequest) isQuit() bool {
|
|
||||||
return br.commitInfo == nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Send this to get the engine to quit
|
|
||||||
var quitRequest = batcherRequest{}
|
|
||||||
|
|
||||||
// batcherResponse holds a response to be delivered to clients waiting
|
|
||||||
// for a batch to complete.
|
|
||||||
type batcherResponse struct {
|
|
||||||
err error
|
|
||||||
entry *files.FileMetadata
|
|
||||||
}
|
|
||||||
|
|
||||||
// newBatcher creates a new batcher structure
|
|
||||||
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
|
|
||||||
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
|
|
||||||
if size > maxBatchSize || size < 0 {
|
|
||||||
return nil, errors.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
|
|
||||||
}
|
|
||||||
|
|
||||||
async := false
|
|
||||||
|
|
||||||
switch mode {
|
|
||||||
case "sync":
|
|
||||||
if size <= 0 {
|
|
||||||
ci := fs.GetConfig(ctx)
|
|
||||||
size = ci.Transfers
|
|
||||||
}
|
|
||||||
if timeout <= 0 {
|
|
||||||
timeout = defaultTimeoutSync
|
|
||||||
}
|
|
||||||
case "async":
|
|
||||||
if size <= 0 {
|
|
||||||
size = defaultBatchSizeAsync
|
|
||||||
}
|
|
||||||
if timeout <= 0 {
|
|
||||||
timeout = defaultTimeoutAsync
|
|
||||||
}
|
|
||||||
async = true
|
|
||||||
case "off":
|
|
||||||
size = 0
|
|
||||||
default:
|
|
||||||
return nil, errors.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
|
|
||||||
}
|
|
||||||
|
|
||||||
b := &batcher{
|
|
||||||
f: f,
|
|
||||||
mode: mode,
|
|
||||||
size: size,
|
|
||||||
timeout: timeout,
|
|
||||||
async: async,
|
|
||||||
in: make(chan batcherRequest, size),
|
|
||||||
closed: make(chan struct{}),
|
|
||||||
}
|
|
||||||
if b.Batching() {
|
|
||||||
b.atexit = atexit.Register(b.Shutdown)
|
|
||||||
b.wg.Add(1)
|
|
||||||
go b.commitLoop(context.Background())
|
|
||||||
}
|
|
||||||
return b, nil
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// Batching returns true if batching is active
|
|
||||||
func (b *batcher) Batching() bool {
|
|
||||||
return b.size > 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// finishBatch commits the batch, returning a batch status to poll or maybe complete
|
|
||||||
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (batchStatus *files.UploadSessionFinishBatchLaunch, err error) {
|
|
||||||
var arg = &files.UploadSessionFinishBatchArg{
|
|
||||||
Entries: items,
|
|
||||||
}
|
|
||||||
err = b.f.pacer.Call(func() (bool, error) {
|
|
||||||
batchStatus, err = b.f.srv.UploadSessionFinishBatch(arg)
|
|
||||||
// If error is insufficient space then don't retry
|
|
||||||
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
|
|
||||||
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
|
|
||||||
err = fserrors.NoRetryError(err)
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// after the first chunk is uploaded, we retry everything
|
|
||||||
return err != nil, err
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(err, "batch commit failed")
|
|
||||||
}
|
|
||||||
return batchStatus, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// finishBatchJobStatus waits for the batch to complete returning completed entries
|
|
||||||
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
|
|
||||||
if launchBatchStatus.AsyncJobId == "" {
|
|
||||||
return nil, errors.New("wait for batch completion: empty job ID")
|
|
||||||
}
|
|
||||||
var batchStatus *files.UploadSessionFinishBatchJobStatus
|
|
||||||
sleepTime := 100 * time.Millisecond
|
|
||||||
const maxSleepTime = 1 * time.Second
|
|
||||||
startTime := time.Now()
|
|
||||||
try := 1
|
|
||||||
for {
|
|
||||||
remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
|
|
||||||
if remaining < 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
err = b.f.pacer.Call(func() (bool, error) {
|
|
||||||
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
|
|
||||||
AsyncJobId: launchBatchStatus.AsyncJobId,
|
|
||||||
})
|
|
||||||
return shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
|
|
||||||
} else {
|
|
||||||
if batchStatus.Tag == "complete" {
|
|
||||||
fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
|
|
||||||
return batchStatus.Complete, nil
|
|
||||||
}
|
|
||||||
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
|
|
||||||
}
|
|
||||||
time.Sleep(sleepTime)
|
|
||||||
sleepTime *= 2
|
|
||||||
if sleepTime > maxSleepTime {
|
|
||||||
sleepTime = maxSleepTime
|
|
||||||
}
|
|
||||||
try++
|
|
||||||
}
|
|
||||||
if err == nil {
|
|
||||||
err = errors.New("batch didn't complete")
|
|
||||||
}
|
|
||||||
return nil, errors.Wrapf(err, "wait for batch failed after %d tries in %v", try, time.Since(startTime))
|
|
||||||
}
|
|
||||||
|
|
||||||
// commit a batch
|
|
||||||
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
|
|
||||||
// If commit fails then signal clients if sync
|
|
||||||
var signalled = b.async
|
|
||||||
defer func() {
|
|
||||||
if err != nil && signalled {
|
|
||||||
// Signal to clients that there was an error
|
|
||||||
for _, result := range results {
|
|
||||||
result <- batcherResponse{err: err}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
|
|
||||||
fs.Debugf(b.f, "Committing %s", desc)
|
|
||||||
|
|
||||||
// finalise the batch getting either a result or a job id to poll
|
|
||||||
batchStatus, err := b.finishBatch(ctx, items)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// check whether batch is complete
|
|
||||||
var complete *files.UploadSessionFinishBatchResult
|
|
||||||
switch batchStatus.Tag {
|
|
||||||
case "async_job_id":
|
|
||||||
// wait for batch to complete
|
|
||||||
complete, err = b.finishBatchJobStatus(ctx, batchStatus)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case "complete":
|
|
||||||
complete = batchStatus.Complete
|
|
||||||
default:
|
|
||||||
return errors.Errorf("batch returned unknown status %q", batchStatus.Tag)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check we got the right number of entries
|
|
||||||
entries := complete.Entries
|
|
||||||
if len(entries) != len(results) {
|
|
||||||
return errors.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Report results to clients
|
|
||||||
var (
|
|
||||||
errorTag = ""
|
|
||||||
errorCount = 0
|
|
||||||
)
|
|
||||||
for i := range results {
|
|
||||||
item := entries[i]
|
|
||||||
resp := batcherResponse{}
|
|
||||||
if item.Tag == "success" {
|
|
||||||
resp.entry = item.Success
|
|
||||||
} else {
|
|
||||||
errorCount++
|
|
||||||
errorTag = item.Tag
|
|
||||||
if item.Failure != nil {
|
|
||||||
errorTag = item.Failure.Tag
|
|
||||||
if item.Failure.LookupFailed != nil {
|
|
||||||
errorTag += "/" + item.Failure.LookupFailed.Tag
|
|
||||||
}
|
|
||||||
if item.Failure.Path != nil {
|
|
||||||
errorTag += "/" + item.Failure.Path.Tag
|
|
||||||
}
|
|
||||||
if item.Failure.PropertiesError != nil {
|
|
||||||
errorTag += "/" + item.Failure.PropertiesError.Tag
|
|
||||||
}
|
|
||||||
}
|
|
||||||
resp.err = errors.Errorf("batch upload failed: %s", errorTag)
|
|
||||||
}
|
|
||||||
if !b.async {
|
|
||||||
results[i] <- resp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Show signalled so no need to report error to clients from now on
|
|
||||||
signalled = true
|
|
||||||
|
|
||||||
// Report an error if any failed in the batch
|
|
||||||
if errorTag != "" {
|
|
||||||
return errors.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
|
|
||||||
}
|
|
||||||
|
|
||||||
fs.Debugf(b.f, "Committed %s", desc)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// commitLoop runs the commit engine in the background
//
// It accumulates requests from b.in into a batch and commits the
// batch either when it reaches b.size items or when it has been idle
// for b.timeout. It exits when it receives a quitRequest, committing
// any partial batch first. Must be run with b.wg incremented.
func (b *batcher) commitLoop(ctx context.Context) {
	var (
		items   []*files.UploadSessionFinishArg // current batch of uncommitted files
		results []chan<- batcherResponse        // current batch of clients awaiting results
		// idleTimer fires when the batch has been idle for b.timeout
		idleTimer = time.NewTimer(b.timeout)
		// commit commits the current batch and resets it
		commit = func() {
			err := b.commitBatch(ctx, items, results)
			if err != nil {
				fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
			}
			items, results = nil, nil
		}
	)
	defer b.wg.Done()
	defer idleTimer.Stop()
	// Stop the timer immediately - it is only armed (Reset below)
	// once there is at least one item waiting in the batch.
	idleTimer.Stop()

outer:
	for {
		select {
		case req := <-b.in:
			if req.isQuit() {
				break outer
			}
			items = append(items, req.commitInfo)
			results = append(results, req.result)
			// Stop before Reset so the timer is re-armed from now
			idleTimer.Stop()
			if len(items) >= b.size {
				// batch is full - commit straight away
				commit()
			} else {
				// otherwise wait up to b.timeout for more items
				idleTimer.Reset(b.timeout)
			}
		case <-idleTimer.C:
			if len(items) > 0 {
				fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
				commit()
			}
		}

	}
	// commit any remaining items
	if len(items) > 0 {
		commit()
	}
}
|
|
||||||
|
|
||||||
// Shutdown finishes any pending batches then shuts everything down
|
|
||||||
//
|
|
||||||
// Can be called from atexit handler
|
|
||||||
func (b *batcher) Shutdown() {
|
|
||||||
b.shutOnce.Do(func() {
|
|
||||||
atexit.Unregister(b.atexit)
|
|
||||||
fs.Infof(b.f, "Commiting uploads - please wait...")
|
|
||||||
// show that batcher is shutting down
|
|
||||||
close(b.closed)
|
|
||||||
// quit the commitLoop by sending a quitRequest message
|
|
||||||
//
|
|
||||||
// Note that we don't close b.in because that will
|
|
||||||
// cause write to closed channel in Commit when we are
|
|
||||||
// exiting due to a signal.
|
|
||||||
b.in <- quitRequest
|
|
||||||
b.wg.Wait()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Commit commits the file using a batch call, first adding it to the
|
|
||||||
// batch and then waiting for the batch to complete in a synchronous
|
|
||||||
// way if async is not set.
|
|
||||||
func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
|
|
||||||
select {
|
|
||||||
case <-b.closed:
|
|
||||||
return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
|
|
||||||
resp := make(chan batcherResponse, 1)
|
|
||||||
b.in <- batcherRequest{
|
|
||||||
commitInfo: commitInfo,
|
|
||||||
result: resp,
|
|
||||||
}
|
|
||||||
// If running async then don't wait for the result
|
|
||||||
if b.async {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
result := <-resp
|
|
||||||
return result.entry, result.err
|
|
||||||
}
|
|
||||||
@@ -25,19 +25,20 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"log"
|
||||||
"path"
|
"path"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
|
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
|
||||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/auth"
|
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
|
||||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/common"
|
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
|
||||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
|
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
|
||||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/sharing"
|
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
|
||||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/team"
|
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
|
||||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/users"
|
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/rclone/rclone/backend/dropbox/dbhash"
|
"github.com/rclone/rclone/backend/dropbox/dbhash"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
@@ -64,9 +65,9 @@ const (
|
|||||||
// Upload chunk size - setting too small makes uploads slow.
|
// Upload chunk size - setting too small makes uploads slow.
|
||||||
// Chunks are buffered into memory for retries.
|
// Chunks are buffered into memory for retries.
|
||||||
//
|
//
|
||||||
// Speed vs chunk size uploading a 1 GiB file on 2017-11-22
|
// Speed vs chunk size uploading a 1 GB file on 2017-11-22
|
||||||
//
|
//
|
||||||
// Chunk Size MiB, Speed MiB/s, % of max
|
// Chunk Size MB, Speed Mbyte/s, % of max
|
||||||
// 1 1.364 11%
|
// 1 1.364 11%
|
||||||
// 2 2.443 19%
|
// 2 2.443 19%
|
||||||
// 4 4.288 33%
|
// 4 4.288 33%
|
||||||
@@ -81,11 +82,11 @@ const (
|
|||||||
// 96 12.302 95%
|
// 96 12.302 95%
|
||||||
// 128 12.945 100%
|
// 128 12.945 100%
|
||||||
//
|
//
|
||||||
// Choose 48 MiB which is 91% of Maximum speed. rclone by
|
// Choose 48MB which is 91% of Maximum speed. rclone by
|
||||||
// default does 4 transfers so this should use 4*48 MiB = 192 MiB
|
// default does 4 transfers so this should use 4*48MB = 192MB
|
||||||
// by default.
|
// by default.
|
||||||
defaultChunkSize = 48 * fs.Mebi
|
defaultChunkSize = 48 * fs.MebiByte
|
||||||
maxChunkSize = 150 * fs.Mebi
|
maxChunkSize = 150 * fs.MebiByte
|
||||||
// Max length of filename parts: https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing
|
// Max length of filename parts: https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing
|
||||||
maxFileNameLength = 255
|
maxFileNameLength = 255
|
||||||
)
|
)
|
||||||
@@ -98,10 +99,8 @@ var (
|
|||||||
"files.content.write",
|
"files.content.write",
|
||||||
"files.content.read",
|
"files.content.read",
|
||||||
"sharing.write",
|
"sharing.write",
|
||||||
"account_info.read", // needed for About
|
|
||||||
// "file_requests.write",
|
// "file_requests.write",
|
||||||
// "members.read", // needed for impersonate - but causes app to need to be approved by Dropbox Team Admin during the flow
|
// "members.read", // needed for impersonate - but causes app to need to be approved by Dropbox Team Admin during the flow
|
||||||
// "team_data.member"
|
|
||||||
},
|
},
|
||||||
// Endpoint: oauth2.Endpoint{
|
// Endpoint: oauth2.Endpoint{
|
||||||
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
|
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
|
||||||
@@ -131,36 +130,39 @@ func getOauthConfig(m configmap.Mapper) *oauth2.Config {
|
|||||||
}
|
}
|
||||||
// Make a copy of the config
|
// Make a copy of the config
|
||||||
config := *dropboxConfig
|
config := *dropboxConfig
|
||||||
// Make a copy of the scopes with extra scopes requires appended
|
// Make a copy of the scopes with "members.read" appended
|
||||||
config.Scopes = append(config.Scopes, "members.read", "team_data.member")
|
config.Scopes = append(config.Scopes, "members.read")
|
||||||
return &config
|
return &config
|
||||||
}
|
}
|
||||||
|
|
||||||
// Register with Fs
|
// Register with Fs
|
||||||
func init() {
|
func init() {
|
||||||
DbHashType = hash.RegisterHash("dropbox", "DropboxHash", 64, dbhash.New)
|
DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
|
||||||
fs.Register(&fs.RegInfo{
|
fs.Register(&fs.RegInfo{
|
||||||
Name: "dropbox",
|
Name: "dropbox",
|
||||||
Description: "Dropbox",
|
Description: "Dropbox",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
Config: func(ctx context.Context, name string, m configmap.Mapper) {
|
||||||
return oauthutil.ConfigOut("", &oauthutil.Options{
|
opt := oauthutil.Options{
|
||||||
OAuth2Config: getOauthConfig(m),
|
|
||||||
NoOffline: true,
|
NoOffline: true,
|
||||||
OAuth2Opts: []oauth2.AuthCodeOption{
|
OAuth2Opts: []oauth2.AuthCodeOption{
|
||||||
oauth2.SetAuthURLParam("token_access_type", "offline"),
|
oauth2.SetAuthURLParam("token_access_type", "offline"),
|
||||||
},
|
},
|
||||||
})
|
}
|
||||||
|
err := oauthutil.Config(ctx, "dropbox", name, m, getOauthConfig(m), &opt)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed to configure token: %v", err)
|
||||||
|
}
|
||||||
},
|
},
|
||||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||||
Name: "chunk_size",
|
Name: "chunk_size",
|
||||||
Help: fmt.Sprintf(`Upload chunk size (< %v).
|
Help: fmt.Sprintf(`Upload chunk size. (< %v).
|
||||||
|
|
||||||
Any files larger than this will be uploaded in chunks of this size.
|
Any files larger than this will be uploaded in chunks of this size.
|
||||||
|
|
||||||
Note that chunks are buffered in memory (one at a time) so rclone can
|
Note that chunks are buffered in memory (one at a time) so rclone can
|
||||||
deal with retries. Setting this larger will increase the speed
|
deal with retries. Setting this larger will increase the speed
|
||||||
slightly (at most 10%% for 128 MiB in tests) at the cost of using more
|
slightly (at most 10%% for 128MB in tests) at the cost of using more
|
||||||
memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
|
memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
|
||||||
Default: defaultChunkSize,
|
Default: defaultChunkSize,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
@@ -209,68 +211,6 @@ Note that we don't unmount the shared folder afterwards so the
|
|||||||
shared folder.`,
|
shared folder.`,
|
||||||
Default: false,
|
Default: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
|
||||||
Name: "batch_mode",
|
|
||||||
Help: `Upload file batching sync|async|off.
|
|
||||||
|
|
||||||
This sets the batch mode used by rclone.
|
|
||||||
|
|
||||||
For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)
|
|
||||||
|
|
||||||
This has 3 possible values
|
|
||||||
|
|
||||||
- off - no batching
|
|
||||||
- sync - batch uploads and check completion (default)
|
|
||||||
- async - batch upload and don't check completion
|
|
||||||
|
|
||||||
Rclone will close any outstanding batches when it exits which may make
|
|
||||||
a delay on quit.
|
|
||||||
`,
|
|
||||||
Default: "sync",
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "batch_size",
|
|
||||||
Help: `Max number of files in upload batch.
|
|
||||||
|
|
||||||
This sets the batch size of files to upload. It has to be less than 1000.
|
|
||||||
|
|
||||||
By default this is 0 which means rclone which calculate the batch size
|
|
||||||
depending on the setting of batch_mode.
|
|
||||||
|
|
||||||
- batch_mode: async - default batch_size is 100
|
|
||||||
- batch_mode: sync - default batch_size is the same as --transfers
|
|
||||||
- batch_mode: off - not in use
|
|
||||||
|
|
||||||
Rclone will close any outstanding batches when it exits which may make
|
|
||||||
a delay on quit.
|
|
||||||
|
|
||||||
Setting this is a great idea if you are uploading lots of small files
|
|
||||||
as it will make them a lot quicker. You can use --transfers 32 to
|
|
||||||
maximise throughput.
|
|
||||||
`,
|
|
||||||
Default: 0,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "batch_timeout",
|
|
||||||
Help: `Max time to allow an idle upload batch before uploading.
|
|
||||||
|
|
||||||
If an upload batch is idle for more than this long then it will be
|
|
||||||
uploaded.
|
|
||||||
|
|
||||||
The default for this is 0 which means rclone will choose a sensible
|
|
||||||
default based on the batch_mode in use.
|
|
||||||
|
|
||||||
- batch_mode: async - default batch_timeout is 500ms
|
|
||||||
- batch_mode: sync - default batch_timeout is 10s
|
|
||||||
- batch_mode: off - not in use
|
|
||||||
`,
|
|
||||||
Default: fs.Duration(0),
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "batch_commit_timeout",
|
|
||||||
Help: `Max time to wait for a batch to finish comitting`,
|
|
||||||
Default: fs.Duration(10 * time.Minute),
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
}, {
|
||||||
Name: config.ConfigEncoding,
|
Name: config.ConfigEncoding,
|
||||||
Help: config.ConfigEncodingHelp,
|
Help: config.ConfigEncodingHelp,
|
||||||
@@ -294,11 +234,6 @@ type Options struct {
|
|||||||
Impersonate string `config:"impersonate"`
|
Impersonate string `config:"impersonate"`
|
||||||
SharedFiles bool `config:"shared_files"`
|
SharedFiles bool `config:"shared_files"`
|
||||||
SharedFolders bool `config:"shared_folders"`
|
SharedFolders bool `config:"shared_folders"`
|
||||||
BatchMode string `config:"batch_mode"`
|
|
||||||
BatchSize int `config:"batch_size"`
|
|
||||||
BatchTimeout fs.Duration `config:"batch_timeout"`
|
|
||||||
BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
|
|
||||||
AsyncBatch bool `config:"async_batch"`
|
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
Enc encoder.MultiEncoder `config:"encoding"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -318,7 +253,6 @@ type Fs struct {
|
|||||||
slashRootSlash string // root with "/" prefix and postfix, lowercase
|
slashRootSlash string // root with "/" prefix and postfix, lowercase
|
||||||
pacer *fs.Pacer // To pace the API calls
|
pacer *fs.Pacer // To pace the API calls
|
||||||
ns string // The namespace we are using or "" for none
|
ns string // The namespace we are using or "" for none
|
||||||
batcher *batcher // batch builder
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Object describes a dropbox object
|
// Object describes a dropbox object
|
||||||
@@ -334,6 +268,8 @@ type Object struct {
|
|||||||
hash string // content_hash of the object
|
hash string // content_hash of the object
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ------------------------------------------------------------
|
||||||
|
|
||||||
// Name of the remote (as passed into NewFs)
|
// Name of the remote (as passed into NewFs)
|
||||||
func (f *Fs) Name() string {
|
func (f *Fs) Name() string {
|
||||||
return f.name
|
return f.name
|
||||||
@@ -387,7 +323,7 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||||
const minChunkSize = fs.SizeSuffixBase
|
const minChunkSize = fs.Byte
|
||||||
if cs < minChunkSize {
|
if cs < minChunkSize {
|
||||||
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
||||||
}
|
}
|
||||||
@@ -444,10 +380,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
ci: ci,
|
ci: ci,
|
||||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||||
}
|
}
|
||||||
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
cfg := dropbox.Config{
|
cfg := dropbox.Config{
|
||||||
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
|
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
|
||||||
Client: oAuthClient, // maybe???
|
Client: oAuthClient, // maybe???
|
||||||
@@ -580,7 +512,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
}
|
}
|
||||||
|
|
||||||
// headerGenerator for dropbox sdk
|
// headerGenerator for dropbox sdk
|
||||||
func (f *Fs) headerGenerator(hostType string, namespace string, route string) map[string]string {
|
func (f *Fs) headerGenerator(hostType string, style string, namespace string, route string) map[string]string {
|
||||||
if f.ns == "" {
|
if f.ns == "" {
|
||||||
return map[string]string{}
|
return map[string]string{}
|
||||||
}
|
}
|
||||||
@@ -630,9 +562,6 @@ func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *fi
|
|||||||
}
|
}
|
||||||
fileInfo, ok := entry.(*files.FileMetadata)
|
fileInfo, ok := entry.(*files.FileMetadata)
|
||||||
if !ok {
|
if !ok {
|
||||||
if _, ok = entry.(*files.FolderMetadata); ok {
|
|
||||||
return nil, fs.ErrorIsDir
|
|
||||||
}
|
|
||||||
return nil, fs.ErrorNotAFile
|
return nil, fs.ErrorNotAFile
|
||||||
}
|
}
|
||||||
return fileInfo, nil
|
return fileInfo, nil
|
||||||
@@ -794,7 +723,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
|
|||||||
fs: f,
|
fs: f,
|
||||||
url: entry.PreviewUrl,
|
url: entry.PreviewUrl,
|
||||||
remote: entryPath,
|
remote: entryPath,
|
||||||
modTime: *entry.TimeInvited,
|
modTime: entry.TimeInvited,
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -1169,7 +1098,14 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
|||||||
}
|
}
|
||||||
if expire < fs.DurationOff {
|
if expire < fs.DurationOff {
|
||||||
expiryTime := time.Now().Add(time.Duration(expire)).UTC().Round(time.Second)
|
expiryTime := time.Now().Add(time.Duration(expire)).UTC().Round(time.Second)
|
||||||
createArg.Settings.Expires = &expiryTime
|
createArg.Settings.Expires = expiryTime
|
||||||
|
}
|
||||||
|
// FIXME note we can't set Settings for non enterprise dropbox
|
||||||
|
// because of https://github.com/dropbox/dropbox-sdk-go-unofficial/issues/75
|
||||||
|
// however this only goes wrong when we set Expires, so as a
|
||||||
|
// work-around remove Settings unless expire is set.
|
||||||
|
if expire == fs.DurationOff {
|
||||||
|
createArg.Settings = nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var linkRes sharing.IsSharedLinkMetadata
|
var linkRes sharing.IsSharedLinkMetadata
|
||||||
@@ -1415,13 +1351,13 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
|
|||||||
switch info := entry.(type) {
|
switch info := entry.(type) {
|
||||||
case *files.FolderMetadata:
|
case *files.FolderMetadata:
|
||||||
entryType = fs.EntryDirectory
|
entryType = fs.EntryDirectory
|
||||||
entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
|
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
|
||||||
case *files.FileMetadata:
|
case *files.FileMetadata:
|
||||||
entryType = fs.EntryObject
|
entryType = fs.EntryObject
|
||||||
entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
|
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
|
||||||
case *files.DeletedMetadata:
|
case *files.DeletedMetadata:
|
||||||
entryType = fs.EntryObject
|
entryType = fs.EntryObject
|
||||||
entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
|
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
|
||||||
default:
|
default:
|
||||||
fs.Errorf(entry, "dropbox ChangeNotify: ignoring unknown EntryType %T", entry)
|
fs.Errorf(entry, "dropbox ChangeNotify: ignoring unknown EntryType %T", entry)
|
||||||
continue
|
continue
|
||||||
@@ -1443,13 +1379,6 @@ func (f *Fs) Hashes() hash.Set {
|
|||||||
return hash.Set(DbHashType)
|
return hash.Set(DbHashType)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown the backend, closing any background tasks and any
|
|
||||||
// cached connections.
|
|
||||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
|
||||||
f.batcher.Shutdown()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ------------------------------------------------------------
|
// ------------------------------------------------------------
|
||||||
|
|
||||||
// Fs returns the parent Fs
|
// Fs returns the parent Fs
|
||||||
@@ -1609,83 +1538,97 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
|||||||
|
|
||||||
// uploadChunked uploads the object in parts
|
// uploadChunked uploads the object in parts
|
||||||
//
|
//
|
||||||
// Will introduce two additional network requests to start and finish the session.
|
// Will work optimally if size is >= uploadChunkSize. If the size is either
|
||||||
// If the size is unknown (i.e. -1) the method incurs one additional
|
// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
|
||||||
// request to the Dropbox API that does not carry a payload to close the append session.
|
// avoidable request to the Dropbox API that does not carry payload.
|
||||||
func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
|
func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
|
||||||
// start upload
|
|
||||||
var res *files.UploadSessionStartResult
|
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
|
||||||
res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, nil)
|
|
||||||
return shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
chunkSize := int64(o.fs.opt.ChunkSize)
|
chunkSize := int64(o.fs.opt.ChunkSize)
|
||||||
chunks, remainder := size/chunkSize, size%chunkSize
|
chunks := 0
|
||||||
if remainder > 0 {
|
if size != -1 {
|
||||||
chunks++
|
chunks = int(size/chunkSize) + 1
|
||||||
}
|
}
|
||||||
|
|
||||||
// write chunks
|
|
||||||
in := readers.NewCountingReader(in0)
|
in := readers.NewCountingReader(in0)
|
||||||
buf := make([]byte, int(chunkSize))
|
buf := make([]byte, int(chunkSize))
|
||||||
cursor := files.UploadSessionCursor{
|
|
||||||
SessionId: res.SessionId,
|
|
||||||
Offset: 0,
|
|
||||||
}
|
|
||||||
appendArg := files.UploadSessionAppendArg{Cursor: &cursor}
|
|
||||||
for currentChunk := 1; ; currentChunk++ {
|
|
||||||
cursor.Offset = in.BytesRead()
|
|
||||||
|
|
||||||
if chunks < 0 {
|
fmtChunk := func(cur int, last bool) {
|
||||||
fs.Debugf(o, "Streaming chunk %d/unknown", currentChunk)
|
if chunks == 0 && last {
|
||||||
|
fs.Debugf(o, "Streaming chunk %d/%d", cur, cur)
|
||||||
|
} else if chunks == 0 {
|
||||||
|
fs.Debugf(o, "Streaming chunk %d/unknown", cur)
|
||||||
} else {
|
} else {
|
||||||
fs.Debugf(o, "Uploading chunk %d/%d", currentChunk, chunks)
|
fs.Debugf(o, "Uploading chunk %d/%d", cur, chunks)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// write the first chunk
|
||||||
|
fmtChunk(1, false)
|
||||||
|
var res *files.UploadSessionStartResult
|
||||||
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
|
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
|
||||||
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
|
// seek to the start in case this is a retry
|
||||||
|
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, chunk)
|
||||||
|
return shouldRetry(ctx, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
cursor := files.UploadSessionCursor{
|
||||||
|
SessionId: res.SessionId,
|
||||||
|
Offset: 0,
|
||||||
|
}
|
||||||
|
appendArg := files.UploadSessionAppendArg{
|
||||||
|
Cursor: &cursor,
|
||||||
|
Close: false,
|
||||||
|
}
|
||||||
|
|
||||||
|
// write more whole chunks (if any)
|
||||||
|
currentChunk := 2
|
||||||
|
for {
|
||||||
|
if chunks > 0 && currentChunk >= chunks {
|
||||||
|
// if the size is known, only upload full chunks. Remaining bytes are uploaded with
|
||||||
|
// the UploadSessionFinish request.
|
||||||
|
break
|
||||||
|
} else if chunks == 0 && in.BytesRead()-cursor.Offset < uint64(chunkSize) {
|
||||||
|
// if the size is unknown, upload as long as we can read full chunks from the reader.
|
||||||
|
// The UploadSessionFinish request will not contain any payload.
|
||||||
|
break
|
||||||
|
}
|
||||||
|
cursor.Offset = in.BytesRead()
|
||||||
|
fmtChunk(currentChunk, false)
|
||||||
|
chunk = readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
// seek to the start in case this is a retry
|
// seek to the start in case this is a retry
|
||||||
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
|
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
|
err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
|
||||||
// after session is started, we retry everything
|
// after the first chunk is uploaded, we retry everything
|
||||||
return err != nil, err
|
return err != nil, err
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if appendArg.Close {
|
currentChunk++
|
||||||
break
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if size > 0 {
|
// write the remains
|
||||||
// if size is known, check if next chunk is final
|
|
||||||
appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize)
|
|
||||||
} else {
|
|
||||||
// if size is unknown, upload as long as we can read full chunks from the reader
|
|
||||||
appendArg.Close = in.BytesRead()-cursor.Offset < uint64(chunkSize)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// finish upload
|
|
||||||
cursor.Offset = in.BytesRead()
|
cursor.Offset = in.BytesRead()
|
||||||
args := &files.UploadSessionFinishArg{
|
args := &files.UploadSessionFinishArg{
|
||||||
Cursor: &cursor,
|
Cursor: &cursor,
|
||||||
Commit: commitInfo,
|
Commit: commitInfo,
|
||||||
}
|
}
|
||||||
// If we are batching then we should have written all the data now
|
fmtChunk(currentChunk, true)
|
||||||
// store the commit info now for a batch commit
|
chunk = readers.NewRepeatableReaderBuffer(in, buf)
|
||||||
if o.fs.batcher.Batching() {
|
|
||||||
return o.fs.batcher.Commit(ctx, args)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
entry, err = o.fs.srv.UploadSessionFinish(args, nil)
|
// seek to the start in case this is a retry
|
||||||
|
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
entry, err = o.fs.srv.UploadSessionFinish(args, chunk)
|
||||||
// If error is insufficient space then don't retry
|
// If error is insufficient space then don't retry
|
||||||
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
|
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
|
||||||
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
|
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
|
||||||
@@ -1743,8 +1686,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))
|
commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))
|
||||||
commitInfo.Mode.Tag = "overwrite"
|
commitInfo.Mode.Tag = "overwrite"
|
||||||
// The Dropbox API only accepts timestamps in UTC with second precision.
|
// The Dropbox API only accepts timestamps in UTC with second precision.
|
||||||
clientModified := src.ModTime(ctx).UTC().Round(time.Second)
|
commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
|
||||||
commitInfo.ClientModified = &clientModified
|
|
||||||
// Don't attempt to create filenames that are too long
|
// Don't attempt to create filenames that are too long
|
||||||
if cErr := checkPathLength(commitInfo.Path); cErr != nil {
|
if cErr := checkPathLength(commitInfo.Path); cErr != nil {
|
||||||
return cErr
|
return cErr
|
||||||
@@ -1753,7 +1695,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
size := src.Size()
|
size := src.Size()
|
||||||
var err error
|
var err error
|
||||||
var entry *files.FileMetadata
|
var entry *files.FileMetadata
|
||||||
if size > int64(o.fs.opt.ChunkSize) || size < 0 || o.fs.batcher.Batching() {
|
if size > int64(o.fs.opt.ChunkSize) || size == -1 {
|
||||||
entry, err = o.uploadChunked(ctx, in, commitInfo, size)
|
entry, err = o.uploadChunked(ctx, in, commitInfo, size)
|
||||||
} else {
|
} else {
|
||||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||||
@@ -1764,15 +1706,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "upload failed")
|
return errors.Wrap(err, "upload failed")
|
||||||
}
|
}
|
||||||
// If we haven't received data back from batch upload then fake it
|
|
||||||
//
|
|
||||||
// This will only happen if we are uploading async batches
|
|
||||||
if entry == nil {
|
|
||||||
o.bytes = size
|
|
||||||
o.modTime = *commitInfo.ClientModified
|
|
||||||
o.hash = "" // we don't have this
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return o.setMetadataFromEntry(entry)
|
return o.setMetadataFromEntry(entry)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1800,7 +1733,6 @@ var (
|
|||||||
_ fs.PublicLinker = (*Fs)(nil)
|
_ fs.PublicLinker = (*Fs)(nil)
|
||||||
_ fs.DirMover = (*Fs)(nil)
|
_ fs.DirMover = (*Fs)(nil)
|
||||||
_ fs.Abouter = (*Fs)(nil)
|
_ fs.Abouter = (*Fs)(nil)
|
||||||
_ fs.Shutdowner = &Fs{}
|
|
||||||
_ fs.Object = (*Object)(nil)
|
_ fs.Object = (*Object)(nil)
|
||||||
_ fs.IDer = (*Object)(nil)
|
_ fs.IDer = (*Object)(nil)
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -4,7 +4,6 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
|
||||||
"regexp"
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
@@ -87,16 +86,10 @@ func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
|
|||||||
return &file, err
|
return &file, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// maybe do some actual validation later if necessary
|
|
||||||
func validToken(token *GetTokenResponse) bool {
|
|
||||||
return token.Status == "OK"
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
|
func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
|
||||||
request := DownloadRequest{
|
request := DownloadRequest{
|
||||||
URL: url,
|
URL: url,
|
||||||
Single: 1,
|
Single: 1,
|
||||||
Pass: f.opt.FilePassword,
|
|
||||||
}
|
}
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "POST",
|
Method: "POST",
|
||||||
@@ -106,8 +99,7 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
|
|||||||
var token GetTokenResponse
|
var token GetTokenResponse
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
err := f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
|
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
|
||||||
doretry, err := shouldRetry(ctx, resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
return doretry || !validToken(&token), err
|
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "couldn't list files")
|
return nil, errors.Wrap(err, "couldn't list files")
|
||||||
@@ -130,12 +122,6 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
|
|||||||
RootURL: "https://1fichier.com/dir/",
|
RootURL: "https://1fichier.com/dir/",
|
||||||
Path: id,
|
Path: id,
|
||||||
Parameters: map[string][]string{"json": {"1"}},
|
Parameters: map[string][]string{"json": {"1"}},
|
||||||
ContentType: "application/x-www-form-urlencoded",
|
|
||||||
}
|
|
||||||
if f.opt.FolderPassword != "" {
|
|
||||||
opts.Method = "POST"
|
|
||||||
opts.Parameters = nil
|
|
||||||
opts.Body = strings.NewReader("json=1&pass=" + url.QueryEscape(f.opt.FolderPassword))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var sharedFiles SharedFolderResponse
|
var sharedFiles SharedFolderResponse
|
||||||
@@ -325,7 +311,7 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
|
|||||||
return nil, errors.Wrap(err, "couldn't remove folder")
|
return nil, errors.Wrap(err, "couldn't remove folder")
|
||||||
}
|
}
|
||||||
if response.Status != "OK" {
|
if response.Status != "OK" {
|
||||||
return nil, errors.Errorf("can't remove folder: %s", response.Message)
|
return nil, errors.New("Can't remove non-empty dir")
|
||||||
}
|
}
|
||||||
|
|
||||||
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
|
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
|
||||||
@@ -410,34 +396,6 @@ func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename stri
|
|||||||
return response, nil
|
return response, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Fs) renameFile(ctx context.Context, url string, newName string) (response *RenameFileResponse, err error) {
|
|
||||||
request := &RenameFileRequest{
|
|
||||||
URLs: []RenameFileURL{
|
|
||||||
{
|
|
||||||
URL: url,
|
|
||||||
Filename: newName,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "POST",
|
|
||||||
Path: "/file/rename.cgi",
|
|
||||||
}
|
|
||||||
|
|
||||||
response = &RenameFileResponse{}
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
|
|
||||||
return shouldRetry(ctx, resp, err)
|
|
||||||
})
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(err, "couldn't rename file")
|
|
||||||
}
|
|
||||||
|
|
||||||
return response, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
|
func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
|
||||||
// fs.Debugf(f, "Requesting Upload node")
|
// fs.Debugf(f, "Requesting Upload node")
|
||||||
|
|
||||||
|
|||||||
@@ -35,27 +35,17 @@ func init() {
|
|||||||
fs.Register(&fs.RegInfo{
|
fs.Register(&fs.RegInfo{
|
||||||
Name: "fichier",
|
Name: "fichier",
|
||||||
Description: "1Fichier",
|
Description: "1Fichier",
|
||||||
|
Config: func(ctx context.Context, name string, config configmap.Mapper) {
|
||||||
|
},
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
|
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
|
||||||
Name: "api_key",
|
Name: "api_key",
|
||||||
}, {
|
}, {
|
||||||
Help: "If you want to download a shared folder, add this parameter.",
|
Help: "If you want to download a shared folder, add this parameter",
|
||||||
Name: "shared_folder",
|
Name: "shared_folder",
|
||||||
Required: false,
|
Required: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
|
||||||
Help: "If you want to download a shared file that is password protected, add this parameter.",
|
|
||||||
Name: "file_password",
|
|
||||||
Required: false,
|
|
||||||
Advanced: true,
|
|
||||||
IsPassword: true,
|
|
||||||
}, {
|
|
||||||
Help: "If you want to list the files in a shared folder that is password protected, add this parameter.",
|
|
||||||
Name: "folder_password",
|
|
||||||
Required: false,
|
|
||||||
Advanced: true,
|
|
||||||
IsPassword: true,
|
|
||||||
}, {
|
}, {
|
||||||
Name: config.ConfigEncoding,
|
Name: config.ConfigEncoding,
|
||||||
Help: config.ConfigEncodingHelp,
|
Help: config.ConfigEncodingHelp,
|
||||||
@@ -89,8 +79,6 @@ func init() {
|
|||||||
type Options struct {
|
type Options struct {
|
||||||
APIKey string `config:"api_key"`
|
APIKey string `config:"api_key"`
|
||||||
SharedFolder string `config:"shared_folder"`
|
SharedFolder string `config:"shared_folder"`
|
||||||
FilePassword string `config:"file_password"`
|
|
||||||
FolderPassword string `config:"folder_password"`
|
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
Enc encoder.MultiEncoder `config:"encoding"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -437,30 +425,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
return nil, fs.ErrorCantMove
|
return nil, fs.ErrorCantMove
|
||||||
}
|
}
|
||||||
|
|
||||||
// Find current directory ID
|
|
||||||
_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create temporary object
|
// Create temporary object
|
||||||
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
|
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// If it is in the correct directory, just rename it
|
|
||||||
var url string
|
|
||||||
if currentDirectoryID == directoryID {
|
|
||||||
resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(err, "couldn't rename file")
|
|
||||||
}
|
|
||||||
if resp.Status != "OK" {
|
|
||||||
return nil, errors.Errorf("couldn't rename file: %s", resp.Message)
|
|
||||||
}
|
|
||||||
url = resp.URLs[0].URL
|
|
||||||
} else {
|
|
||||||
folderID, err := strconv.Atoi(directoryID)
|
folderID, err := strconv.Atoi(directoryID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -470,12 +440,10 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
return nil, errors.Wrap(err, "couldn't move file")
|
return nil, errors.Wrap(err, "couldn't move file")
|
||||||
}
|
}
|
||||||
if resp.Status != "OK" {
|
if resp.Status != "OK" {
|
||||||
return nil, errors.Errorf("couldn't move file: %s", resp.Message)
|
return nil, errors.New("couldn't move file")
|
||||||
}
|
|
||||||
url = resp.URLs[0]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
file, err := f.readFileInfo(ctx, url)
|
file, err := f.readFileInfo(ctx, resp.URLs[0])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.New("couldn't read file data")
|
return nil, errors.New("couldn't read file data")
|
||||||
}
|
}
|
||||||
@@ -506,7 +474,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
return nil, errors.Wrap(err, "couldn't move file")
|
return nil, errors.Wrap(err, "couldn't move file")
|
||||||
}
|
}
|
||||||
if resp.Status != "OK" {
|
if resp.Status != "OK" {
|
||||||
return nil, errors.Errorf("couldn't move file: %s", resp.Message)
|
return nil, errors.New("couldn't move file")
|
||||||
}
|
}
|
||||||
|
|
||||||
file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)
|
file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)
|
||||||
|
|||||||
@@ -19,7 +19,6 @@ type ListFilesRequest struct {
|
|||||||
type DownloadRequest struct {
|
type DownloadRequest struct {
|
||||||
URL string `json:"url"`
|
URL string `json:"url"`
|
||||||
Single int `json:"single"`
|
Single int `json:"single"`
|
||||||
Pass string `json:"pass,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// RemoveFolderRequest is the request structure of the corresponding request
|
// RemoveFolderRequest is the request structure of the corresponding request
|
||||||
@@ -65,7 +64,6 @@ type MoveFileRequest struct {
|
|||||||
// MoveFileResponse is the response structure of the corresponding request
|
// MoveFileResponse is the response structure of the corresponding request
|
||||||
type MoveFileResponse struct {
|
type MoveFileResponse struct {
|
||||||
Status string `json:"status"`
|
Status string `json:"status"`
|
||||||
Message string `json:"message"`
|
|
||||||
URLs []string `json:"urls"`
|
URLs []string `json:"urls"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -79,7 +77,6 @@ type CopyFileRequest struct {
|
|||||||
// CopyFileResponse is the response structure of the corresponding request
|
// CopyFileResponse is the response structure of the corresponding request
|
||||||
type CopyFileResponse struct {
|
type CopyFileResponse struct {
|
||||||
Status string `json:"status"`
|
Status string `json:"status"`
|
||||||
Message string `json:"message"`
|
|
||||||
Copied int `json:"copied"`
|
Copied int `json:"copied"`
|
||||||
URLs []FileCopy `json:"urls"`
|
URLs []FileCopy `json:"urls"`
|
||||||
}
|
}
|
||||||
@@ -90,30 +87,6 @@ type FileCopy struct {
|
|||||||
ToURL string `json:"to_url"`
|
ToURL string `json:"to_url"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// RenameFileURL is the data structure to rename a single file
|
|
||||||
type RenameFileURL struct {
|
|
||||||
URL string `json:"url"`
|
|
||||||
Filename string `json:"filename"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// RenameFileRequest is the request structure of the corresponding request
|
|
||||||
type RenameFileRequest struct {
|
|
||||||
URLs []RenameFileURL `json:"urls"`
|
|
||||||
Pretty int `json:"pretty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// RenameFileResponse is the response structure of the corresponding request
|
|
||||||
type RenameFileResponse struct {
|
|
||||||
Status string `json:"status"`
|
|
||||||
Message string `json:"message"`
|
|
||||||
Renamed int `json:"renamed"`
|
|
||||||
URLs []struct {
|
|
||||||
URL string `json:"url"`
|
|
||||||
OldFilename string `json:"old_filename"`
|
|
||||||
NewFilename string `json:"new_filename"`
|
|
||||||
} `json:"urls"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetUploadNodeResponse is the response structure of the corresponding request
|
// GetUploadNodeResponse is the response structure of the corresponding request
|
||||||
type GetUploadNodeResponse struct {
|
type GetUploadNodeResponse struct {
|
||||||
ID string `json:"id"`
|
ID string `json:"id"`
|
||||||
|
|||||||
@@ -5,7 +5,6 @@ package api
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"reflect"
|
"reflect"
|
||||||
"strings"
|
"strings"
|
||||||
@@ -52,46 +51,11 @@ func (t Time) String() string {
|
|||||||
return time.Time(t).UTC().Format(timeFormatParameters)
|
return time.Time(t).UTC().Format(timeFormatParameters)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Int represents an integer which can be represented in JSON as a
|
|
||||||
// quoted integer or an integer.
|
|
||||||
type Int int
|
|
||||||
|
|
||||||
// MarshalJSON turns a Int into JSON
|
|
||||||
func (i *Int) MarshalJSON() (out []byte, err error) {
|
|
||||||
return json.Marshal((*int)(i))
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSON turns JSON into a Int
|
|
||||||
func (i *Int) UnmarshalJSON(data []byte) error {
|
|
||||||
if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
|
|
||||||
data = data[1 : len(data)-1]
|
|
||||||
}
|
|
||||||
return json.Unmarshal(data, (*int)(i))
|
|
||||||
}
|
|
||||||
|
|
||||||
// String represents an string which can be represented in JSON as a
|
|
||||||
// quoted string or an integer.
|
|
||||||
type String string
|
|
||||||
|
|
||||||
// MarshalJSON turns a String into JSON
|
|
||||||
func (s *String) MarshalJSON() (out []byte, err error) {
|
|
||||||
return json.Marshal((*string)(s))
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSON turns JSON into a String
|
|
||||||
func (s *String) UnmarshalJSON(data []byte) error {
|
|
||||||
err := json.Unmarshal(data, (*string)(s))
|
|
||||||
if err != nil {
|
|
||||||
*s = String(data)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Status return returned in all status responses
|
// Status return returned in all status responses
|
||||||
type Status struct {
|
type Status struct {
|
||||||
Code string `json:"status"`
|
Code string `json:"status"`
|
||||||
Message string `json:"statusmessage"`
|
Message string `json:"statusmessage"`
|
||||||
TaskID String `json:"taskid"`
|
TaskID string `json:"taskid"`
|
||||||
// Warning string `json:"warning"` // obsolete
|
// Warning string `json:"warning"` // obsolete
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -151,7 +115,7 @@ type GetFolderContentsResponse struct {
|
|||||||
Total int `json:"total,string"`
|
Total int `json:"total,string"`
|
||||||
Items []Item `json:"filelist"`
|
Items []Item `json:"filelist"`
|
||||||
Folder Item `json:"folder"`
|
Folder Item `json:"folder"`
|
||||||
From Int `json:"from"`
|
From int `json:"from,string"`
|
||||||
//Count int `json:"count"`
|
//Count int `json:"count"`
|
||||||
Pid string `json:"pid"`
|
Pid string `json:"pid"`
|
||||||
RefreshResult Status `json:"refreshresult"`
|
RefreshResult Status `json:"refreshresult"`
|
||||||
|
|||||||
@@ -65,7 +65,7 @@ func init() {
|
|||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "url",
|
Name: "url",
|
||||||
Help: "URL of the Enterprise File Fabric to connect to.",
|
Help: "URL of the Enterprise File Fabric to connect to",
|
||||||
Required: true,
|
Required: true,
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "https://storagemadeeasy.com",
|
Value: "https://storagemadeeasy.com",
|
||||||
@@ -79,15 +79,14 @@ func init() {
|
|||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "root_folder_id",
|
Name: "root_folder_id",
|
||||||
Help: `ID of the root folder.
|
Help: `ID of the root folder
|
||||||
|
|
||||||
Leave blank normally.
|
Leave blank normally.
|
||||||
|
|
||||||
Fill in to make rclone start with directory of a given ID.
|
Fill in to make rclone start with directory of a given ID.
|
||||||
`,
|
`,
|
||||||
}, {
|
}, {
|
||||||
Name: "permanent_token",
|
Name: "permanent_token",
|
||||||
Help: `Permanent Authentication Token.
|
Help: `Permanent Authentication Token
|
||||||
|
|
||||||
A Permanent Authentication Token can be created in the Enterprise File
|
A Permanent Authentication Token can be created in the Enterprise File
|
||||||
Fabric, on the users Dashboard under Security, there is an entry
|
Fabric, on the users Dashboard under Security, there is an entry
|
||||||
@@ -100,7 +99,7 @@ For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
|
|||||||
`,
|
`,
|
||||||
}, {
|
}, {
|
||||||
Name: "token",
|
Name: "token",
|
||||||
Help: `Session Token.
|
Help: `Session Token
|
||||||
|
|
||||||
This is a session token which rclone caches in the config file. It is
|
This is a session token which rclone caches in the config file. It is
|
||||||
usually valid for 1 hour.
|
usually valid for 1 hour.
|
||||||
@@ -110,14 +109,14 @@ Don't set this value - rclone will set it automatically.
|
|||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "token_expiry",
|
Name: "token_expiry",
|
||||||
Help: `Token expiry time.
|
Help: `Token expiry time
|
||||||
|
|
||||||
Don't set this value - rclone will set it automatically.
|
Don't set this value - rclone will set it automatically.
|
||||||
`,
|
`,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "version",
|
Name: "version",
|
||||||
Help: `Version read from the file fabric.
|
Help: `Version read from the file fabric
|
||||||
|
|
||||||
Don't set this value - rclone will set it automatically.
|
Don't set this value - rclone will set it automatically.
|
||||||
`,
|
`,
|
||||||
@@ -223,14 +222,13 @@ var retryStatusCodes = []struct {
|
|||||||
// delete in that folder. Please try again later or use
|
// delete in that folder. Please try again later or use
|
||||||
// another name. (error_background)
|
// another name. (error_background)
|
||||||
code: "error_background",
|
code: "error_background",
|
||||||
sleep: 1 * time.Second,
|
sleep: 6 * time.Second,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// shouldRetry returns a boolean as to whether this resp and err
|
// shouldRetry returns a boolean as to whether this resp and err
|
||||||
// deserve to be retried. It returns the err as a convenience
|
// deserve to be retried. It returns the err as a convenience
|
||||||
// try should be the number of the tries so far, counting up from 1
|
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, status api.OKError) (bool, error) {
|
||||||
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, status api.OKError, try int) (bool, error) {
|
|
||||||
if fserrors.ContextError(ctx, &err) {
|
if fserrors.ContextError(ctx, &err) {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
@@ -246,10 +244,9 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
|
|||||||
for _, retryCode := range retryStatusCodes {
|
for _, retryCode := range retryStatusCodes {
|
||||||
if code == retryCode.code {
|
if code == retryCode.code {
|
||||||
if retryCode.sleep > 0 {
|
if retryCode.sleep > 0 {
|
||||||
// make this thread only sleep exponentially increasing extra time
|
// make this thread only sleep extra time
|
||||||
sleepTime := retryCode.sleep << (try - 1)
|
fs.Debugf(f, "Sleeping for %v to wait for %q error to clear", retryCode.sleep, retryCode.code)
|
||||||
fs.Debugf(f, "Sleeping for %v to wait for %q error to clear", sleepTime, retryCode.code)
|
time.Sleep(retryCode.sleep)
|
||||||
time.Sleep(sleepTime)
|
|
||||||
}
|
}
|
||||||
return true, err
|
return true, err
|
||||||
}
|
}
|
||||||
@@ -403,13 +400,11 @@ func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKEr
|
|||||||
ContentType: "application/x-www-form-urlencoded",
|
ContentType: "application/x-www-form-urlencoded",
|
||||||
Options: options,
|
Options: options,
|
||||||
}
|
}
|
||||||
try := 0
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
try++
|
|
||||||
// Refresh the body each retry
|
// Refresh the body each retry
|
||||||
opts.Body = strings.NewReader(data.Encode())
|
opts.Body = strings.NewReader(data.Encode())
|
||||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, result)
|
resp, err = f.srv.CallJSON(ctx, &opts, nil, result)
|
||||||
return f.shouldRetry(ctx, resp, err, result, try)
|
return f.shouldRetry(ctx, resp, err, result)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return resp, err
|
return resp, err
|
||||||
@@ -844,7 +839,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Wait for the the background task to complete if necessary
|
// Wait for the the background task to complete if necessary
|
||||||
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) {
|
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID string) (err error) {
|
||||||
if taskID == "" || taskID == "0" {
|
if taskID == "" || taskID == "0" {
|
||||||
// No task to wait for
|
// No task to wait for
|
||||||
return nil
|
return nil
|
||||||
@@ -1094,7 +1089,7 @@ func (o *Object) Size() int64 {
|
|||||||
// setMetaData sets the metadata from info
|
// setMetaData sets the metadata from info
|
||||||
func (o *Object) setMetaData(info *api.Item) (err error) {
|
func (o *Object) setMetaData(info *api.Item) (err error) {
|
||||||
if info.Type != api.ItemTypeFile {
|
if info.Type != api.ItemTypeFile {
|
||||||
return fs.ErrorIsDir
|
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
|
||||||
}
|
}
|
||||||
o.hasMetaData = true
|
o.hasMetaData = true
|
||||||
o.size = info.Size
|
o.size = info.Size
|
||||||
@@ -1283,11 +1278,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
var contentLength = size
|
var contentLength = size
|
||||||
opts.ContentLength = &contentLength // NB CallJSON scribbles on this which is naughty
|
opts.ContentLength = &contentLength // NB CallJSON scribbles on this which is naughty
|
||||||
}
|
}
|
||||||
try := 0
|
|
||||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||||
try++
|
|
||||||
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &uploader)
|
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &uploader)
|
||||||
return o.fs.shouldRetry(ctx, resp, err, nil, try)
|
return o.fs.shouldRetry(ctx, resp, err, nil)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "failed to upload")
|
return errors.Wrap(err, "failed to upload")
|
||||||
|
|||||||
@@ -48,23 +48,26 @@ func init() {
|
|||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "host",
|
Name: "host",
|
||||||
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
|
Help: "FTP host to connect to",
|
||||||
Required: true,
|
Required: true,
|
||||||
|
Examples: []fs.OptionExample{{
|
||||||
|
Value: "ftp.example.com",
|
||||||
|
Help: "Connect to ftp.example.com",
|
||||||
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "user",
|
Name: "user",
|
||||||
Help: "FTP username, leave blank for current username, " + currentUser + ".",
|
Help: "FTP username, leave blank for current username, " + currentUser,
|
||||||
}, {
|
}, {
|
||||||
Name: "port",
|
Name: "port",
|
||||||
Help: "FTP port, leave blank to use default (21).",
|
Help: "FTP port, leave blank to use default (21)",
|
||||||
}, {
|
}, {
|
||||||
Name: "pass",
|
Name: "pass",
|
||||||
Help: "FTP password.",
|
Help: "FTP password",
|
||||||
IsPassword: true,
|
IsPassword: true,
|
||||||
Required: true,
|
Required: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "tls",
|
Name: "tls",
|
||||||
Help: `Use Implicit FTPS (FTP over TLS).
|
Help: `Use Implicit FTPS (FTP over TLS)
|
||||||
|
|
||||||
When using implicit FTP over TLS the client connects using TLS
|
When using implicit FTP over TLS the client connects using TLS
|
||||||
right from the start which breaks compatibility with
|
right from the start which breaks compatibility with
|
||||||
non-TLS-aware servers. This is usually served over port 990 rather
|
non-TLS-aware servers. This is usually served over port 990 rather
|
||||||
@@ -72,41 +75,35 @@ than port 21. Cannot be used in combination with explicit FTP.`,
|
|||||||
Default: false,
|
Default: false,
|
||||||
}, {
|
}, {
|
||||||
Name: "explicit_tls",
|
Name: "explicit_tls",
|
||||||
Help: `Use Explicit FTPS (FTP over TLS).
|
Help: `Use Explicit FTPS (FTP over TLS)
|
||||||
|
|
||||||
When using explicit FTP over TLS the client explicitly requests
|
When using explicit FTP over TLS the client explicitly requests
|
||||||
security from the server in order to upgrade a plain text connection
|
security from the server in order to upgrade a plain text connection
|
||||||
to an encrypted one. Cannot be used in combination with implicit FTP.`,
|
to an encrypted one. Cannot be used in combination with implicit FTP.`,
|
||||||
Default: false,
|
Default: false,
|
||||||
}, {
|
}, {
|
||||||
Name: "concurrency",
|
Name: "concurrency",
|
||||||
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited.",
|
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
|
||||||
Default: 0,
|
Default: 0,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "no_check_certificate",
|
Name: "no_check_certificate",
|
||||||
Help: "Do not verify the TLS certificate of the server.",
|
Help: "Do not verify the TLS certificate of the server",
|
||||||
Default: false,
|
Default: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "disable_epsv",
|
Name: "disable_epsv",
|
||||||
Help: "Disable using EPSV even if server advertises support.",
|
Help: "Disable using EPSV even if server advertises support",
|
||||||
Default: false,
|
Default: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "disable_mlsd",
|
Name: "disable_mlsd",
|
||||||
Help: "Disable using MLSD even if server advertises support.",
|
Help: "Disable using MLSD even if server advertises support",
|
||||||
Default: false,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "writing_mdtm",
|
|
||||||
Help: "Use MDTM to set modification time (VsFtpd quirk)",
|
|
||||||
Default: false,
|
Default: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "idle_timeout",
|
Name: "idle_timeout",
|
||||||
Default: fs.Duration(60 * time.Second),
|
Default: fs.Duration(60 * time.Second),
|
||||||
Help: `Max time before closing idle connections.
|
Help: `Max time before closing idle connections
|
||||||
|
|
||||||
If no connections have been returned to the connection pool in the time
|
If no connections have been returned to the connection pool in the time
|
||||||
given, rclone will empty the connection pool.
|
given, rclone will empty the connection pool.
|
||||||
@@ -119,43 +116,17 @@ Set to 0 to keep connections indefinitely.
|
|||||||
Help: "Maximum time to wait for a response to close.",
|
Help: "Maximum time to wait for a response to close.",
|
||||||
Default: fs.Duration(60 * time.Second),
|
Default: fs.Duration(60 * time.Second),
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
|
||||||
Name: "tls_cache_size",
|
|
||||||
Help: `Size of TLS session cache for all control and data connections.
|
|
||||||
|
|
||||||
TLS cache allows to resume TLS sessions and reuse PSK between connections.
|
|
||||||
Increase if default size is not enough resulting in TLS resumption errors.
|
|
||||||
Enabled by default. Use 0 to disable.`,
|
|
||||||
Default: 32,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "disable_tls13",
|
|
||||||
Help: "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)",
|
|
||||||
Default: false,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "shut_timeout",
|
|
||||||
Help: "Maximum time to wait for data connection closing status.",
|
|
||||||
Default: fs.Duration(60 * time.Second),
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
}, {
|
||||||
Name: config.ConfigEncoding,
|
Name: config.ConfigEncoding,
|
||||||
Help: config.ConfigEncodingHelp,
|
Help: config.ConfigEncodingHelp,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
// The FTP protocol can't handle trailing spaces
|
// The FTP protocol can't handle trailing spaces (for instance
|
||||||
// (for instance, pureftpd turns them into '_')
|
// pureftpd turns them into _)
|
||||||
|
//
|
||||||
|
// proftpd can't handle '*' in file names
|
||||||
|
// pureftpd can't handle '[', ']' or '*'
|
||||||
Default: (encoder.Display |
|
Default: (encoder.Display |
|
||||||
encoder.EncodeRightSpace),
|
encoder.EncodeRightSpace),
|
||||||
Examples: []fs.OptionExample{{
|
|
||||||
Value: "Asterisk,Ctl,Dot,Slash",
|
|
||||||
Help: "ProFTPd can't handle '*' in file names",
|
|
||||||
}, {
|
|
||||||
Value: "BackSlash,Ctl,Del,Dot,RightSpace,Slash,SquareBracket",
|
|
||||||
Help: "PureFTPd can't handle '[]' or '*' in file names",
|
|
||||||
}, {
|
|
||||||
Value: "Ctl,LeftPeriod,Slash",
|
|
||||||
Help: "VsFTPd can't handle file names starting with dot",
|
|
||||||
}},
|
|
||||||
}},
|
}},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -168,16 +139,12 @@ type Options struct {
|
|||||||
Port string `config:"port"`
|
Port string `config:"port"`
|
||||||
TLS bool `config:"tls"`
|
TLS bool `config:"tls"`
|
||||||
ExplicitTLS bool `config:"explicit_tls"`
|
ExplicitTLS bool `config:"explicit_tls"`
|
||||||
TLSCacheSize int `config:"tls_cache_size"`
|
|
||||||
DisableTLS13 bool `config:"disable_tls13"`
|
|
||||||
Concurrency int `config:"concurrency"`
|
Concurrency int `config:"concurrency"`
|
||||||
SkipVerifyTLSCert bool `config:"no_check_certificate"`
|
SkipVerifyTLSCert bool `config:"no_check_certificate"`
|
||||||
DisableEPSV bool `config:"disable_epsv"`
|
DisableEPSV bool `config:"disable_epsv"`
|
||||||
DisableMLSD bool `config:"disable_mlsd"`
|
DisableMLSD bool `config:"disable_mlsd"`
|
||||||
WritingMDTM bool `config:"writing_mdtm"`
|
|
||||||
IdleTimeout fs.Duration `config:"idle_timeout"`
|
IdleTimeout fs.Duration `config:"idle_timeout"`
|
||||||
CloseTimeout fs.Duration `config:"close_timeout"`
|
CloseTimeout fs.Duration `config:"close_timeout"`
|
||||||
ShutTimeout fs.Duration `config:"shut_timeout"`
|
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
Enc encoder.MultiEncoder `config:"encoding"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -198,9 +165,6 @@ type Fs struct {
|
|||||||
tokens *pacer.TokenDispenser
|
tokens *pacer.TokenDispenser
|
||||||
tlsConf *tls.Config
|
tlsConf *tls.Config
|
||||||
pacer *fs.Pacer // pacer for FTP connections
|
pacer *fs.Pacer // pacer for FTP connections
|
||||||
fGetTime bool // true if the ftp library accepts GetTime
|
|
||||||
fSetTime bool // true if the ftp library accepts SetTime
|
|
||||||
fLstTime bool // true if the List call returns precise time
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Object describes an FTP file
|
// Object describes an FTP file
|
||||||
@@ -215,7 +179,6 @@ type FileInfo struct {
|
|||||||
Name string
|
Name string
|
||||||
Size uint64
|
Size uint64
|
||||||
ModTime time.Time
|
ModTime time.Time
|
||||||
precise bool // true if the time is precise
|
|
||||||
IsDir bool
|
IsDir bool
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -278,6 +241,23 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
|
|||||||
return len(p), nil
|
return len(p), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type dialCtx struct {
|
||||||
|
f *Fs
|
||||||
|
ctx context.Context
|
||||||
|
}
|
||||||
|
|
||||||
|
// dial a new connection with fshttp dialer
|
||||||
|
func (d *dialCtx) dial(network, address string) (net.Conn, error) {
|
||||||
|
conn, err := fshttp.NewDialer(d.ctx).Dial(network, address)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if d.f.tlsConf != nil {
|
||||||
|
conn = tls.Client(conn, d.f.tlsConf)
|
||||||
|
}
|
||||||
|
return conn, err
|
||||||
|
}
|
||||||
|
|
||||||
// shouldRetry returns a boolean as to whether this err deserve to be
|
// shouldRetry returns a boolean as to whether this err deserve to be
|
||||||
// retried. It returns the err as a convenience
|
// retried. It returns the err as a convenience
|
||||||
func shouldRetry(ctx context.Context, err error) (bool, error) {
|
func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||||
@@ -297,22 +277,9 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
|
|||||||
// Open a new connection to the FTP server.
|
// Open a new connection to the FTP server.
|
||||||
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
||||||
fs.Debugf(f, "Connecting to FTP server")
|
fs.Debugf(f, "Connecting to FTP server")
|
||||||
|
dCtx := dialCtx{f, ctx}
|
||||||
// Make ftp library dial with fshttp dialer optionally using TLS
|
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dCtx.dial)}
|
||||||
dial := func(network, address string) (conn net.Conn, err error) {
|
if f.opt.ExplicitTLS {
|
||||||
conn, err = fshttp.NewDialer(ctx).Dial(network, address)
|
|
||||||
if f.tlsConf != nil && err == nil {
|
|
||||||
conn = tls.Client(conn, f.tlsConf)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}
|
|
||||||
|
|
||||||
if f.opt.TLS {
|
|
||||||
// Our dialer takes care of TLS but ftp library also needs tlsConf
|
|
||||||
// as a trigger for sending PSBZ and PROT options to server.
|
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
|
|
||||||
} else if f.opt.ExplicitTLS {
|
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
|
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
|
||||||
// Initial connection needs to be cleartext for explicit TLS
|
// Initial connection needs to be cleartext for explicit TLS
|
||||||
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
|
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
|
||||||
@@ -327,12 +294,6 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
|||||||
if f.opt.DisableMLSD {
|
if f.opt.DisableMLSD {
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
|
ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
|
||||||
}
|
}
|
||||||
if f.opt.ShutTimeout != 0 && f.opt.ShutTimeout != fs.DurationOff {
|
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithShutTimeout(time.Duration(f.opt.ShutTimeout)))
|
|
||||||
}
|
|
||||||
if f.opt.WritingMDTM {
|
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
|
|
||||||
}
|
|
||||||
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
|
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
|
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
|
||||||
}
|
}
|
||||||
@@ -470,12 +431,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
|||||||
ServerName: opt.Host,
|
ServerName: opt.Host,
|
||||||
InsecureSkipVerify: opt.SkipVerifyTLSCert,
|
InsecureSkipVerify: opt.SkipVerifyTLSCert,
|
||||||
}
|
}
|
||||||
if opt.TLSCacheSize > 0 {
|
|
||||||
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
|
|
||||||
}
|
|
||||||
if opt.DisableTLS13 {
|
|
||||||
tlsConfig.MaxVersion = tls.VersionTLS12
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
u := protocol + path.Join(dialAddr+"/", root)
|
u := protocol + path.Join(dialAddr+"/", root)
|
||||||
ci := fs.GetConfig(ctx)
|
ci := fs.GetConfig(ctx)
|
||||||
@@ -504,12 +459,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "NewFs")
|
return nil, errors.Wrap(err, "NewFs")
|
||||||
}
|
}
|
||||||
f.fGetTime = c.IsGetTimeSupported()
|
|
||||||
f.fSetTime = c.IsSetTimeSupported()
|
|
||||||
f.fLstTime = c.IsTimePreciseInList()
|
|
||||||
if !f.fLstTime && f.fGetTime {
|
|
||||||
f.features.SlowModTime = true
|
|
||||||
}
|
|
||||||
f.putFtpConnection(&c, nil)
|
f.putFtpConnection(&c, nil)
|
||||||
if root != "" {
|
if root != "" {
|
||||||
// Check to see if the root actually an existing file
|
// Check to see if the root actually an existing file
|
||||||
@@ -628,12 +577,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
|
|||||||
fs: f,
|
fs: f,
|
||||||
remote: remote,
|
remote: remote,
|
||||||
}
|
}
|
||||||
o.info = &FileInfo{
|
info := &FileInfo{
|
||||||
Name: remote,
|
Name: remote,
|
||||||
Size: entry.Size,
|
Size: entry.Size,
|
||||||
ModTime: entry.Time,
|
ModTime: entry.Time,
|
||||||
precise: f.fLstTime,
|
|
||||||
}
|
}
|
||||||
|
o.info = info
|
||||||
|
|
||||||
return o, nil
|
return o, nil
|
||||||
}
|
}
|
||||||
return nil, fs.ErrorObjectNotFound
|
return nil, fs.ErrorObjectNotFound
|
||||||
@@ -728,7 +678,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
Name: newremote,
|
Name: newremote,
|
||||||
Size: object.Size,
|
Size: object.Size,
|
||||||
ModTime: object.Time,
|
ModTime: object.Time,
|
||||||
precise: f.fLstTime,
|
|
||||||
}
|
}
|
||||||
o.info = info
|
o.info = info
|
||||||
entries = append(entries, o)
|
entries = append(entries, o)
|
||||||
@@ -742,18 +691,8 @@ func (f *Fs) Hashes() hash.Set {
|
|||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
// Precision shows whether modified time is supported or not depending on the
|
// Precision shows Modified Time not supported
|
||||||
// FTP server capabilities, namely whether FTP server:
|
|
||||||
// - accepts the MDTM command to get file time (fGetTime)
|
|
||||||
// or supports MLSD returning precise file time in the list (fLstTime)
|
|
||||||
// - accepts the MFMT command to set file time (fSetTime)
|
|
||||||
// or non-standard form of the MDTM command (fSetTime, too)
|
|
||||||
// used by VsFtpd for the same purpose (WritingMDTM)
|
|
||||||
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
|
|
||||||
func (f *Fs) Precision() time.Duration {
|
func (f *Fs) Precision() time.Duration {
|
||||||
if (f.fGetTime || f.fLstTime) && f.fSetTime {
|
|
||||||
return time.Second
|
|
||||||
}
|
|
||||||
return fs.ModTimeNotSupported
|
return fs.ModTimeNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -805,7 +744,6 @@ func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err erro
|
|||||||
Name: remote,
|
Name: remote,
|
||||||
Size: file.Size,
|
Size: file.Size,
|
||||||
ModTime: file.Time,
|
ModTime: file.Time,
|
||||||
precise: f.fLstTime,
|
|
||||||
IsDir: file.Type == ftp.EntryTypeFolder,
|
IsDir: file.Type == ftp.EntryTypeFolder,
|
||||||
}
|
}
|
||||||
return info, nil
|
return info, nil
|
||||||
@@ -991,42 +929,13 @@ func (o *Object) Size() int64 {
|
|||||||
|
|
||||||
// ModTime returns the modification time of the object
|
// ModTime returns the modification time of the object
|
||||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||||
if !o.info.precise && o.fs.fGetTime {
|
|
||||||
c, err := o.fs.getFtpConnection(ctx)
|
|
||||||
if err == nil {
|
|
||||||
path := path.Join(o.fs.root, o.remote)
|
|
||||||
path = o.fs.opt.Enc.FromStandardPath(path)
|
|
||||||
modTime, err := c.GetTime(path)
|
|
||||||
if err == nil && o.info != nil {
|
|
||||||
o.info.ModTime = modTime
|
|
||||||
o.info.precise = true
|
|
||||||
}
|
|
||||||
o.fs.putFtpConnection(&c, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return o.info.ModTime
|
return o.info.ModTime
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetModTime sets the modification time of the object
|
// SetModTime sets the modification time of the object
|
||||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||||
if !o.fs.fSetTime {
|
|
||||||
fs.Errorf(o.fs, "SetModTime is not supported")
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
c, err := o.fs.getFtpConnection(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
path := path.Join(o.fs.root, o.remote)
|
|
||||||
path = o.fs.opt.Enc.FromStandardPath(path)
|
|
||||||
err = c.SetTime(path, modTime.In(time.UTC))
|
|
||||||
if err == nil && o.info != nil {
|
|
||||||
o.info.ModTime = modTime
|
|
||||||
o.info.precise = true
|
|
||||||
}
|
|
||||||
o.fs.putFtpConnection(&c, err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Storable returns a boolean as to whether this object is storable
|
// Storable returns a boolean as to whether this object is storable
|
||||||
func (o *Object) Storable() bool {
|
func (o *Object) Storable() bool {
|
||||||
@@ -1058,11 +967,7 @@ func (f *ftpReadCloser) Close() error {
|
|||||||
errchan <- f.rc.Close()
|
errchan <- f.rc.Close()
|
||||||
}()
|
}()
|
||||||
// Wait for Close for up to 60 seconds by default
|
// Wait for Close for up to 60 seconds by default
|
||||||
closeTimeout := f.f.opt.CloseTimeout
|
timer := time.NewTimer(time.Duration(f.f.opt.CloseTimeout))
|
||||||
if closeTimeout == 0 {
|
|
||||||
closeTimeout = fs.DurationOff
|
|
||||||
}
|
|
||||||
timer := time.NewTimer(time.Duration(closeTimeout))
|
|
||||||
select {
|
select {
|
||||||
case err = <-errchan:
|
case err = <-errchan:
|
||||||
timer.Stop()
|
timer.Stop()
|
||||||
@@ -1149,27 +1054,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
return errors.Wrap(err, "Update")
|
return errors.Wrap(err, "Update")
|
||||||
}
|
}
|
||||||
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
|
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
|
||||||
// Ignore error 250 here - send by some servers
|
|
||||||
if err != nil {
|
|
||||||
switch errX := err.(type) {
|
|
||||||
case *textproto.Error:
|
|
||||||
switch errX.Code {
|
|
||||||
case ftp.StatusRequestedFileActionOK:
|
|
||||||
err = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_ = c.Quit() // toss this connection to avoid sync errors
|
_ = c.Quit() // toss this connection to avoid sync errors
|
||||||
// recycle connection in advance to let remove() find free token
|
|
||||||
o.fs.putFtpConnection(nil, err)
|
|
||||||
remove()
|
remove()
|
||||||
|
o.fs.putFtpConnection(nil, err)
|
||||||
return errors.Wrap(err, "update stor")
|
return errors.Wrap(err, "update stor")
|
||||||
}
|
}
|
||||||
o.fs.putFtpConnection(&c, nil)
|
o.fs.putFtpConnection(&c, nil)
|
||||||
if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
|
|
||||||
return errors.Wrap(err, "SetModTime")
|
|
||||||
}
|
|
||||||
o.info, err = o.fs.getInfo(ctx, path)
|
o.info, err = o.fs.getInfo(ctx, path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "update getinfo")
|
return errors.Wrap(err, "update getinfo")
|
||||||
|
|||||||
@@ -1,115 +0,0 @@
|
|||||||
package ftp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
|
||||||
"github.com/rclone/rclone/fs/object"
|
|
||||||
"github.com/rclone/rclone/fstest"
|
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
|
||||||
"github.com/rclone/rclone/lib/readers"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
type settings map[string]interface{}
|
|
||||||
|
|
||||||
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
|
|
||||||
fsName := strings.Split(f.Name(), "{")[0] // strip off hash
|
|
||||||
configMap := configmap.Simple{}
|
|
||||||
for key, val := range opts {
|
|
||||||
configMap[key] = fmt.Sprintf("%v", val)
|
|
||||||
}
|
|
||||||
remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), f.Root())
|
|
||||||
fixFs, err := fs.NewFs(ctx, remote)
|
|
||||||
require.NoError(t, err)
|
|
||||||
return fixFs
|
|
||||||
}
|
|
||||||
|
|
||||||
// test that big file uploads do not cause network i/o timeout
|
|
||||||
func (f *Fs) testUploadTimeout(t *testing.T) {
|
|
||||||
const (
|
|
||||||
fileSize = 100000000 // 100 MiB
|
|
||||||
idleTimeout = 40 * time.Millisecond // small because test server is local
|
|
||||||
maxTime = 5 * time.Second // prevent test hangup
|
|
||||||
)
|
|
||||||
|
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("not running with -short")
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
ci := fs.GetConfig(ctx)
|
|
||||||
saveLowLevelRetries := ci.LowLevelRetries
|
|
||||||
saveTimeout := ci.Timeout
|
|
||||||
defer func() {
|
|
||||||
ci.LowLevelRetries = saveLowLevelRetries
|
|
||||||
ci.Timeout = saveTimeout
|
|
||||||
}()
|
|
||||||
ci.LowLevelRetries = 1
|
|
||||||
ci.Timeout = idleTimeout
|
|
||||||
|
|
||||||
upload := func(concurrency int, shutTimeout time.Duration) (obj fs.Object, err error) {
|
|
||||||
fixFs := deriveFs(ctx, t, f, settings{
|
|
||||||
"concurrency": concurrency,
|
|
||||||
"shut_timeout": shutTimeout,
|
|
||||||
})
|
|
||||||
|
|
||||||
// Make test object
|
|
||||||
fileTime := fstest.Time("2020-03-08T09:30:00.000000000Z")
|
|
||||||
meta := object.NewStaticObjectInfo("upload-timeout.test", fileTime, int64(fileSize), true, nil, nil)
|
|
||||||
data := readers.NewPatternReader(int64(fileSize))
|
|
||||||
|
|
||||||
// Run upload and ensure maximum time
|
|
||||||
done := make(chan bool)
|
|
||||||
deadline := time.After(maxTime)
|
|
||||||
go func() {
|
|
||||||
obj, err = fixFs.Put(ctx, data, meta)
|
|
||||||
done <- true
|
|
||||||
}()
|
|
||||||
select {
|
|
||||||
case <-done:
|
|
||||||
case <-deadline:
|
|
||||||
t.Fatalf("Upload got stuck for %v !", maxTime)
|
|
||||||
}
|
|
||||||
|
|
||||||
return obj, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// non-zero shut_timeout should fix i/o errors
|
|
||||||
obj, err := upload(f.opt.Concurrency, time.Second)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.NotNil(t, obj)
|
|
||||||
if obj != nil {
|
|
||||||
_ = obj.Remove(ctx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// rclone must support precise time with ProFtpd and PureFtpd out of the box.
|
|
||||||
// The VsFtpd server does not support the MFMT command to set file time like
|
|
||||||
// other servers but by default supports the MDTM command in the non-standard
|
|
||||||
// two-argument form for the same purpose.
|
|
||||||
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
|
|
||||||
func (f *Fs) testTimePrecision(t *testing.T) {
|
|
||||||
name := f.Name()
|
|
||||||
if pos := strings.Index(name, "{"); pos != -1 {
|
|
||||||
name = name[:pos]
|
|
||||||
}
|
|
||||||
switch name {
|
|
||||||
case "TestFTPProftpd", "TestFTPPureftpd", "TestFTPVsftpd":
|
|
||||||
assert.LessOrEqual(t, f.Precision(), time.Second)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// InternalTest dispatches all internal tests
|
|
||||||
func (f *Fs) InternalTest(t *testing.T) {
|
|
||||||
t.Run("UploadTimeout", f.testUploadTimeout)
|
|
||||||
t.Run("TimePrecision", f.testTimePrecision)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ fstests.InternalTester = (*Fs)(nil)
|
|
||||||
@@ -9,27 +9,25 @@ import (
|
|||||||
"github.com/rclone/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestIntegration runs integration tests against rclone FTP server
|
// TestIntegration runs integration tests against the remote
|
||||||
func TestIntegration(t *testing.T) {
|
func TestIntegration(t *testing.T) {
|
||||||
fstests.Run(t, &fstests.Opt{
|
|
||||||
RemoteName: "TestFTPRclone:",
|
|
||||||
NilObject: (*ftp.Object)(nil),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestIntegrationProftpd runs integration tests against proFTPd
|
|
||||||
func TestIntegrationProftpd(t *testing.T) {
|
|
||||||
if *fstest.RemoteName != "" {
|
|
||||||
t.Skip("skipping as -remote is set")
|
|
||||||
}
|
|
||||||
fstests.Run(t, &fstests.Opt{
|
fstests.Run(t, &fstests.Opt{
|
||||||
RemoteName: "TestFTPProftpd:",
|
RemoteName: "TestFTPProftpd:",
|
||||||
NilObject: (*ftp.Object)(nil),
|
NilObject: (*ftp.Object)(nil),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestIntegrationPureftpd runs integration tests against pureFTPd
|
func TestIntegration2(t *testing.T) {
|
||||||
func TestIntegrationPureftpd(t *testing.T) {
|
if *fstest.RemoteName != "" {
|
||||||
|
t.Skip("skipping as -remote is set")
|
||||||
|
}
|
||||||
|
fstests.Run(t, &fstests.Opt{
|
||||||
|
RemoteName: "TestFTPRclone:",
|
||||||
|
NilObject: (*ftp.Object)(nil),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIntegration3(t *testing.T) {
|
||||||
if *fstest.RemoteName != "" {
|
if *fstest.RemoteName != "" {
|
||||||
t.Skip("skipping as -remote is set")
|
t.Skip("skipping as -remote is set")
|
||||||
}
|
}
|
||||||
@@ -39,13 +37,12 @@ func TestIntegrationPureftpd(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestIntegrationVsftpd runs integration tests against vsFTPd
|
// func TestIntegration4(t *testing.T) {
|
||||||
func TestIntegrationVsftpd(t *testing.T) {
|
// if *fstest.RemoteName != "" {
|
||||||
if *fstest.RemoteName != "" {
|
// t.Skip("skipping as -remote is set")
|
||||||
t.Skip("skipping as -remote is set")
|
// }
|
||||||
}
|
// fstests.Run(t, &fstests.Opt{
|
||||||
fstests.Run(t, &fstests.Opt{
|
// RemoteName: "TestFTPVsftpd:",
|
||||||
RemoteName: "TestFTPVsftpd:",
|
// NilObject: (*ftp.Object)(nil),
|
||||||
NilObject: (*ftp.Object)(nil),
|
// })
|
||||||
})
|
// }
|
||||||
}
|
|
||||||
|
|||||||
@@ -19,9 +19,9 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
"path"
|
"path"
|
||||||
"strconv"
|
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -51,9 +51,9 @@ import (
|
|||||||
const (
|
const (
|
||||||
rcloneClientID = "202264815644.apps.googleusercontent.com"
|
rcloneClientID = "202264815644.apps.googleusercontent.com"
|
||||||
rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
|
rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
|
||||||
timeFormat = time.RFC3339Nano
|
timeFormatIn = time.RFC3339
|
||||||
metaMtime = "mtime" // key to store mtime in metadata
|
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
|
||||||
metaMtimeGsutil = "goog-reserved-file-mtime" // key used by GSUtil to store mtime in metadata
|
metaMtime = "mtime" // key to store mtime under in metadata
|
||||||
listChunks = 1000 // chunk size to read directory listings
|
listChunks = 1000 // chunk size to read directory listings
|
||||||
minSleep = 10 * time.Millisecond
|
minSleep = 10 * time.Millisecond
|
||||||
)
|
)
|
||||||
@@ -76,71 +76,72 @@ func init() {
|
|||||||
Prefix: "gcs",
|
Prefix: "gcs",
|
||||||
Description: "Google Cloud Storage (this is not Google Drive)",
|
Description: "Google Cloud Storage (this is not Google Drive)",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
Config: func(ctx context.Context, name string, m configmap.Mapper) {
|
||||||
saFile, _ := m.Get("service_account_file")
|
saFile, _ := m.Get("service_account_file")
|
||||||
saCreds, _ := m.Get("service_account_credentials")
|
saCreds, _ := m.Get("service_account_credentials")
|
||||||
anonymous, _ := m.Get("anonymous")
|
anonymous, _ := m.Get("anonymous")
|
||||||
if saFile != "" || saCreds != "" || anonymous == "true" {
|
if saFile != "" || saCreds != "" || anonymous == "true" {
|
||||||
return nil, nil
|
return
|
||||||
|
}
|
||||||
|
err := oauthutil.Config(ctx, "google cloud storage", name, m, storageConfig, nil)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed to configure token: %v", err)
|
||||||
}
|
}
|
||||||
return oauthutil.ConfigOut("", &oauthutil.Options{
|
|
||||||
OAuth2Config: storageConfig,
|
|
||||||
})
|
|
||||||
},
|
},
|
||||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||||
Name: "project_number",
|
Name: "project_number",
|
||||||
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
|
Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
|
||||||
}, {
|
}, {
|
||||||
Name: "service_account_file",
|
Name: "service_account_file",
|
||||||
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
|
Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
|
||||||
}, {
|
}, {
|
||||||
Name: "service_account_credentials",
|
Name: "service_account_credentials",
|
||||||
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||||
Hide: fs.OptionHideBoth,
|
Hide: fs.OptionHideBoth,
|
||||||
}, {
|
}, {
|
||||||
Name: "anonymous",
|
Name: "anonymous",
|
||||||
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
|
Help: "Access public buckets and objects without credentials\nSet to 'true' if you just want to download files and don't configure credentials.",
|
||||||
Default: false,
|
Default: false,
|
||||||
}, {
|
}, {
|
||||||
Name: "object_acl",
|
Name: "object_acl",
|
||||||
Help: "Access Control List for new objects.",
|
Help: "Access Control List for new objects.",
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "authenticatedRead",
|
Value: "authenticatedRead",
|
||||||
Help: "Object owner gets OWNER access.\nAll Authenticated Users get READER access.",
|
Help: "Object owner gets OWNER access, and all Authenticated Users get READER access.",
|
||||||
}, {
|
}, {
|
||||||
Value: "bucketOwnerFullControl",
|
Value: "bucketOwnerFullControl",
|
||||||
Help: "Object owner gets OWNER access.\nProject team owners get OWNER access.",
|
Help: "Object owner gets OWNER access, and project team owners get OWNER access.",
|
||||||
}, {
|
}, {
|
||||||
Value: "bucketOwnerRead",
|
Value: "bucketOwnerRead",
|
||||||
Help: "Object owner gets OWNER access.\nProject team owners get READER access.",
|
Help: "Object owner gets OWNER access, and project team owners get READER access.",
|
||||||
}, {
|
}, {
|
||||||
Value: "private",
|
Value: "private",
|
||||||
Help: "Object owner gets OWNER access.\nDefault if left blank.",
|
Help: "Object owner gets OWNER access [default if left blank].",
|
||||||
}, {
|
}, {
|
||||||
Value: "projectPrivate",
|
Value: "projectPrivate",
|
||||||
Help: "Object owner gets OWNER access.\nProject team members get access according to their roles.",
|
Help: "Object owner gets OWNER access, and project team members get access according to their roles.",
|
||||||
}, {
|
}, {
|
||||||
Value: "publicRead",
|
Value: "publicRead",
|
||||||
Help: "Object owner gets OWNER access.\nAll Users get READER access.",
|
Help: "Object owner gets OWNER access, and all Users get READER access.",
|
||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "bucket_acl",
|
Name: "bucket_acl",
|
||||||
Help: "Access Control List for new buckets.",
|
Help: "Access Control List for new buckets.",
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "authenticatedRead",
|
Value: "authenticatedRead",
|
||||||
Help: "Project team owners get OWNER access.\nAll Authenticated Users get READER access.",
|
Help: "Project team owners get OWNER access, and all Authenticated Users get READER access.",
|
||||||
}, {
|
}, {
|
||||||
Value: "private",
|
Value: "private",
|
||||||
Help: "Project team owners get OWNER access.\nDefault if left blank.",
|
Help: "Project team owners get OWNER access [default if left blank].",
|
||||||
}, {
|
}, {
|
||||||
Value: "projectPrivate",
|
Value: "projectPrivate",
|
||||||
Help: "Project team members get access according to their roles.",
|
Help: "Project team members get access according to their roles.",
|
||||||
}, {
|
}, {
|
||||||
Value: "publicRead",
|
Value: "publicRead",
|
||||||
Help: "Project team owners get OWNER access.\nAll Users get READER access.",
|
Help: "Project team owners get OWNER access, and all Users get READER access.",
|
||||||
}, {
|
}, {
|
||||||
Value: "publicReadWrite",
|
Value: "publicReadWrite",
|
||||||
Help: "Project team owners get OWNER access.\nAll Users get WRITER access.",
|
Help: "Project team owners get OWNER access, and all Users get WRITER access.",
|
||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "bucket_policy_only",
|
Name: "bucket_policy_only",
|
||||||
@@ -163,64 +164,64 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
|||||||
Help: "Location for the newly created buckets.",
|
Help: "Location for the newly created buckets.",
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "",
|
Value: "",
|
||||||
Help: "Empty for default location (US)",
|
Help: "Empty for default location (US).",
|
||||||
}, {
|
}, {
|
||||||
Value: "asia",
|
Value: "asia",
|
||||||
Help: "Multi-regional location for Asia",
|
Help: "Multi-regional location for Asia.",
|
||||||
}, {
|
}, {
|
||||||
Value: "eu",
|
Value: "eu",
|
||||||
Help: "Multi-regional location for Europe",
|
Help: "Multi-regional location for Europe.",
|
||||||
}, {
|
}, {
|
||||||
Value: "us",
|
Value: "us",
|
||||||
Help: "Multi-regional location for United States",
|
Help: "Multi-regional location for United States.",
|
||||||
}, {
|
}, {
|
||||||
Value: "asia-east1",
|
Value: "asia-east1",
|
||||||
Help: "Taiwan",
|
Help: "Taiwan.",
|
||||||
}, {
|
}, {
|
||||||
Value: "asia-east2",
|
Value: "asia-east2",
|
||||||
Help: "Hong Kong",
|
Help: "Hong Kong.",
|
||||||
}, {
|
}, {
|
||||||
Value: "asia-northeast1",
|
Value: "asia-northeast1",
|
||||||
Help: "Tokyo",
|
Help: "Tokyo.",
|
||||||
}, {
|
}, {
|
||||||
Value: "asia-south1",
|
Value: "asia-south1",
|
||||||
Help: "Mumbai",
|
Help: "Mumbai.",
|
||||||
}, {
|
}, {
|
||||||
Value: "asia-southeast1",
|
Value: "asia-southeast1",
|
||||||
Help: "Singapore",
|
Help: "Singapore.",
|
||||||
}, {
|
}, {
|
||||||
Value: "australia-southeast1",
|
Value: "australia-southeast1",
|
||||||
Help: "Sydney",
|
Help: "Sydney.",
|
||||||
}, {
|
}, {
|
||||||
Value: "europe-north1",
|
Value: "europe-north1",
|
||||||
Help: "Finland",
|
Help: "Finland.",
|
||||||
}, {
|
}, {
|
||||||
Value: "europe-west1",
|
Value: "europe-west1",
|
||||||
Help: "Belgium",
|
Help: "Belgium.",
|
||||||
}, {
|
}, {
|
||||||
Value: "europe-west2",
|
Value: "europe-west2",
|
||||||
Help: "London",
|
Help: "London.",
|
||||||
}, {
|
}, {
|
||||||
Value: "europe-west3",
|
Value: "europe-west3",
|
||||||
Help: "Frankfurt",
|
Help: "Frankfurt.",
|
||||||
}, {
|
}, {
|
||||||
Value: "europe-west4",
|
Value: "europe-west4",
|
||||||
Help: "Netherlands",
|
Help: "Netherlands.",
|
||||||
}, {
|
}, {
|
||||||
Value: "us-central1",
|
Value: "us-central1",
|
||||||
Help: "Iowa",
|
Help: "Iowa.",
|
||||||
}, {
|
}, {
|
||||||
Value: "us-east1",
|
Value: "us-east1",
|
||||||
Help: "South Carolina",
|
Help: "South Carolina.",
|
||||||
}, {
|
}, {
|
||||||
Value: "us-east4",
|
Value: "us-east4",
|
||||||
Help: "Northern Virginia",
|
Help: "Northern Virginia.",
|
||||||
}, {
|
}, {
|
||||||
Value: "us-west1",
|
Value: "us-west1",
|
||||||
Help: "Oregon",
|
Help: "Oregon.",
|
||||||
}, {
|
}, {
|
||||||
Value: "us-west2",
|
Value: "us-west2",
|
||||||
Help: "California",
|
Help: "California.",
|
||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "storage_class",
|
Name: "storage_class",
|
||||||
@@ -921,7 +922,7 @@ func (o *Object) setMetaData(info *storage.Object) {
|
|||||||
// read mtime out of metadata if available
|
// read mtime out of metadata if available
|
||||||
mtimeString, ok := info.Metadata[metaMtime]
|
mtimeString, ok := info.Metadata[metaMtime]
|
||||||
if ok {
|
if ok {
|
||||||
modTime, err := time.Parse(timeFormat, mtimeString)
|
modTime, err := time.Parse(timeFormatIn, mtimeString)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
o.modTime = modTime
|
o.modTime = modTime
|
||||||
return
|
return
|
||||||
@@ -929,19 +930,8 @@ func (o *Object) setMetaData(info *storage.Object) {
|
|||||||
fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
|
fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fallback to GSUtil mtime
|
|
||||||
mtimeGsutilString, ok := info.Metadata[metaMtimeGsutil]
|
|
||||||
if ok {
|
|
||||||
unixTimeSec, err := strconv.ParseInt(mtimeGsutilString, 10, 64)
|
|
||||||
if err == nil {
|
|
||||||
o.modTime = time.Unix(unixTimeSec, 0)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
fs.Debugf(o, "Failed to read GSUtil mtime from metadata: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fallback to the Updated time
|
// Fallback to the Updated time
|
||||||
modTime, err := time.Parse(timeFormat, info.Updated)
|
modTime, err := time.Parse(timeFormatIn, info.Updated)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Logf(o, "Bad time decode: %v", err)
|
fs.Logf(o, "Bad time decode: %v", err)
|
||||||
} else {
|
} else {
|
||||||
@@ -998,8 +988,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
|
|||||||
// Returns metadata for an object
|
// Returns metadata for an object
|
||||||
func metadataFromModTime(modTime time.Time) map[string]string {
|
func metadataFromModTime(modTime time.Time) map[string]string {
|
||||||
metadata := make(map[string]string, 1)
|
metadata := make(map[string]string, 1)
|
||||||
metadata[metaMtime] = modTime.Format(timeFormat)
|
metadata[metaMtime] = modTime.Format(timeFormatOut)
|
||||||
metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
|
|
||||||
return metadata
|
return metadata
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1011,11 +1000,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// Add the mtime to the existing metadata
|
// Add the mtime to the existing metadata
|
||||||
|
mtime := modTime.Format(timeFormatOut)
|
||||||
if object.Metadata == nil {
|
if object.Metadata == nil {
|
||||||
object.Metadata = make(map[string]string, 1)
|
object.Metadata = make(map[string]string, 1)
|
||||||
}
|
}
|
||||||
object.Metadata[metaMtime] = modTime.Format(timeFormat)
|
object.Metadata[metaMtime] = mtime
|
||||||
object.Metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
|
|
||||||
// Copy the object to itself to update the metadata
|
// Copy the object to itself to update the metadata
|
||||||
// Using PATCH requires too many permissions
|
// Using PATCH requires too many permissions
|
||||||
bucket, bucketPath := o.split()
|
bucket, bucketPath := o.split()
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
golog "log"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"path"
|
"path"
|
||||||
@@ -20,7 +21,6 @@ import (
|
|||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/rclone/rclone/backend/googlephotos/api"
|
"github.com/rclone/rclone/backend/googlephotos/api"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/config"
|
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
"github.com/rclone/rclone/fs/config/obscure"
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
@@ -29,7 +29,6 @@ import (
|
|||||||
"github.com/rclone/rclone/fs/fshttp"
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
"github.com/rclone/rclone/fs/log"
|
"github.com/rclone/rclone/fs/log"
|
||||||
"github.com/rclone/rclone/lib/encoder"
|
|
||||||
"github.com/rclone/rclone/lib/oauthutil"
|
"github.com/rclone/rclone/lib/oauthutil"
|
||||||
"github.com/rclone/rclone/lib/pacer"
|
"github.com/rclone/rclone/lib/pacer"
|
||||||
"github.com/rclone/rclone/lib/rest"
|
"github.com/rclone/rclone/lib/rest"
|
||||||
@@ -55,7 +54,6 @@ const (
|
|||||||
minSleep = 10 * time.Millisecond
|
minSleep = 10 * time.Millisecond
|
||||||
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly"
|
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly"
|
||||||
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
|
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
|
||||||
scopeAccess = 2 // position of access scope in list
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -64,7 +62,7 @@ var (
|
|||||||
Scopes: []string{
|
Scopes: []string{
|
||||||
"openid",
|
"openid",
|
||||||
"profile",
|
"profile",
|
||||||
scopeReadWrite, // this must be at position scopeAccess
|
scopeReadWrite,
|
||||||
},
|
},
|
||||||
Endpoint: google.Endpoint,
|
Endpoint: google.Endpoint,
|
||||||
ClientID: rcloneClientID,
|
ClientID: rcloneClientID,
|
||||||
@@ -80,36 +78,36 @@ func init() {
|
|||||||
Prefix: "gphotos",
|
Prefix: "gphotos",
|
||||||
Description: "Google Photos",
|
Description: "Google Photos",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
Config: func(ctx context.Context, name string, m configmap.Mapper) {
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err := configstruct.Set(m, opt)
|
err := configstruct.Set(m, opt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "couldn't parse config into struct")
|
fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
switch config.State {
|
|
||||||
case "":
|
|
||||||
// Fill in the scopes
|
// Fill in the scopes
|
||||||
if opt.ReadOnly {
|
if opt.ReadOnly {
|
||||||
oauthConfig.Scopes[scopeAccess] = scopeReadOnly
|
oauthConfig.Scopes[0] = scopeReadOnly
|
||||||
} else {
|
} else {
|
||||||
oauthConfig.Scopes[scopeAccess] = scopeReadWrite
|
oauthConfig.Scopes[0] = scopeReadWrite
|
||||||
}
|
}
|
||||||
return oauthutil.ConfigOut("warning", &oauthutil.Options{
|
|
||||||
OAuth2Config: oauthConfig,
|
|
||||||
})
|
|
||||||
case "warning":
|
|
||||||
// Warn the user as required by google photos integration
|
|
||||||
return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
|
|
||||||
|
|
||||||
IMPORTANT: All media items uploaded to Google Photos with rclone
|
// Do the oauth
|
||||||
are stored in full resolution at original quality. These uploads
|
err = oauthutil.Config(ctx, "google photos", name, m, oauthConfig, nil)
|
||||||
will count towards storage in your Google Account.`)
|
if err != nil {
|
||||||
case "warning_done":
|
golog.Fatalf("Failed to configure token: %v", err)
|
||||||
return nil, nil
|
|
||||||
}
|
}
|
||||||
return nil, fmt.Errorf("unknown state %q", config.State)
|
|
||||||
|
// Warn the user
|
||||||
|
fmt.Print(`
|
||||||
|
*** IMPORTANT: All media items uploaded to Google Photos with rclone
|
||||||
|
*** are stored in full resolution at original quality. These uploads
|
||||||
|
*** will count towards storage in your Google Account.
|
||||||
|
|
||||||
|
`)
|
||||||
|
|
||||||
},
|
},
|
||||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||||
Name: "read_only",
|
Name: "read_only",
|
||||||
@@ -132,7 +130,7 @@ you want to read the media.`,
|
|||||||
}, {
|
}, {
|
||||||
Name: "start_year",
|
Name: "start_year",
|
||||||
Default: 2000,
|
Default: 2000,
|
||||||
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year.`,
|
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "include_archived",
|
Name: "include_archived",
|
||||||
@@ -151,13 +149,6 @@ listings and transferred.
|
|||||||
Without this flag, archived media will not be visible in directory
|
Without this flag, archived media will not be visible in directory
|
||||||
listings and won't be transferred.`,
|
listings and won't be transferred.`,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
|
||||||
Name: config.ConfigEncoding,
|
|
||||||
Help: config.ConfigEncodingHelp,
|
|
||||||
Advanced: true,
|
|
||||||
Default: (encoder.Base |
|
|
||||||
encoder.EncodeCrLf |
|
|
||||||
encoder.EncodeInvalidUtf8),
|
|
||||||
}}...),
|
}}...),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -168,7 +159,6 @@ type Options struct {
|
|||||||
ReadSize bool `config:"read_size"`
|
ReadSize bool `config:"read_size"`
|
||||||
StartYear int `config:"start_year"`
|
StartYear int `config:"start_year"`
|
||||||
IncludeArchived bool `config:"include_archived"`
|
IncludeArchived bool `config:"include_archived"`
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a remote storage server
|
// Fs represents a remote storage server
|
||||||
@@ -506,9 +496,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
|
|||||||
lastID = newAlbums[len(newAlbums)-1].ID
|
lastID = newAlbums[len(newAlbums)-1].ID
|
||||||
}
|
}
|
||||||
for i := range newAlbums {
|
for i := range newAlbums {
|
||||||
anAlbum := newAlbums[i]
|
all.add(&newAlbums[i])
|
||||||
anAlbum.Title = f.opt.Enc.FromStandardPath(anAlbum.Title)
|
|
||||||
all.add(&anAlbum)
|
|
||||||
}
|
}
|
||||||
if result.NextPageToken == "" {
|
if result.NextPageToken == "" {
|
||||||
break
|
break
|
||||||
|
|||||||
@@ -1,179 +0,0 @@
|
|||||||
package hasher
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"path"
|
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/accounting"
|
|
||||||
"github.com/rclone/rclone/fs/cache"
|
|
||||||
"github.com/rclone/rclone/fs/fspath"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/rclone/rclone/fs/operations"
|
|
||||||
"github.com/rclone/rclone/lib/kv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Command the backend to run a named command
|
|
||||||
//
|
|
||||||
// The command run is name
|
|
||||||
// args may be used to read arguments from
|
|
||||||
// opts may be used to read optional arguments from
|
|
||||||
//
|
|
||||||
// The result should be capable of being JSON encoded
|
|
||||||
// If it is a string or a []string it will be shown to the user
|
|
||||||
// otherwise it will be JSON encoded and shown to the user like that
|
|
||||||
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
|
||||||
switch name {
|
|
||||||
case "drop":
|
|
||||||
return nil, f.db.Stop(true)
|
|
||||||
case "dump", "fulldump":
|
|
||||||
return nil, f.dbDump(ctx, name == "fulldump", "")
|
|
||||||
case "import", "stickyimport":
|
|
||||||
sticky := name == "stickyimport"
|
|
||||||
if len(arg) != 2 {
|
|
||||||
return nil, errors.New("please provide checksum type and path to sum file")
|
|
||||||
}
|
|
||||||
return nil, f.dbImport(ctx, arg[0], arg[1], sticky)
|
|
||||||
default:
|
|
||||||
return nil, fs.ErrorCommandNotFound
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var commandHelp = []fs.CommandHelp{{
|
|
||||||
Name: "drop",
|
|
||||||
Short: "Drop cache",
|
|
||||||
Long: `Completely drop checksum cache.
|
|
||||||
Usage Example:
|
|
||||||
rclone backend drop hasher:
|
|
||||||
`,
|
|
||||||
}, {
|
|
||||||
Name: "dump",
|
|
||||||
Short: "Dump the database",
|
|
||||||
Long: "Dump cache records covered by the current remote",
|
|
||||||
}, {
|
|
||||||
Name: "fulldump",
|
|
||||||
Short: "Full dump of the database",
|
|
||||||
Long: "Dump all cache records in the database",
|
|
||||||
}, {
|
|
||||||
Name: "import",
|
|
||||||
Short: "Import a SUM file",
|
|
||||||
Long: `Amend hash cache from a SUM file and bind checksums to files by size/time.
|
|
||||||
Usage Example:
|
|
||||||
rclone backend import hasher:subdir md5 /path/to/sum.md5
|
|
||||||
`,
|
|
||||||
}, {
|
|
||||||
Name: "stickyimport",
|
|
||||||
Short: "Perform fast import of a SUM file",
|
|
||||||
Long: `Fill hash cache from a SUM file without verifying file fingerprints.
|
|
||||||
Usage Example:
|
|
||||||
rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5
|
|
||||||
`,
|
|
||||||
}}
|
|
||||||
|
|
||||||
func (f *Fs) dbDump(ctx context.Context, full bool, root string) error {
|
|
||||||
if root == "" {
|
|
||||||
remoteFs, err := cache.Get(ctx, f.opt.Remote)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
root = fspath.JoinRootPath(remoteFs.Root(), f.Root())
|
|
||||||
}
|
|
||||||
op := &kvDump{
|
|
||||||
full: full,
|
|
||||||
root: root,
|
|
||||||
path: f.db.Path(),
|
|
||||||
fs: f,
|
|
||||||
}
|
|
||||||
err := f.db.Do(false, op)
|
|
||||||
if err == kv.ErrEmpty {
|
|
||||||
fs.Infof(op.path, "empty")
|
|
||||||
err = nil
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bool) error {
|
|
||||||
var hashType hash.Type
|
|
||||||
if err := hashType.Set(hashName); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if hashType == hash.None {
|
|
||||||
return errors.New("please provide a valid hash type")
|
|
||||||
}
|
|
||||||
if !f.suppHashes.Contains(hashType) {
|
|
||||||
return errors.New("unsupported hash type")
|
|
||||||
}
|
|
||||||
if !f.keepHashes.Contains(hashType) {
|
|
||||||
fs.Infof(nil, "Need not import hashes of this type")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
_, sumPath, err := fspath.SplitFs(sumRemote)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
sumFs, err := cache.Get(ctx, sumRemote)
|
|
||||||
switch err {
|
|
||||||
case fs.ErrorIsFile:
|
|
||||||
// ok
|
|
||||||
case nil:
|
|
||||||
return errors.Errorf("not a file: %s", sumRemote)
|
|
||||||
default:
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
sumObj, err := sumFs.NewObject(ctx, path.Base(sumPath))
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "cannot open sum file")
|
|
||||||
}
|
|
||||||
hashes, err := operations.ParseSumFile(ctx, sumObj)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "failed to parse sum file")
|
|
||||||
}
|
|
||||||
|
|
||||||
if sticky {
|
|
||||||
rootPath := f.Fs.Root()
|
|
||||||
for remote, hashVal := range hashes {
|
|
||||||
key := path.Join(rootPath, remote)
|
|
||||||
hashSums := operations.HashSums{hashName: hashVal}
|
|
||||||
if err := f.putRawHashes(ctx, key, anyFingerprint, hashSums); err != nil {
|
|
||||||
fs.Errorf(nil, "%s: failed to import: %v", remote, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fs.Infof(nil, "Summary: %d checksum(s) imported", len(hashes))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
const longImportThreshold = 100
|
|
||||||
if len(hashes) > longImportThreshold {
|
|
||||||
fs.Infof(nil, "Importing %d checksums. Please wait...", len(hashes))
|
|
||||||
}
|
|
||||||
|
|
||||||
doneCount := 0
|
|
||||||
err = operations.ListFn(ctx, f, func(obj fs.Object) {
|
|
||||||
remote := obj.Remote()
|
|
||||||
hash := hashes[remote]
|
|
||||||
hashes[remote] = "" // mark as handled
|
|
||||||
o, ok := obj.(*Object)
|
|
||||||
if ok && hash != "" {
|
|
||||||
if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
|
|
||||||
fs.Errorf(nil, "%s: failed to import: %v", remote, err)
|
|
||||||
}
|
|
||||||
accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
|
|
||||||
doneCount++
|
|
||||||
}
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
fs.Errorf(nil, "Import failed: %v", err)
|
|
||||||
}
|
|
||||||
skipCount := 0
|
|
||||||
for remote, emptyOrDone := range hashes {
|
|
||||||
if emptyOrDone != "" {
|
|
||||||
fs.Infof(nil, "Skip vanished object: %s", remote)
|
|
||||||
skipCount++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fs.Infof(nil, "Summary: %d imported, %d skipped", doneCount, skipCount)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
@@ -1,508 +0,0 @@
|
|||||||
// Package hasher implements a checksum handling overlay backend
|
|
||||||
package hasher
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/gob"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/cache"
|
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
|
||||||
"github.com/rclone/rclone/fs/fspath"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/rclone/rclone/lib/kv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Register with Fs
|
|
||||||
func init() {
|
|
||||||
fs.Register(&fs.RegInfo{
|
|
||||||
Name: "hasher",
|
|
||||||
Description: "Better checksums for other remotes",
|
|
||||||
NewFs: NewFs,
|
|
||||||
CommandHelp: commandHelp,
|
|
||||||
Options: []fs.Option{{
|
|
||||||
Name: "remote",
|
|
||||||
Required: true,
|
|
||||||
Help: "Remote to cache checksums for (e.g. myRemote:path).",
|
|
||||||
}, {
|
|
||||||
Name: "hashes",
|
|
||||||
Default: fs.CommaSepList{"md5", "sha1"},
|
|
||||||
Advanced: false,
|
|
||||||
Help: "Comma separated list of supported checksum types.",
|
|
||||||
}, {
|
|
||||||
Name: "max_age",
|
|
||||||
Advanced: false,
|
|
||||||
Default: fs.DurationOff,
|
|
||||||
Help: "Maximum time to keep checksums in cache (0 = no cache, off = cache forever).",
|
|
||||||
}, {
|
|
||||||
Name: "auto_size",
|
|
||||||
Advanced: true,
|
|
||||||
Default: fs.SizeSuffix(0),
|
|
||||||
Help: "Auto-update checksum for files smaller than this size (disabled by default).",
|
|
||||||
}},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Options defines the configuration for this backend
|
|
||||||
type Options struct {
|
|
||||||
Remote string `config:"remote"`
|
|
||||||
Hashes fs.CommaSepList `config:"hashes"`
|
|
||||||
AutoSize fs.SizeSuffix `config:"auto_size"`
|
|
||||||
MaxAge fs.Duration `config:"max_age"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs represents a wrapped fs.Fs
|
|
||||||
type Fs struct {
|
|
||||||
fs.Fs
|
|
||||||
name string
|
|
||||||
root string
|
|
||||||
wrapper fs.Fs
|
|
||||||
features *fs.Features
|
|
||||||
opt *Options
|
|
||||||
db *kv.DB
|
|
||||||
// fingerprinting
|
|
||||||
fpTime bool // true if using time in fingerprints
|
|
||||||
fpHash hash.Type // hash type to use in fingerprints or None
|
|
||||||
// hash types triaged by groups
|
|
||||||
suppHashes hash.Set // all supported checksum types
|
|
||||||
passHashes hash.Set // passed directly to the base without caching
|
|
||||||
slowHashes hash.Set // passed to the base and then cached
|
|
||||||
autoHashes hash.Set // calculated in-house and cached
|
|
||||||
keepHashes hash.Set // checksums to keep in cache (slow + auto)
|
|
||||||
}
|
|
||||||
|
|
||||||
var warnExperimental sync.Once
|
|
||||||
|
|
||||||
// NewFs constructs an Fs from the remote:path string
|
|
||||||
func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs.Fs, error) {
|
|
||||||
if !kv.Supported() {
|
|
||||||
return nil, errors.New("hasher is not supported on this OS")
|
|
||||||
}
|
|
||||||
warnExperimental.Do(func() {
|
|
||||||
fs.Infof(nil, "Hasher is EXPERIMENTAL!")
|
|
||||||
})
|
|
||||||
|
|
||||||
opt := &Options{}
|
|
||||||
err := configstruct.Set(cmap, opt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.HasPrefix(opt.Remote, fsname+":") {
|
|
||||||
return nil, errors.New("can't point remote at itself")
|
|
||||||
}
|
|
||||||
remotePath := fspath.JoinRootPath(opt.Remote, rpath)
|
|
||||||
baseFs, err := cache.Get(ctx, remotePath)
|
|
||||||
if err != nil && err != fs.ErrorIsFile {
|
|
||||||
return nil, errors.Wrapf(err, "failed to derive base remote %q", opt.Remote)
|
|
||||||
}
|
|
||||||
|
|
||||||
f := &Fs{
|
|
||||||
Fs: baseFs,
|
|
||||||
name: fsname,
|
|
||||||
root: rpath,
|
|
||||||
opt: opt,
|
|
||||||
}
|
|
||||||
baseFeatures := baseFs.Features()
|
|
||||||
f.fpTime = baseFs.Precision() != fs.ModTimeNotSupported
|
|
||||||
|
|
||||||
if baseFeatures.SlowHash {
|
|
||||||
f.slowHashes = f.Fs.Hashes()
|
|
||||||
} else {
|
|
||||||
f.passHashes = f.Fs.Hashes()
|
|
||||||
f.fpHash = f.passHashes.GetOne()
|
|
||||||
}
|
|
||||||
|
|
||||||
f.suppHashes = f.passHashes
|
|
||||||
f.suppHashes.Add(f.slowHashes.Array()...)
|
|
||||||
|
|
||||||
for _, hashName := range opt.Hashes {
|
|
||||||
var ht hash.Type
|
|
||||||
if err := ht.Set(hashName); err != nil {
|
|
||||||
return nil, errors.Errorf("invalid token %q in hash string %q", hashName, opt.Hashes.String())
|
|
||||||
}
|
|
||||||
if !f.slowHashes.Contains(ht) {
|
|
||||||
f.autoHashes.Add(ht)
|
|
||||||
}
|
|
||||||
f.keepHashes.Add(ht)
|
|
||||||
f.suppHashes.Add(ht)
|
|
||||||
}
|
|
||||||
|
|
||||||
fs.Debugf(f, "Groups by usage: cached %s, passed %s, auto %s, slow %s, supported %s",
|
|
||||||
f.keepHashes, f.passHashes, f.autoHashes, f.slowHashes, f.suppHashes)
|
|
||||||
|
|
||||||
var nilSet hash.Set
|
|
||||||
if f.keepHashes == nilSet {
|
|
||||||
return nil, errors.New("configured hash_names have nothing to keep in cache")
|
|
||||||
}
|
|
||||||
|
|
||||||
if f.opt.MaxAge > 0 {
|
|
||||||
gob.Register(hashRecord{})
|
|
||||||
db, err := kv.Start(ctx, "hasher", f.Fs)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
f.db = db
|
|
||||||
}
|
|
||||||
|
|
||||||
stubFeatures := &fs.Features{
|
|
||||||
CanHaveEmptyDirectories: true,
|
|
||||||
IsLocal: true,
|
|
||||||
ReadMimeType: true,
|
|
||||||
WriteMimeType: true,
|
|
||||||
}
|
|
||||||
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
|
|
||||||
|
|
||||||
cache.PinUntilFinalized(f.Fs, f)
|
|
||||||
return f, err
|
|
||||||
}
|
|
||||||
|
|
||||||
//
|
|
||||||
// Filesystem
|
|
||||||
//
|
|
||||||
|
|
||||||
// Name of the remote (as passed into NewFs)
|
|
||||||
func (f *Fs) Name() string { return f.name }
|
|
||||||
|
|
||||||
// Root of the remote (as passed into NewFs)
|
|
||||||
func (f *Fs) Root() string { return f.root }
|
|
||||||
|
|
||||||
// Features returns the optional features of this Fs
|
|
||||||
func (f *Fs) Features() *fs.Features { return f.features }
|
|
||||||
|
|
||||||
// Hashes returns the supported hash sets.
|
|
||||||
func (f *Fs) Hashes() hash.Set { return f.suppHashes }
|
|
||||||
|
|
||||||
// String returns a description of the FS
|
|
||||||
// The "hasher::" prefix is a distinctive feature.
|
|
||||||
func (f *Fs) String() string {
|
|
||||||
return fmt.Sprintf("hasher::%s:%s", f.name, f.root)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnWrap returns the Fs that this Fs is wrapping
|
|
||||||
func (f *Fs) UnWrap() fs.Fs { return f.Fs }
|
|
||||||
|
|
||||||
// WrapFs returns the Fs that is wrapping this Fs
|
|
||||||
func (f *Fs) WrapFs() fs.Fs { return f.wrapper }
|
|
||||||
|
|
||||||
// SetWrapper sets the Fs that is wrapping this Fs
|
|
||||||
func (f *Fs) SetWrapper(wrapper fs.Fs) { f.wrapper = wrapper }
|
|
||||||
|
|
||||||
// Wrap base entries into hasher entries.
|
|
||||||
func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries, err error) {
|
|
||||||
hashEntries = baseEntries[:0] // work inplace
|
|
||||||
for _, entry := range baseEntries {
|
|
||||||
switch x := entry.(type) {
|
|
||||||
case fs.Object:
|
|
||||||
hashEntries = append(hashEntries, f.wrapObject(x, nil))
|
|
||||||
default:
|
|
||||||
hashEntries = append(hashEntries, entry) // trash in - trash out
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return hashEntries, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// List the objects and directories in dir into entries.
|
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
|
||||||
if entries, err = f.Fs.List(ctx, dir); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return f.wrapEntries(entries)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListR lists the objects and directories recursively into out.
|
|
||||||
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
|
||||||
return f.Fs.Features().ListR(ctx, dir, func(baseEntries fs.DirEntries) error {
|
|
||||||
hashEntries, err := f.wrapEntries(baseEntries)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return callback(hashEntries)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Purge a directory
|
|
||||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
|
||||||
if do := f.Fs.Features().Purge; do != nil {
|
|
||||||
if err := do(ctx, dir); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err := f.db.Do(true, &kvPurge{
|
|
||||||
dir: path.Join(f.Fs.Root(), dir),
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
fs.Errorf(f, "Failed to purge some hashes: %v", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return fs.ErrorCantPurge
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutStream uploads to the remote path with undeterminate size.
|
|
||||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
|
||||||
if do := f.Fs.Features().PutStream; do != nil {
|
|
||||||
_ = f.pruneHash(src.Remote())
|
|
||||||
oResult, err := do(ctx, in, src, options...)
|
|
||||||
return f.wrapObject(oResult, err), err
|
|
||||||
}
|
|
||||||
return nil, errors.New("PutStream not supported")
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutUnchecked uploads the object, allowing duplicates.
|
|
||||||
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
|
||||||
if do := f.Fs.Features().PutUnchecked; do != nil {
|
|
||||||
_ = f.pruneHash(src.Remote())
|
|
||||||
oResult, err := do(ctx, in, src, options...)
|
|
||||||
return f.wrapObject(oResult, err), err
|
|
||||||
}
|
|
||||||
return nil, errors.New("PutUnchecked not supported")
|
|
||||||
}
|
|
||||||
|
|
||||||
// pruneHash deletes hash for a path
|
|
||||||
func (f *Fs) pruneHash(remote string) error {
|
|
||||||
return f.db.Do(true, &kvPrune{
|
|
||||||
key: path.Join(f.Fs.Root(), remote),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// CleanUp the trash in the Fs
|
|
||||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
|
||||||
if do := f.Fs.Features().CleanUp; do != nil {
|
|
||||||
return do(ctx)
|
|
||||||
}
|
|
||||||
return errors.New("CleanUp not supported")
|
|
||||||
}
|
|
||||||
|
|
||||||
// About gets quota information from the Fs
|
|
||||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
|
||||||
if do := f.Fs.Features().About; do != nil {
|
|
||||||
return do(ctx)
|
|
||||||
}
|
|
||||||
return nil, errors.New("About not supported")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ChangeNotify calls the passed function with a path that has had changes.
|
|
||||||
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
|
|
||||||
if do := f.Fs.Features().ChangeNotify; do != nil {
|
|
||||||
do(ctx, notifyFunc, pollIntervalChan)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// UserInfo returns info about the connected user
|
|
||||||
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
|
|
||||||
if do := f.Fs.Features().UserInfo; do != nil {
|
|
||||||
return do(ctx)
|
|
||||||
}
|
|
||||||
return nil, fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// Disconnect the current user
|
|
||||||
func (f *Fs) Disconnect(ctx context.Context) error {
|
|
||||||
if do := f.Fs.Features().Disconnect; do != nil {
|
|
||||||
return do(ctx)
|
|
||||||
}
|
|
||||||
return fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// MergeDirs merges the contents of all the directories passed
|
|
||||||
// in into the first one and rmdirs the other directories.
|
|
||||||
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
|
|
||||||
if do := f.Fs.Features().MergeDirs; do != nil {
|
|
||||||
return do(ctx, dirs)
|
|
||||||
}
|
|
||||||
return errors.New("MergeDirs not supported")
|
|
||||||
}
|
|
||||||
|
|
||||||
// DirCacheFlush resets the directory cache - used in testing
|
|
||||||
// as an optional interface
|
|
||||||
func (f *Fs) DirCacheFlush() {
|
|
||||||
if do := f.Fs.Features().DirCacheFlush; do != nil {
|
|
||||||
do()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// PublicLink generates a public link to the remote path (usually readable by anyone)
|
|
||||||
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
|
|
||||||
if do := f.Fs.Features().PublicLink; do != nil {
|
|
||||||
return do(ctx, remote, expire, unlink)
|
|
||||||
}
|
|
||||||
return "", errors.New("PublicLink not supported")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Copy src to this remote using server-side copy operations.
|
|
||||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
|
||||||
do := f.Fs.Features().Copy
|
|
||||||
if do == nil {
|
|
||||||
return nil, fs.ErrorCantCopy
|
|
||||||
}
|
|
||||||
o, ok := src.(*Object)
|
|
||||||
if !ok {
|
|
||||||
return nil, fs.ErrorCantCopy
|
|
||||||
}
|
|
||||||
oResult, err := do(ctx, o.Object, remote)
|
|
||||||
return f.wrapObject(oResult, err), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Move src to this remote using server-side move operations.
|
|
||||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
|
||||||
do := f.Fs.Features().Move
|
|
||||||
if do == nil {
|
|
||||||
return nil, fs.ErrorCantMove
|
|
||||||
}
|
|
||||||
o, ok := src.(*Object)
|
|
||||||
if !ok {
|
|
||||||
return nil, fs.ErrorCantMove
|
|
||||||
}
|
|
||||||
oResult, err := do(ctx, o.Object, remote)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
_ = f.db.Do(true, &kvMove{
|
|
||||||
src: path.Join(f.Fs.Root(), src.Remote()),
|
|
||||||
dst: path.Join(f.Fs.Root(), remote),
|
|
||||||
dir: false,
|
|
||||||
fs: f,
|
|
||||||
})
|
|
||||||
return f.wrapObject(oResult, nil), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DirMove moves src, srcRemote to this remote at dstRemote using server-side move operations.
|
|
||||||
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
|
|
||||||
do := f.Fs.Features().DirMove
|
|
||||||
if do == nil {
|
|
||||||
return fs.ErrorCantDirMove
|
|
||||||
}
|
|
||||||
srcFs, ok := src.(*Fs)
|
|
||||||
if !ok {
|
|
||||||
return fs.ErrorCantDirMove
|
|
||||||
}
|
|
||||||
err := do(ctx, srcFs.Fs, srcRemote, dstRemote)
|
|
||||||
if err == nil {
|
|
||||||
_ = f.db.Do(true, &kvMove{
|
|
||||||
src: path.Join(srcFs.Fs.Root(), srcRemote),
|
|
||||||
dst: path.Join(f.Fs.Root(), dstRemote),
|
|
||||||
dir: true,
|
|
||||||
fs: f,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Shutdown the backend, closing any background tasks and any cached connections.
|
|
||||||
func (f *Fs) Shutdown(ctx context.Context) (err error) {
|
|
||||||
err = f.db.Stop(false)
|
|
||||||
if do := f.Fs.Features().Shutdown; do != nil {
|
|
||||||
if err2 := do(ctx); err2 != nil {
|
|
||||||
err = err2
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewObject finds the Object at remote.
|
|
||||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
|
||||||
o, err := f.Fs.NewObject(ctx, remote)
|
|
||||||
return f.wrapObject(o, err), err
|
|
||||||
}
|
|
||||||
|
|
||||||
//
|
|
||||||
// Object
|
|
||||||
//
|
|
||||||
|
|
||||||
// Object represents a composite file wrapping one or more data chunks
|
|
||||||
type Object struct {
|
|
||||||
fs.Object
|
|
||||||
f *Fs
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrap base object into hasher object
|
|
||||||
func (f *Fs) wrapObject(o fs.Object, err error) *Object {
|
|
||||||
if err != nil || o == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return &Object{Object: o, f: f}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs returns read only access to the Fs that this object is part of
|
|
||||||
func (o *Object) Fs() fs.Info { return o.f }
|
|
||||||
|
|
||||||
// UnWrap returns the wrapped Object
|
|
||||||
func (o *Object) UnWrap() fs.Object { return o.Object }
|
|
||||||
|
|
||||||
// Return a string version
|
|
||||||
func (o *Object) String() string {
|
|
||||||
if o == nil {
|
|
||||||
return "<nil>"
|
|
||||||
}
|
|
||||||
return o.Object.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ID returns the ID of the Object if possible
|
|
||||||
func (o *Object) ID() string {
|
|
||||||
if doer, ok := o.Object.(fs.IDer); ok {
|
|
||||||
return doer.ID()
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetTier returns the Tier of the Object if possible
|
|
||||||
func (o *Object) GetTier() string {
|
|
||||||
if doer, ok := o.Object.(fs.GetTierer); ok {
|
|
||||||
return doer.GetTier()
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetTier set the Tier of the Object if possible
|
|
||||||
func (o *Object) SetTier(tier string) error {
|
|
||||||
if doer, ok := o.Object.(fs.SetTierer); ok {
|
|
||||||
return doer.SetTier(tier)
|
|
||||||
}
|
|
||||||
return errors.New("SetTier not supported")
|
|
||||||
}
|
|
||||||
|
|
||||||
// MimeType of an Object if known, "" otherwise
|
|
||||||
func (o *Object) MimeType(ctx context.Context) string {
|
|
||||||
if doer, ok := o.Object.(fs.MimeTyper); ok {
|
|
||||||
return doer.MimeType(ctx)
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compile-time checks that *Fs and *Object satisfy all the optional
// interfaces this backend advertises.
var (
	_ fs.Fs = (*Fs)(nil)
	_ fs.Purger = (*Fs)(nil)
	_ fs.Copier = (*Fs)(nil)
	_ fs.Mover = (*Fs)(nil)
	_ fs.DirMover = (*Fs)(nil)
	_ fs.Commander = (*Fs)(nil)
	_ fs.PutUncheckeder = (*Fs)(nil)
	_ fs.PutStreamer = (*Fs)(nil)
	_ fs.CleanUpper = (*Fs)(nil)
	_ fs.UnWrapper = (*Fs)(nil)
	_ fs.ListRer = (*Fs)(nil)
	_ fs.Abouter = (*Fs)(nil)
	_ fs.Wrapper = (*Fs)(nil)
	_ fs.MergeDirser = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.ChangeNotifier = (*Fs)(nil)
	_ fs.PublicLinker = (*Fs)(nil)
	_ fs.UserInfoer = (*Fs)(nil)
	_ fs.Disconnecter = (*Fs)(nil)
	_ fs.Shutdowner = (*Fs)(nil)
	_ fs.Object = (*Object)(nil)
	_ fs.ObjectUnWrapper = (*Object)(nil)
	_ fs.IDer = (*Object)(nil)
	_ fs.SetTierer = (*Object)(nil)
	_ fs.GetTierer = (*Object)(nil)
	_ fs.MimeTyper = (*Object)(nil)
)
|
|
||||||
@@ -1,78 +0,0 @@
|
|||||||
package hasher
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/config/obscure"
|
|
||||||
"github.com/rclone/rclone/fs/operations"
|
|
||||||
"github.com/rclone/rclone/fstest"
|
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
|
||||||
"github.com/rclone/rclone/lib/kv"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func putFile(ctx context.Context, t *testing.T, f fs.Fs, name, data string) fs.Object {
|
|
||||||
mtime1 := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
|
||||||
item := fstest.Item{Path: name, ModTime: mtime1}
|
|
||||||
_, o := fstests.PutTestContents(ctx, t, f, &item, data, true)
|
|
||||||
require.NotNil(t, o)
|
|
||||||
return o
|
|
||||||
}
|
|
||||||
|
|
||||||
// testUploadFromCrypt checks that uploading from a crypt remote (which
// cannot supply plaintext hashes) makes hasher compute and cache the
// hash of the uploaded data itself.
func (f *Fs) testUploadFromCrypt(t *testing.T) {
	// make a temporary local remote
	tempRoot, err := fstest.LocalRemote()
	require.NoError(t, err)
	defer func() {
		_ = os.RemoveAll(tempRoot)
	}()

	// make a temporary crypt remote on top of the local one
	ctx := context.Background()
	pass := obscure.MustObscure("crypt")
	remote := fmt.Sprintf(":crypt,remote=%s,password=%s:", tempRoot, pass)
	cryptFs, err := fs.NewFs(ctx, remote)
	require.NoError(t, err)

	// make a test file on the crypt remote
	const dirName = "from_crypt_1"
	const fileName = dirName + "/file_from_crypt_1"
	// fs.ModTimeNotSupported is used as an effectively unlimited cache age
	const longTime = fs.ModTimeNotSupported
	src := putFile(ctx, t, cryptFs, fileName, "doggy froggy")

	// ensure that hash does not exist yet
	_ = f.pruneHash(fileName)
	hashType := f.keepHashes.GetOne()
	hash, err := f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
	assert.Error(t, err)
	assert.Empty(t, hash)

	// upload file to hasher
	in, err := src.Open(ctx)
	require.NoError(t, err)
	dst, err := f.Put(ctx, in, src)
	require.NoError(t, err)
	assert.NotNil(t, dst)

	// check that the hash was computed in-flight and cached
	hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
	assert.NoError(t, err)
	assert.NotEmpty(t, hash)
	//t.Logf("hash is %q", hash)
	_ = operations.Purge(ctx, f, dirName)
}
|
|
||||||
|
|
||||||
// InternalTest dispatches all internal tests.
// Skips everything when the kv store (and hence hasher) is unavailable
// on this OS.
func (f *Fs) InternalTest(t *testing.T) {
	if !kv.Supported() {
		t.Skip("hasher is not supported on this OS")
	}
	t.Run("UploadFromCrypt", f.testUploadFromCrypt)
}
|
|
||||||
|
|
||||||
// Compile-time check that *Fs exposes internal tests to the integration framework.
var _ fstests.InternalTester = (*Fs)(nil)
|
|
||||||
@@ -1,38 +0,0 @@
|
|||||||
package hasher_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/backend/hasher"
|
|
||||||
"github.com/rclone/rclone/fstest"
|
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
|
||||||
"github.com/rclone/rclone/lib/kv"
|
|
||||||
|
|
||||||
_ "github.com/rclone/rclone/backend/all" // for integration tests
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
|
||||||
func TestIntegration(t *testing.T) {
|
|
||||||
if !kv.Supported() {
|
|
||||||
t.Skip("hasher is not supported on this OS")
|
|
||||||
}
|
|
||||||
opt := fstests.Opt{
|
|
||||||
RemoteName: *fstest.RemoteName,
|
|
||||||
NilObject: (*hasher.Object)(nil),
|
|
||||||
UnimplementableFsMethods: []string{
|
|
||||||
"OpenWriterAt",
|
|
||||||
},
|
|
||||||
UnimplementableObjectMethods: []string{},
|
|
||||||
}
|
|
||||||
if *fstest.RemoteName == "" {
|
|
||||||
tempDir := filepath.Join(os.TempDir(), "rclone-hasher-test")
|
|
||||||
opt.ExtraConfig = []fstests.ExtraConfigItem{
|
|
||||||
{Name: "TestHasher", Key: "type", Value: "hasher"},
|
|
||||||
{Name: "TestHasher", Key: "remote", Value: tempDir},
|
|
||||||
}
|
|
||||||
opt.RemoteName = "TestHasher:"
|
|
||||||
}
|
|
||||||
fstests.Run(t, &opt)
|
|
||||||
}
|
|
||||||
@@ -1,315 +0,0 @@
|
|||||||
package hasher
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/gob"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/rclone/rclone/fs/operations"
|
|
||||||
"github.com/rclone/rclone/lib/kv"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// timeFormat is the layout used to render modtimes inside fingerprints.
	timeFormat = "2006-01-02T15:04:05.000000000-0700"
	// anyFingerprint matches any stored fingerprint (shown as "stk" in dumps).
	anyFingerprint = "*"
)
|
|
||||||
|
|
||||||
// hashMap maps a hash type to its hex digest string.
type hashMap map[hash.Type]string

// hashRecord is the gob-encoded value stored per object key in the kv database.
type hashRecord struct {
	Fp string // fingerprint of the object the hashes belong to
	Hashes operations.HashSums // hash name -> digest
	Created time.Time // when the record was (re)created; used for ageing out
}
|
|
||||||
|
|
||||||
func (r *hashRecord) encode(key string) ([]byte, error) {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
if err := gob.NewEncoder(&buf).Encode(r); err != nil {
|
|
||||||
fs.Debugf(key, "hasher encoding %v: %v", r, err)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return buf.Bytes(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *hashRecord) decode(key string, data []byte) error {
|
|
||||||
if err := gob.NewDecoder(bytes.NewBuffer(data)).Decode(r); err != nil {
|
|
||||||
fs.Debugf(key, "hasher decoding %q failed: %v", data, err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// kvPrune: prune a single hash record from the database.
type kvPrune struct {
	key string // full database key of the record to delete
}
|
|
||||||
|
|
||||||
func (op *kvPrune) Do(ctx context.Context, b kv.Bucket) error {
|
|
||||||
return b.Delete([]byte(op.key))
|
|
||||||
}
|
|
||||||
|
|
||||||
// kvPurge: delete every hash record under a directory subtree.
type kvPurge struct {
	dir string // directory key prefix (trailing slash added by Do)
}
|
|
||||||
|
|
||||||
func (op *kvPurge) Do(ctx context.Context, b kv.Bucket) error {
|
|
||||||
dir := op.dir
|
|
||||||
if !strings.HasSuffix(dir, "/") {
|
|
||||||
dir += "/"
|
|
||||||
}
|
|
||||||
var items []string
|
|
||||||
cur := b.Cursor()
|
|
||||||
bkey, _ := cur.Seek([]byte(dir))
|
|
||||||
for bkey != nil {
|
|
||||||
key := string(bkey)
|
|
||||||
if !strings.HasPrefix(key, dir) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
items = append(items, key[len(dir):])
|
|
||||||
bkey, _ = cur.Next()
|
|
||||||
}
|
|
||||||
nerr := 0
|
|
||||||
for _, sub := range items {
|
|
||||||
if err := b.Delete([]byte(dir + sub)); err != nil {
|
|
||||||
nerr++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fs.Debugf(dir, "%d hashes purged, %d failed", len(items)-nerr, nerr)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// kvMove: assign cached hashes to a new path (single file or whole subtree).
type kvMove struct {
	src string // source key or directory prefix
	dst string // destination key or directory prefix
	dir bool   // true to move a whole subtree, false for a single record
	fs *Fs     // used only as the logging context
}
|
|
||||||
|
|
||||||
// Do renames cached hash records. For a single file it moves one record;
// for a directory it collects all keys under src first (so the bucket is
// not mutated during cursor iteration), then moves each to dst.
// Per-record move failures are counted and logged but not returned.
func (op *kvMove) Do(ctx context.Context, b kv.Bucket) error {
	src, dst := op.src, op.dst
	if !op.dir {
		// Single-file rename: move one record and report the result.
		err := moveHash(b, src, dst)
		fs.Debugf(op.fs, "moving cached hash %s to %s (err: %v)", src, dst, err)
		return err
	}

	// Normalize both prefixes to end in "/" so suffix math is consistent.
	if !strings.HasSuffix(src, "/") {
		src += "/"
	}
	if !strings.HasSuffix(dst, "/") {
		dst += "/"
	}

	// Pass 1: collect suffixes of all keys under src.
	var items []string
	cur := b.Cursor()
	bkey, _ := cur.Seek([]byte(src))
	for bkey != nil {
		key := string(bkey)
		if !strings.HasPrefix(key, src) {
			break
		}
		items = append(items, key[len(src):])
		bkey, _ = cur.Next()
	}

	// Pass 2: move each record, counting failures.
	nerr := 0
	for _, suffix := range items {
		srcKey, dstKey := src+suffix, dst+suffix
		err := moveHash(b, srcKey, dstKey)
		fs.Debugf(op.fs, "Rename cache record %s -> %s (err: %v)", srcKey, dstKey, err)
		if err != nil {
			nerr++
		}
	}
	fs.Debugf(op.fs, "%d hashes moved, %d failed", len(items)-nerr, nerr)
	return nil
}
|
|
||||||
|
|
||||||
func moveHash(b kv.Bucket, src, dst string) error {
|
|
||||||
data := b.Get([]byte(src))
|
|
||||||
err := b.Delete([]byte(src))
|
|
||||||
if err != nil || len(data) == 0 {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return b.Put([]byte(dst), data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// kvGet: get a single hash from the database.
type kvGet struct {
	key string  // full database key of the record
	fp string   // expected fingerprint; anyFingerprint disables the check
	hash string // hash name to extract from the record
	val string  // result: the digest, filled in by Do
	age time.Duration // maximum acceptable record age
}
|
|
||||||
|
|
||||||
func (op *kvGet) Do(ctx context.Context, b kv.Bucket) error {
|
|
||||||
data := b.Get([]byte(op.key))
|
|
||||||
if len(data) == 0 {
|
|
||||||
return errors.New("no record")
|
|
||||||
}
|
|
||||||
var r hashRecord
|
|
||||||
if err := r.decode(op.key, data); err != nil {
|
|
||||||
return errors.New("invalid record")
|
|
||||||
}
|
|
||||||
if !(r.Fp == anyFingerprint || op.fp == anyFingerprint || r.Fp == op.fp) {
|
|
||||||
return errors.New("fingerprint changed")
|
|
||||||
}
|
|
||||||
if time.Since(r.Created) > op.age {
|
|
||||||
return errors.New("record timed out")
|
|
||||||
}
|
|
||||||
if r.Hashes != nil {
|
|
||||||
op.val = r.Hashes[op.hash]
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// kvPut: set hashes for an object by key.
type kvPut struct {
	key string // full database key of the record
	fp string  // fingerprint the stored hashes belong to
	hashes operations.HashSums // hashes to merge into the record
	age time.Duration // maximum age before an existing record is discarded
}
|
|
||||||
|
|
||||||
// Do merges op.hashes into the stored record for op.key.
// An existing record is reused only if it decodes, carries the same
// fingerprint and is not older than op.age; otherwise a fresh record
// is started. The merged record is re-encoded and written back.
func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) {
	data := b.Get([]byte(op.key))
	var r hashRecord
	if len(data) > 0 {
		err = r.decode(op.key, data)
		// Invalidate stale, foreign or unreadable records.
		if err != nil || r.Fp != op.fp || time.Since(r.Created) > op.age {
			r.Hashes = nil
		}
	}
	if len(r.Hashes) == 0 {
		// Start a fresh record for this fingerprint.
		r.Created = time.Now()
		r.Hashes = operations.HashSums{}
		r.Fp = op.fp
	}

	// Merge the new hashes over any surviving ones.
	for hashType, hashVal := range op.hashes {
		r.Hashes[hashType] = hashVal
	}
	if data, err = r.encode(op.key); err != nil {
		return errors.Wrap(err, "marshal failed")
	}
	if err = b.Put([]byte(op.key), data); err != nil {
		return errors.Wrap(err, "put failed")
	}
	return err
}
|
|
||||||
|
|
||||||
// kvDump: dump the database.
// Note: long dump can cause concurrent operations to fail.
type kvDump struct {
	full bool  // true: dump all records, marking those outside root as "ext"
	root string // subtree to dump (empty means everything)
	path string // database path, used as the logging context
	fs *Fs     // used for dumpLine formatting
	num int    // result: number of records printed (for unit tests)
	total int  // result: total records seen in full mode (for unit tests)
}
|
|
||||||
|
|
||||||
func (op *kvDump) Do(ctx context.Context, b kv.Bucket) error {
|
|
||||||
f, baseRoot, dbPath := op.fs, op.root, op.path
|
|
||||||
|
|
||||||
if op.full {
|
|
||||||
total := 0
|
|
||||||
num := 0
|
|
||||||
_ = b.ForEach(func(bkey, data []byte) error {
|
|
||||||
total++
|
|
||||||
key := string(bkey)
|
|
||||||
include := (baseRoot == "" || key == baseRoot || strings.HasPrefix(key, baseRoot+"/"))
|
|
||||||
var r hashRecord
|
|
||||||
if err := r.decode(key, data); err != nil {
|
|
||||||
fs.Errorf(nil, "%s: invalid record: %v", key, err)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
fmt.Println(f.dumpLine(&r, key, include, nil))
|
|
||||||
if include {
|
|
||||||
num++
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
fs.Infof(dbPath, "%d records out of %d", num, total)
|
|
||||||
op.num, op.total = num, total // for unit tests
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
num := 0
|
|
||||||
cur := b.Cursor()
|
|
||||||
var bkey, data []byte
|
|
||||||
if baseRoot != "" {
|
|
||||||
bkey, data = cur.Seek([]byte(baseRoot))
|
|
||||||
} else {
|
|
||||||
bkey, data = cur.First()
|
|
||||||
}
|
|
||||||
for bkey != nil {
|
|
||||||
key := string(bkey)
|
|
||||||
if !(baseRoot == "" || key == baseRoot || strings.HasPrefix(key, baseRoot+"/")) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
var r hashRecord
|
|
||||||
if err := r.decode(key, data); err != nil {
|
|
||||||
fs.Errorf(nil, "%s: invalid record: %v", key, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if key = strings.TrimPrefix(key[len(baseRoot):], "/"); key == "" {
|
|
||||||
key = "/"
|
|
||||||
}
|
|
||||||
fmt.Println(f.dumpLine(&r, key, true, nil))
|
|
||||||
num++
|
|
||||||
bkey, data = cur.Next()
|
|
||||||
}
|
|
||||||
fs.Infof(dbPath, "%d records", num)
|
|
||||||
op.num = num // for unit tests
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// dumpLine formats a single cache record for dump output as:
// status column, one column per kept hash type, record age, then path.
func (f *Fs) dumpLine(r *hashRecord, path string, include bool, err error) string {
	// Status: "ext" = outside the dump root, "bad" = broken record,
	// "stk" = sticky (wildcard-fingerprint) record, "ok " = normal.
	var status string
	switch {
	case !include:
		status = "ext"
	case err != nil:
		status = "bad"
	case r.Fp == anyFingerprint:
		status = "stk"
	default:
		status = "ok "
	}

	var hashes []string
	for _, hashType := range f.keepHashes.Array() {
		hashName := hashType.String()
		hashVal := r.Hashes[hashName]
		if hashVal == "" || err != nil {
			hashVal = "-" // missing or untrustworthy value
		}
		// Left-pad to the hash type's full digest width so columns align.
		hashVal = fmt.Sprintf("%-*s", hash.Width(hashType), hashVal)
		hashes = append(hashes, hashName+":"+hashVal)
	}
	hashesStr := strings.Join(hashes, " ")

	age := time.Since(r.Created).Round(time.Second)
	if age > 24*time.Hour {
		age = age.Round(time.Hour)
	}
	if err != nil {
		age = 0
	}
	ageStr := age.String()
	// Shorten e.g. "25h0m0s" to "25h".
	if strings.HasSuffix(ageStr, "h0m0s") {
		ageStr = strings.TrimSuffix(ageStr, "0m0s")
	}

	return fmt.Sprintf("%s %s %9s %s", status, hashesStr, ageStr, path)
}
|
|
||||||
@@ -1,305 +0,0 @@
|
|||||||
package hasher
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"path"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/rclone/rclone/fs/operations"
|
|
||||||
)
|
|
||||||
|
|
||||||
// obtain hash for an object
|
|
||||||
func (o *Object) getHash(ctx context.Context, hashType hash.Type) (string, error) {
|
|
||||||
maxAge := time.Duration(o.f.opt.MaxAge)
|
|
||||||
if maxAge <= 0 {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
fp := o.fingerprint(ctx)
|
|
||||||
if fp == "" {
|
|
||||||
return "", errors.New("fingerprint failed")
|
|
||||||
}
|
|
||||||
return o.f.getRawHash(ctx, hashType, o.Remote(), fp, maxAge)
|
|
||||||
}
|
|
||||||
|
|
||||||
// obtain hash for a path
|
|
||||||
func (f *Fs) getRawHash(ctx context.Context, hashType hash.Type, remote, fp string, age time.Duration) (string, error) {
|
|
||||||
key := path.Join(f.Fs.Root(), remote)
|
|
||||||
op := &kvGet{
|
|
||||||
key: key,
|
|
||||||
fp: fp,
|
|
||||||
hash: hashType.String(),
|
|
||||||
age: age,
|
|
||||||
}
|
|
||||||
err := f.db.Do(false, op)
|
|
||||||
return op.val, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// put new hashes for an object
|
|
||||||
func (o *Object) putHashes(ctx context.Context, rawHashes hashMap) error {
|
|
||||||
if o.f.opt.MaxAge <= 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
fp := o.fingerprint(ctx)
|
|
||||||
if fp == "" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
key := path.Join(o.f.Fs.Root(), o.Remote())
|
|
||||||
hashes := operations.HashSums{}
|
|
||||||
for hashType, hashVal := range rawHashes {
|
|
||||||
hashes[hashType.String()] = hashVal
|
|
||||||
}
|
|
||||||
return o.f.putRawHashes(ctx, key, fp, hashes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// set hashes for a path without any validation
|
|
||||||
func (f *Fs) putRawHashes(ctx context.Context, key, fp string, hashes operations.HashSums) error {
|
|
||||||
return f.db.Do(true, &kvPut{
|
|
||||||
key: key,
|
|
||||||
fp: fp,
|
|
||||||
hashes: hashes,
|
|
||||||
age: time.Duration(f.opt.MaxAge),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hash returns the selected checksum of the file or "" if unavailable.
//
// Resolution order:
//  1. pass-through hash types go straight to the base remote;
//  2. unsupported types fail with hash.ErrUnsupported;
//  3. otherwise the cache is consulted;
//  4. "slow" types are computed by the base remote and cached;
//  5. "auto" types on small enough files trigger a full download
//     (which refreshes the cache) followed by a second cache lookup.
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (hashVal string, err error) {
	f := o.f
	if f.passHashes.Contains(hashType) {
		fs.Debugf(o, "pass %s", hashType)
		return o.Object.Hash(ctx, hashType)
	}
	if !f.suppHashes.Contains(hashType) {
		fs.Debugf(o, "unsupp %s", hashType)
		return "", hash.ErrUnsupported
	}
	// Cache lookup failure is not fatal - fall through to recompute.
	if hashVal, err = o.getHash(ctx, hashType); err != nil {
		fs.Debugf(o, "getHash: %v", err)
		err = nil
		hashVal = ""
	}
	if hashVal != "" {
		fs.Debugf(o, "cached %s = %q", hashType, hashVal)
		return hashVal, nil
	}
	if f.slowHashes.Contains(hashType) {
		fs.Debugf(o, "slow %s", hashType)
		hashVal, err = o.Object.Hash(ctx, hashType)
		if err == nil && hashVal != "" && f.keepHashes.Contains(hashType) {
			// Best effort: a failed cache write must not fail the hash.
			if err = o.putHashes(ctx, hashMap{hashType: hashVal}); err != nil {
				fs.Debugf(o, "putHashes: %v", err)
				err = nil
			}
		}
		return hashVal, err
	}
	if f.autoHashes.Contains(hashType) && o.Size() < int64(f.opt.AutoSize) {
		// Download the whole object; the hashing reader fills the cache.
		_ = o.updateHashes(ctx)
		if hashVal, err = o.getHash(ctx, hashType); err != nil {
			fs.Debugf(o, "auto %s = %q (%v)", hashType, hashVal, err)
			err = nil
		}
	}
	return hashVal, err
}
|
|
||||||
|
|
||||||
// updateHashes performs implicit "rclone hashsum --download" and updates cache.
|
|
||||||
func (o *Object) updateHashes(ctx context.Context) error {
|
|
||||||
r, err := o.Open(ctx)
|
|
||||||
if err != nil {
|
|
||||||
fs.Infof(o, "update failed (open): %v", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
_ = r.Close()
|
|
||||||
}()
|
|
||||||
if _, err = io.Copy(ioutil.Discard, r); err != nil {
|
|
||||||
fs.Infof(o, "update failed (copy): %v", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update the object with the given data, time and size.
|
|
||||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
|
||||||
_ = o.f.pruneHash(src.Remote())
|
|
||||||
return o.Object.Update(ctx, in, src, options...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove an object.
|
|
||||||
func (o *Object) Remove(ctx context.Context) error {
|
|
||||||
_ = o.f.pruneHash(o.Remote())
|
|
||||||
return o.Object.Remove(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetModTime sets the modification time of the file.
|
|
||||||
// Also prunes the cache entry when modtime changes so that
|
|
||||||
// touching a file will trigger checksum recalculation even
|
|
||||||
// on backends that don't provide modTime with fingerprint.
|
|
||||||
func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
|
|
||||||
if mtime != o.Object.ModTime(ctx) {
|
|
||||||
_ = o.f.pruneHash(o.Remote())
|
|
||||||
}
|
|
||||||
return o.Object.SetModTime(ctx, mtime)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open opens the file for read.
// Full reads will also update object hashes: the returned reader pipes
// the data through a hashing reader which caches the sums once the
// stream is read to EOF. Partial (seek/range) reads are passed through
// untouched since they cannot produce a valid whole-file hash.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (r io.ReadCloser, err error) {
	size := o.Size()
	// Extract the requested byte range from the open options.
	var offset, limit int64 = 0, -1
	for _, option := range options {
		switch opt := option.(type) {
		case *fs.SeekOption:
			offset = opt.Offset
		case *fs.RangeOption:
			offset, limit = opt.Decode(size)
		}
	}
	if offset < 0 {
		return nil, errors.New("invalid offset")
	}
	if limit < 0 {
		limit = size - offset
	}
	if r, err = o.Object.Open(ctx, options...); err != nil {
		return nil, err
	}
	if offset != 0 || limit < size {
		// It's a partial read - return the base reader unwrapped.
		return r, err
	}
	// Full read: hash while streaming and cache the sums on clean EOF.
	return o.f.newHashingReader(ctx, r, func(sums hashMap) {
		if err := o.putHashes(ctx, sums); err != nil {
			fs.Infof(o, "auto hashing error: %v", err)
		}
	})
}
|
|
||||||
|
|
||||||
// Put data into the remote path with given modTime and size.
//
// If the source cannot supply all the hashes this backend keeps (or is
// slow to hash), the upload stream is wrapped in a hashing reader so the
// hashes are computed in flight; otherwise the source's own hashes are
// copied into the cache after the upload.
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	var (
		o      *Object
		common hash.Set // hash types both the source and keepHashes provide
		rehash bool     // whether to compute hashes from the data stream
		hashes hashMap  // hashes to cache after the upload
	)
	if fsrc := src.Fs(); fsrc != nil {
		common = fsrc.Hashes().Overlap(f.keepHashes)
		// Rehash if source does not have all required hashes or hashing is slow
		rehash = fsrc.Features().SlowHash || common != f.keepHashes
	}

	wrapIn := in
	if rehash {
		r, err := f.newHashingReader(ctx, in, func(sums hashMap) {
			// Capture the sums computed on the upload stream.
			hashes = sums
		})
		fs.Debugf(src, "Rehash in-fly due to incomplete or slow source set %v (err: %v)", common, err)
		if err == nil {
			wrapIn = r
		} else {
			// Cannot hash in flight - fall back to copying source hashes.
			rehash = false
		}
	}

	// Drop any stale cached hash before the content changes.
	_ = f.pruneHash(src.Remote())
	oResult, err := f.Fs.Put(ctx, wrapIn, src, options...)
	o = f.wrapObject(oResult, err)
	if o == nil {
		return nil, err
	}

	if !rehash {
		// Copy whatever common hashes the source can provide.
		hashes = hashMap{}
		for _, ht := range common.Array() {
			if h, e := src.Hash(ctx, ht); e == nil && h != "" {
				hashes[ht] = h
			}
		}
	}
	if len(hashes) > 0 {
		// Best effort: cache write failures are logged, not returned.
		err := o.putHashes(ctx, hashes)
		fs.Debugf(o, "Applied %d source hashes, err: %v", len(hashes), err)
	}
	return o, err
}
|
|
||||||
|
|
||||||
// hashingReader wraps a reader, feeding every byte through a multi-hasher
// and delivering the finished sums via a callback on clean EOF.
type hashingReader struct {
	rd io.Reader             // underlying data stream
	hasher *hash.MultiHasher // set to nil once hashing is aborted or finished
	fun func(hashMap)        // invoked exactly once with the sums on clean EOF
}
|
|
||||||
|
|
||||||
func (f *Fs) newHashingReader(ctx context.Context, rd io.Reader, fun func(hashMap)) (*hashingReader, error) {
|
|
||||||
hasher, err := hash.NewMultiHasherTypes(f.keepHashes)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
hr := &hashingReader{
|
|
||||||
rd: rd,
|
|
||||||
hasher: hasher,
|
|
||||||
fun: fun,
|
|
||||||
}
|
|
||||||
return hr, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read passes data through while feeding it to the multi-hasher.
// Any read error other than io.EOF aborts hashing so partial reads
// never produce wrong sums; when EOF arrives with the hasher still
// alive, the callback fires exactly once with the finished sums.
func (r *hashingReader) Read(p []byte) (n int, err error) {
	n, err = r.rd.Read(p)
	if err != nil && err != io.EOF {
		// Real read error: give up on hashing for this stream.
		r.hasher = nil
	}
	if r.hasher != nil {
		if _, errHash := r.hasher.Write(p[:n]); errHash != nil {
			r.hasher = nil
			err = errHash
		}
	}
	if err == io.EOF && r.hasher != nil {
		// Clean end of stream: deliver the sums and disarm the hasher.
		r.fun(r.hasher.Sums())
		r.hasher = nil
	}
	return
}
|
|
||||||
|
|
||||||
func (r *hashingReader) Close() error {
|
|
||||||
if rc, ok := r.rd.(io.ReadCloser); ok {
|
|
||||||
return rc.Close()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return object fingerprint or empty string in case of errors
|
|
||||||
//
|
|
||||||
// Note that we can't use the generic `fs.Fingerprint` here because
|
|
||||||
// this fingerprint is used to pick _derived hashes_ that are slow
|
|
||||||
// to calculate or completely unsupported by the base remote.
|
|
||||||
//
|
|
||||||
// The hasher fingerprint must be based on `fsHash`, the first _fast_
|
|
||||||
// hash supported _by the underlying remote_ (if there is one),
|
|
||||||
// while `fs.Fingerprint` would select a hash _produced by hasher_
|
|
||||||
// creating unresolvable fingerprint loop.
|
|
||||||
func (o *Object) fingerprint(ctx context.Context) string {
|
|
||||||
size := o.Object.Size()
|
|
||||||
timeStr := "-"
|
|
||||||
if o.f.fpTime {
|
|
||||||
timeStr = o.Object.ModTime(ctx).UTC().Format(timeFormat)
|
|
||||||
if timeStr == "" {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
hashStr := "-"
|
|
||||||
if o.f.fpHash != hash.None {
|
|
||||||
var err error
|
|
||||||
hashStr, err = o.Object.Hash(ctx, o.f.fpHash)
|
|
||||||
if hashStr == "" || err != nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%d,%s,%s", size, timeStr, hashStr)
|
|
||||||
}
|
|
||||||
@@ -1,4 +1,3 @@
|
|||||||
//go:build !plan9
|
|
||||||
// +build !plan9
|
// +build !plan9
|
||||||
|
|
||||||
package hdfs
|
package hdfs
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
//go:build !plan9
|
|
||||||
// +build !plan9
|
// +build !plan9
|
||||||
|
|
||||||
package hdfs
|
package hdfs
|
||||||
@@ -19,28 +18,35 @@ func init() {
|
|||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "namenode",
|
Name: "namenode",
|
||||||
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
|
Help: "hadoop name node and port",
|
||||||
Required: true,
|
Required: true,
|
||||||
|
Examples: []fs.OptionExample{{
|
||||||
|
Value: "namenode:8020",
|
||||||
|
Help: "Connect to host namenode at port 8020",
|
||||||
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "username",
|
Name: "username",
|
||||||
Help: "Hadoop user name.",
|
Help: "hadoop user name",
|
||||||
Required: false,
|
Required: false,
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "root",
|
Value: "root",
|
||||||
Help: "Connect to hdfs as root.",
|
Help: "Connect to hdfs as root",
|
||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "service_principal_name",
|
Name: "service_principal_name",
|
||||||
Help: `Kerberos service principal name for the namenode.
|
Help: `Kerberos service principal name for the namenode
|
||||||
|
|
||||||
Enables KERBEROS authentication. Specifies the Service Principal Name
|
Enables KERBEROS authentication. Specifies the Service Principal Name
|
||||||
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
|
(<SERVICE>/<FQDN>) for the namenode.`,
|
||||||
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
|
|
||||||
Required: false,
|
Required: false,
|
||||||
|
Examples: []fs.OptionExample{{
|
||||||
|
Value: "hdfs/namenode.hadoop.docker",
|
||||||
|
Help: "Namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.",
|
||||||
|
}},
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "data_transfer_protection",
|
Name: "data_transfer_protection",
|
||||||
Help: `Kerberos data transfer protection: authentication|integrity|privacy.
|
Help: `Kerberos data transfer protection: authentication|integrity|privacy
|
||||||
|
|
||||||
Specifies whether or not authentication, data signature integrity
|
Specifies whether or not authentication, data signature integrity
|
||||||
checks, and wire encryption is required when communicating the the
|
checks, and wire encryption is required when communicating the the
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
// Test HDFS filesystem interface
|
// Test HDFS filesystem interface
|
||||||
|
|
||||||
//go:build !plan9
|
|
||||||
// +build !plan9
|
// +build !plan9
|
||||||
|
|
||||||
package hdfs_test
|
package hdfs_test
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
// Build for hdfs for unsupported platforms to stop go complaining
|
// Build for hdfs for unsupported platforms to stop go complaining
|
||||||
// about "no buildable Go source files "
|
// about "no buildable Go source files "
|
||||||
|
|
||||||
//go:build plan9
|
|
||||||
// +build plan9
|
// +build plan9
|
||||||
|
|
||||||
package hdfs
|
package hdfs
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
//go:build !plan9
|
|
||||||
// +build !plan9
|
// +build !plan9
|
||||||
|
|
||||||
package hdfs
|
package hdfs
|
||||||
|
|||||||
@@ -38,13 +38,20 @@ func init() {
|
|||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "url",
|
Name: "url",
|
||||||
Help: "URL of http host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
|
Help: "URL of http host to connect to",
|
||||||
Required: true,
|
Required: true,
|
||||||
|
Examples: []fs.OptionExample{{
|
||||||
|
Value: "https://example.com",
|
||||||
|
Help: "Connect to example.com",
|
||||||
|
}, {
|
||||||
|
Value: "https://user:pass@example.com",
|
||||||
|
Help: "Connect to example.com using a username and password",
|
||||||
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "headers",
|
Name: "headers",
|
||||||
Help: `Set HTTP headers for all transactions.
|
Help: `Set HTTP headers for all transactions
|
||||||
|
|
||||||
Use this to set additional HTTP headers for all transactions.
|
Use this to set additional HTTP headers for all transactions
|
||||||
|
|
||||||
The input format is comma separated list of key,value pairs. Standard
|
The input format is comma separated list of key,value pairs. Standard
|
||||||
[CSV encoding](https://godoc.org/encoding/csv) may be used.
|
[CSV encoding](https://godoc.org/encoding/csv) may be used.
|
||||||
@@ -57,7 +64,7 @@ You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'
|
|||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "no_slash",
|
Name: "no_slash",
|
||||||
Help: `Set this if the site doesn't end directories with /.
|
Help: `Set this if the site doesn't end directories with /
|
||||||
|
|
||||||
Use this if your target website does not use / on the end of
|
Use this if your target website does not use / on the end of
|
||||||
directories.
|
directories.
|
||||||
@@ -73,7 +80,7 @@ directories.`,
|
|||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "no_head",
|
Name: "no_head",
|
||||||
Help: `Don't use HEAD requests to find file sizes in dir listing.
|
Help: `Don't use HEAD requests to find file sizes in dir listing
|
||||||
|
|
||||||
If your site is being very slow to load then you can try this option.
|
If your site is being very slow to load then you can try this option.
|
||||||
Normally rclone does a HEAD request for each potential file in a
|
Normally rclone does a HEAD request for each potential file in a
|
||||||
|
|||||||
@@ -47,7 +47,7 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
|
|||||||
ts := httptest.NewServer(handler)
|
ts := httptest.NewServer(handler)
|
||||||
|
|
||||||
// Configure the remote
|
// Configure the remote
|
||||||
configfile.Install()
|
configfile.LoadConfig(context.Background())
|
||||||
// fs.Config.LogLevel = fs.LogLevelDebug
|
// fs.Config.LogLevel = fs.LogLevelDebug
|
||||||
// fs.Config.DumpHeaders = true
|
// fs.Config.DumpHeaders = true
|
||||||
// fs.Config.DumpBodies = true
|
// fs.Config.DumpBodies = true
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
@@ -55,10 +56,11 @@ func init() {
|
|||||||
Name: "hubic",
|
Name: "hubic",
|
||||||
Description: "Hubic",
|
Description: "Hubic",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
Config: func(ctx context.Context, name string, m configmap.Mapper) {
|
||||||
return oauthutil.ConfigOut("", &oauthutil.Options{
|
err := oauthutil.Config(ctx, "hubic", name, m, oauthConfig, nil)
|
||||||
OAuth2Config: oauthConfig,
|
if err != nil {
|
||||||
})
|
log.Fatalf("Failed to configure token: %v", err)
|
||||||
|
}
|
||||||
},
|
},
|
||||||
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
|
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -368,7 +368,6 @@ type JottaFile struct {
|
|||||||
XMLName xml.Name
|
XMLName xml.Name
|
||||||
Name string `xml:"name,attr"`
|
Name string `xml:"name,attr"`
|
||||||
Deleted Flag `xml:"deleted,attr"`
|
Deleted Flag `xml:"deleted,attr"`
|
||||||
PublicURI string `xml:"publicURI"`
|
|
||||||
PublicSharePath string `xml:"publicSharePath"`
|
PublicSharePath string `xml:"publicSharePath"`
|
||||||
State string `xml:"currentRevision>state"`
|
State string `xml:"currentRevision>state"`
|
||||||
CreatedAt Time `xml:"currentRevision>created"`
|
CreatedAt Time `xml:"currentRevision>created"`
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -32,29 +32,29 @@ func init() {
|
|||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "endpoint",
|
Name: "endpoint",
|
||||||
Help: "The Koofr API endpoint to use.",
|
Help: "The Koofr API endpoint to use",
|
||||||
Default: "https://app.koofr.net",
|
Default: "https://app.koofr.net",
|
||||||
Required: true,
|
Required: true,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "mountid",
|
Name: "mountid",
|
||||||
Help: "Mount ID of the mount to use.\n\nIf omitted, the primary mount is used.",
|
Help: "Mount ID of the mount to use. If omitted, the primary mount is used.",
|
||||||
Required: false,
|
Required: false,
|
||||||
Default: "",
|
Default: "",
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "setmtime",
|
Name: "setmtime",
|
||||||
Help: "Does the backend support setting modification time.\n\nSet this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
|
Help: "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
|
||||||
Default: true,
|
Default: true,
|
||||||
Required: true,
|
Required: true,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "user",
|
Name: "user",
|
||||||
Help: "Your Koofr user name.",
|
Help: "Your Koofr user name",
|
||||||
Required: true,
|
Required: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "password",
|
Name: "password",
|
||||||
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
|
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
|
||||||
IsPassword: true,
|
IsPassword: true,
|
||||||
Required: true,
|
Required: true,
|
||||||
}, {
|
}, {
|
||||||
@@ -344,7 +344,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (obj fs.Object, err e
|
|||||||
return nil, translateErrorsObject(err)
|
return nil, translateErrorsObject(err)
|
||||||
}
|
}
|
||||||
if info.Type == "dir" {
|
if info.Type == "dir" {
|
||||||
return nil, fs.ErrorIsDir
|
return nil, fs.ErrorNotAFile
|
||||||
}
|
}
|
||||||
return &Object{
|
return &Object{
|
||||||
fs: f,
|
fs: f,
|
||||||
@@ -534,7 +534,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// About reports space usage (with a MiB precision)
|
// About reports space usage (with a MB precision)
|
||||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||||
mount, err := f.client.MountsDetails(f.mountID)
|
mount, err := f.client.MountsDetails(f.mountID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -608,25 +608,5 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return "", translateErrorsDir(err)
|
return "", translateErrorsDir(err)
|
||||||
}
|
}
|
||||||
|
return linkData.ShortURL, nil
|
||||||
// URL returned by API looks like following:
|
|
||||||
//
|
|
||||||
// https://app.koofr.net/links/35d9fb92-74a3-4930-b4ed-57f123bfb1a6
|
|
||||||
//
|
|
||||||
// Direct url looks like following:
|
|
||||||
//
|
|
||||||
// https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F
|
|
||||||
//
|
|
||||||
// I am not sure about meaning of "path" parameter; in my expriments
|
|
||||||
// it is always "%2F", and omitting it or putting any other value
|
|
||||||
// results in 404.
|
|
||||||
//
|
|
||||||
// There is one more quirk: direct link to file in / returns that file,
|
|
||||||
// direct link to file somewhere else in hierarchy returns zip archive
|
|
||||||
// with one member.
|
|
||||||
link := linkData.URL
|
|
||||||
link = strings.ReplaceAll(link, "/links", "/content/links")
|
|
||||||
link += "/files/get?path=%2F"
|
|
||||||
|
|
||||||
return link, nil
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
//go:build darwin || dragonfly || freebsd || linux
|
|
||||||
// +build darwin dragonfly freebsd linux
|
// +build darwin dragonfly freebsd linux
|
||||||
|
|
||||||
package local
|
package local
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
//go:build windows
|
|
||||||
// +build windows
|
// +build windows
|
||||||
|
|
||||||
package local
|
package local
|
||||||
|
|||||||
11
backend/local/encode_darwin.go
Normal file
11
backend/local/encode_darwin.go
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
//+build darwin
|
||||||
|
|
||||||
|
package local
|
||||||
|
|
||||||
|
import "github.com/rclone/rclone/lib/encoder"
|
||||||
|
|
||||||
|
// This is the encoding used by the local backend for macOS
|
||||||
|
//
|
||||||
|
// macOS can't store invalid UTF-8, it converts them into %XX encoding
|
||||||
|
const defaultEnc = (encoder.Base |
|
||||||
|
encoder.EncodeInvalidUtf8)
|
||||||
8
backend/local/encode_other.go
Normal file
8
backend/local/encode_other.go
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
//+build !windows,!darwin
|
||||||
|
|
||||||
|
package local
|
||||||
|
|
||||||
|
import "github.com/rclone/rclone/lib/encoder"
|
||||||
|
|
||||||
|
// This is the encoding used by the local backend for non windows platforms
|
||||||
|
const defaultEnc = encoder.Base
|
||||||
@@ -1,9 +1,10 @@
|
|||||||
//go:build windows
|
|
||||||
//+build windows
|
//+build windows
|
||||||
|
|
||||||
package encoder
|
package local
|
||||||
|
|
||||||
// OS is the encoding used by the local backend for windows platforms
|
import "github.com/rclone/rclone/lib/encoder"
|
||||||
|
|
||||||
|
// This is the encoding used by the local backend for windows platforms
|
||||||
//
|
//
|
||||||
// List of replaced characters:
|
// List of replaced characters:
|
||||||
// < (less than) -> '<' // FULLWIDTH LESS-THAN SIGN
|
// < (less than) -> '<' // FULLWIDTH LESS-THAN SIGN
|
||||||
@@ -23,10 +24,10 @@ package encoder
|
|||||||
// Also encode invalid UTF-8 bytes as Go can't convert them to UTF-16.
|
// Also encode invalid UTF-8 bytes as Go can't convert them to UTF-16.
|
||||||
//
|
//
|
||||||
// https://docs.microsoft.com/de-de/windows/desktop/FileIO/naming-a-file#naming-conventions
|
// https://docs.microsoft.com/de-de/windows/desktop/FileIO/naming-a-file#naming-conventions
|
||||||
const OS = (Base |
|
const defaultEnc = (encoder.Base |
|
||||||
EncodeWin |
|
encoder.EncodeWin |
|
||||||
EncodeBackSlash |
|
encoder.EncodeBackSlash |
|
||||||
EncodeCtl |
|
encoder.EncodeCtl |
|
||||||
EncodeRightSpace |
|
encoder.EncodeRightSpace |
|
||||||
EncodeRightPeriod |
|
encoder.EncodeRightPeriod |
|
||||||
EncodeInvalidUtf8)
|
encoder.EncodeInvalidUtf8)
|
||||||
@@ -1,4 +1,3 @@
|
|||||||
//go:build !linux
|
|
||||||
//+build !linux
|
//+build !linux
|
||||||
|
|
||||||
package local
|
package local
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
//go:build linux
|
|
||||||
//+build linux
|
//+build linux
|
||||||
|
|
||||||
package local
|
package local
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
//go:build windows || plan9 || js
|
|
||||||
// +build windows plan9 js
|
// +build windows plan9 js
|
||||||
|
|
||||||
package local
|
package local
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
//go:build !windows && !plan9 && !js
|
|
||||||
// +build !windows,!plan9,!js
|
// +build !windows,!plan9,!js
|
||||||
|
|
||||||
package local
|
package local
|
||||||
|
|||||||
@@ -27,7 +27,6 @@ import (
|
|||||||
"github.com/rclone/rclone/lib/encoder"
|
"github.com/rclone/rclone/lib/encoder"
|
||||||
"github.com/rclone/rclone/lib/file"
|
"github.com/rclone/rclone/lib/file"
|
||||||
"github.com/rclone/rclone/lib/readers"
|
"github.com/rclone/rclone/lib/readers"
|
||||||
"golang.org/x/text/unicode/norm"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Constants
|
// Constants
|
||||||
@@ -44,11 +43,11 @@ func init() {
|
|||||||
CommandHelp: commandHelp,
|
CommandHelp: commandHelp,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "nounc",
|
Name: "nounc",
|
||||||
Help: "Disable UNC (long path names) conversion on Windows.",
|
Help: "Disable UNC (long path names) conversion on Windows",
|
||||||
Advanced: runtime.GOOS != "windows",
|
Advanced: runtime.GOOS != "windows",
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "true",
|
Value: "true",
|
||||||
Help: "Disables long file names.",
|
Help: "Disables long file names",
|
||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "copy_links",
|
Name: "copy_links",
|
||||||
@@ -59,7 +58,7 @@ func init() {
|
|||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "links",
|
Name: "links",
|
||||||
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
|
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension",
|
||||||
Default: false,
|
Default: false,
|
||||||
NoPrefix: true,
|
NoPrefix: true,
|
||||||
ShortOpt: "l",
|
ShortOpt: "l",
|
||||||
@@ -67,7 +66,6 @@ func init() {
|
|||||||
}, {
|
}, {
|
||||||
Name: "skip_links",
|
Name: "skip_links",
|
||||||
Help: `Don't warn about skipped symlinks.
|
Help: `Don't warn about skipped symlinks.
|
||||||
|
|
||||||
This flag disables warning messages on skipped symlinks or junction
|
This flag disables warning messages on skipped symlinks or junction
|
||||||
points, as you explicitly acknowledge that they should be skipped.`,
|
points, as you explicitly acknowledge that they should be skipped.`,
|
||||||
Default: false,
|
Default: false,
|
||||||
@@ -75,39 +73,30 @@ points, as you explicitly acknowledge that they should be skipped.`,
|
|||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "zero_size_links",
|
Name: "zero_size_links",
|
||||||
Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
|
Help: `Assume the Stat size of links is zero (and read them instead)
|
||||||
|
|
||||||
Rclone used to use the Stat size of links as the link size, but this fails in quite a few places:
|
On some virtual filesystems (such ash LucidLink), reading a link size via a Stat call always returns 0.
|
||||||
|
However, on unix it reads as the length of the text in the link. This may cause errors like this when
|
||||||
|
syncing:
|
||||||
|
|
||||||
- Windows
|
Failed to copy: corrupted on transfer: sizes differ 0 vs 13
|
||||||
- On some virtual filesystems (such ash LucidLink)
|
|
||||||
- Android
|
|
||||||
|
|
||||||
So rclone now always reads the link.
|
Setting this flag causes rclone to read the link and use that as the size of the link
|
||||||
`,
|
instead of 0 which in most cases fixes the problem.`,
|
||||||
Default: false,
|
Default: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "unicode_normalization",
|
Name: "no_unicode_normalization",
|
||||||
Help: `Apply unicode NFC normalization to paths and filenames.
|
Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||||
|
|
||||||
This flag can be used to normalize file names into unicode NFC form
|
This flag is deprecated now. Rclone no longer normalizes unicode file
|
||||||
that are read from the local filesystem.
|
names, but it compares them with unicode normalization in the sync
|
||||||
|
routine instead.`,
|
||||||
Rclone does not normally touch the encoding of file names it reads from
|
|
||||||
the file system.
|
|
||||||
|
|
||||||
This can be useful when using macOS as it normally provides decomposed (NFD)
|
|
||||||
unicode which in some language (eg Korean) doesn't display properly on
|
|
||||||
some OSes.
|
|
||||||
|
|
||||||
Note that rclone compares filenames with unicode normalization in the sync
|
|
||||||
routine so this flag shouldn't normally be used.`,
|
|
||||||
Default: false,
|
Default: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "no_check_updated",
|
Name: "no_check_updated",
|
||||||
Help: `Don't check to see if the files change during upload.
|
Help: `Don't check to see if the files change during upload
|
||||||
|
|
||||||
Normally rclone checks the size and modification time of files as they
|
Normally rclone checks the size and modification time of files as they
|
||||||
are being uploaded and aborts with a message which starts "can't copy
|
are being uploaded and aborts with a message which starts "can't copy
|
||||||
@@ -153,7 +142,7 @@ to override the default choice.`,
|
|||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "case_insensitive",
|
Name: "case_insensitive",
|
||||||
Help: `Force the filesystem to report itself as case insensitive.
|
Help: `Force the filesystem to report itself as case insensitive
|
||||||
|
|
||||||
Normally the local backend declares itself as case insensitive on
|
Normally the local backend declares itself as case insensitive on
|
||||||
Windows/macOS and case sensitive for everything else. Use this flag
|
Windows/macOS and case sensitive for everything else. Use this flag
|
||||||
@@ -162,7 +151,7 @@ to override the default choice.`,
|
|||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "no_preallocate",
|
Name: "no_preallocate",
|
||||||
Help: `Disable preallocation of disk space for transferred files.
|
Help: `Disable preallocation of disk space for transferred files
|
||||||
|
|
||||||
Preallocation of disk space helps prevent filesystem fragmentation.
|
Preallocation of disk space helps prevent filesystem fragmentation.
|
||||||
However, some virtual filesystem layers (such as Google Drive File
|
However, some virtual filesystem layers (such as Google Drive File
|
||||||
@@ -173,7 +162,7 @@ Use this flag to disable preallocation.`,
|
|||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "no_sparse",
|
Name: "no_sparse",
|
||||||
Help: `Disable sparse files for multi-thread downloads.
|
Help: `Disable sparse files for multi-thread downloads
|
||||||
|
|
||||||
On Windows platforms rclone will make sparse files when doing
|
On Windows platforms rclone will make sparse files when doing
|
||||||
multi-thread downloads. This avoids long pauses on large files where
|
multi-thread downloads. This avoids long pauses on large files where
|
||||||
@@ -183,7 +172,7 @@ cause disk fragmentation and can be slow to work with.`,
|
|||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "no_set_modtime",
|
Name: "no_set_modtime",
|
||||||
Help: `Disable setting modtime.
|
Help: `Disable setting modtime
|
||||||
|
|
||||||
Normally rclone updates modification time of files after they are done
|
Normally rclone updates modification time of files after they are done
|
||||||
uploading. This can cause permissions issues on Linux platforms when
|
uploading. This can cause permissions issues on Linux platforms when
|
||||||
@@ -196,7 +185,7 @@ enabled, rclone will no longer update the modtime after copying a file.`,
|
|||||||
Name: config.ConfigEncoding,
|
Name: config.ConfigEncoding,
|
||||||
Help: config.ConfigEncodingHelp,
|
Help: config.ConfigEncodingHelp,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
Default: encoder.OS,
|
Default: defaultEnc,
|
||||||
}},
|
}},
|
||||||
}
|
}
|
||||||
fs.Register(fsi)
|
fs.Register(fsi)
|
||||||
@@ -207,7 +196,8 @@ type Options struct {
|
|||||||
FollowSymlinks bool `config:"copy_links"`
|
FollowSymlinks bool `config:"copy_links"`
|
||||||
TranslateSymlinks bool `config:"links"`
|
TranslateSymlinks bool `config:"links"`
|
||||||
SkipSymlinks bool `config:"skip_links"`
|
SkipSymlinks bool `config:"skip_links"`
|
||||||
UTFNorm bool `config:"unicode_normalization"`
|
ZeroSizeLinks bool `config:"zero_size_links"`
|
||||||
|
NoUTFNorm bool `config:"no_unicode_normalization"`
|
||||||
NoCheckUpdated bool `config:"no_check_updated"`
|
NoCheckUpdated bool `config:"no_check_updated"`
|
||||||
NoUNC bool `config:"nounc"`
|
NoUNC bool `config:"nounc"`
|
||||||
OneFileSystem bool `config:"one_file_system"`
|
OneFileSystem bool `config:"one_file_system"`
|
||||||
@@ -266,6 +256,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
return nil, errLinksAndCopyLinks
|
return nil, errLinksAndCopyLinks
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if opt.NoUTFNorm {
|
||||||
|
fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
|
||||||
|
}
|
||||||
|
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
name: name,
|
name: name,
|
||||||
opt: *opt,
|
opt: *opt,
|
||||||
@@ -402,7 +396,7 @@ func (f *Fs) newObjectWithInfo(remote string, info os.FileInfo) (fs.Object, erro
|
|||||||
|
|
||||||
}
|
}
|
||||||
if o.mode.IsDir() {
|
if o.mode.IsDir() {
|
||||||
return nil, fs.ErrorIsDir
|
return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
|
||||||
}
|
}
|
||||||
return o, nil
|
return o, nil
|
||||||
}
|
}
|
||||||
@@ -468,10 +462,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
for _, name := range names {
|
for _, name := range names {
|
||||||
namepath := filepath.Join(fsDirPath, name)
|
namepath := filepath.Join(fsDirPath, name)
|
||||||
fi, fierr := os.Lstat(namepath)
|
fi, fierr := os.Lstat(namepath)
|
||||||
if os.IsNotExist(fierr) {
|
|
||||||
// skip entry removed by a concurrent goroutine
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if fierr != nil {
|
if fierr != nil {
|
||||||
err = errors.Wrapf(err, "failed to read directory %q", namepath)
|
err = errors.Wrapf(err, "failed to read directory %q", namepath)
|
||||||
fs.Errorf(dir, "%v", fierr)
|
fs.Errorf(dir, "%v", fierr)
|
||||||
@@ -532,9 +522,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (f *Fs) cleanRemote(dir, filename string) (remote string) {
|
func (f *Fs) cleanRemote(dir, filename string) (remote string) {
|
||||||
if f.opt.UTFNorm {
|
|
||||||
filename = norm.NFC.String(filename)
|
|
||||||
}
|
|
||||||
remote = path.Join(dir, f.opt.Enc.ToStandardName(filename))
|
remote = path.Join(dir, f.opt.Enc.ToStandardName(filename))
|
||||||
|
|
||||||
if !utf8.ValidString(filename) {
|
if !utf8.ValidString(filename) {
|
||||||
@@ -570,8 +557,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
|||||||
|
|
||||||
// Mkdir creates the directory if it doesn't exist
|
// Mkdir creates the directory if it doesn't exist
|
||||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||||
|
// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
|
||||||
localPath := f.localPath(dir)
|
localPath := f.localPath(dir)
|
||||||
err := file.MkdirAll(localPath, 0777)
|
err := os.MkdirAll(localPath, 0777)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -765,7 +753,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
|||||||
|
|
||||||
// Create parent of destination
|
// Create parent of destination
|
||||||
dstParentPath := filepath.Dir(dstPath)
|
dstParentPath := filepath.Dir(dstPath)
|
||||||
err = file.MkdirAll(dstParentPath, 0777)
|
err = os.MkdirAll(dstParentPath, 0777)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1099,7 +1087,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
|||||||
// mkdirAll makes all the directories needed to store the object
|
// mkdirAll makes all the directories needed to store the object
|
||||||
func (o *Object) mkdirAll() error {
|
func (o *Object) mkdirAll() error {
|
||||||
dir := filepath.Dir(o.path)
|
dir := filepath.Dir(o.path)
|
||||||
return file.MkdirAll(dir, 0777)
|
return os.MkdirAll(dir, 0777)
|
||||||
}
|
}
|
||||||
|
|
||||||
type nopWriterCloser struct {
|
type nopWriterCloser struct {
|
||||||
@@ -1279,13 +1267,9 @@ func (o *Object) setMetadata(info os.FileInfo) {
|
|||||||
o.modTime = info.ModTime()
|
o.modTime = info.ModTime()
|
||||||
o.mode = info.Mode()
|
o.mode = info.Mode()
|
||||||
o.fs.objectMetaMu.Unlock()
|
o.fs.objectMetaMu.Unlock()
|
||||||
// Read the size of the link.
|
// On Windows links read as 0 size so set the correct size here
|
||||||
//
|
// Optionally, users can turn this feature on with the zero_size_links flag
|
||||||
// The value in info.Size() is not always correct
|
if (runtime.GOOS == "windows" || o.fs.opt.ZeroSizeLinks) && o.translatedLink {
|
||||||
// - Windows links read as 0 size
|
|
||||||
// - Some virtual filesystems (such ash LucidLink) links read as 0 size
|
|
||||||
// - Android - some versions the links are larger than readlink suggests
|
|
||||||
if o.translatedLink {
|
|
||||||
linkdst, err := os.Readlink(o.path)
|
linkdst, err := os.Readlink(o.path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(o, "Failed to read link size: %v", err)
|
fs.Errorf(o, "Failed to read link size: %v", err)
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
// Device reading functions
|
// Device reading functions
|
||||||
|
|
||||||
//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris
|
|
||||||
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
|
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
|
||||||
|
|
||||||
package local
|
package local
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
// Device reading functions
|
// Device reading functions
|
||||||
|
|
||||||
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
|
|
||||||
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
|
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
|
||||||
|
|
||||||
package local
|
package local
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
//go:build !windows
|
|
||||||
//+build !windows
|
//+build !windows
|
||||||
|
|
||||||
package local
|
package local
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
//go:build windows
|
|
||||||
//+build windows
|
//+build windows
|
||||||
|
|
||||||
package local
|
package local
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
//go:build !windows && !plan9 && !js
|
|
||||||
// +build !windows,!plan9,!js
|
// +build !windows,!plan9,!js
|
||||||
|
|
||||||
package local
|
package local
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
//go:build windows || plan9 || js
|
|
||||||
// +build windows plan9 js
|
// +build windows plan9 js
|
||||||
|
|
||||||
package local
|
package local
|
||||||
|
|||||||
@@ -3,8 +3,6 @@ package local
|
|||||||
import (
|
import (
|
||||||
"runtime"
|
"runtime"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/rclone/rclone/lib/encoder"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Test Windows character replacements
|
// Test Windows character replacements
|
||||||
@@ -23,7 +21,7 @@ func TestCleanWindows(t *testing.T) {
|
|||||||
t.Skipf("windows only")
|
t.Skipf("windows only")
|
||||||
}
|
}
|
||||||
for _, test := range testsWindows {
|
for _, test := range testsWindows {
|
||||||
got := cleanRootPath(test[0], true, encoder.OS)
|
got := cleanRootPath(test[0], true, defaultEnc)
|
||||||
expect := test[1]
|
expect := test[1]
|
||||||
if got != expect {
|
if got != expect {
|
||||||
t.Fatalf("got %q, expected %q", got, expect)
|
t.Fatalf("got %q, expected %q", got, expect)
|
||||||
|
|||||||
@@ -6,8 +6,8 @@ import (
|
|||||||
"bufio"
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"fmt"
|
|
||||||
"io"
|
"io"
|
||||||
|
"log"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
@@ -48,7 +48,7 @@ func (w *BinWriter) Reader() io.Reader {
|
|||||||
// WritePu16 writes a short as unsigned varint
|
// WritePu16 writes a short as unsigned varint
|
||||||
func (w *BinWriter) WritePu16(val int) {
|
func (w *BinWriter) WritePu16(val int) {
|
||||||
if val < 0 || val > 65535 {
|
if val < 0 || val > 65535 {
|
||||||
panic(fmt.Sprintf("Invalid UInt16 %v", val))
|
log.Fatalf("Invalid UInt16 %v", val)
|
||||||
}
|
}
|
||||||
w.WritePu64(int64(val))
|
w.WritePu64(int64(val))
|
||||||
}
|
}
|
||||||
@@ -56,7 +56,7 @@ func (w *BinWriter) WritePu16(val int) {
|
|||||||
// WritePu32 writes a signed long as unsigned varint
|
// WritePu32 writes a signed long as unsigned varint
|
||||||
func (w *BinWriter) WritePu32(val int64) {
|
func (w *BinWriter) WritePu32(val int64) {
|
||||||
if val < 0 || val > 4294967295 {
|
if val < 0 || val > 4294967295 {
|
||||||
panic(fmt.Sprintf("Invalid UInt32 %v", val))
|
log.Fatalf("Invalid UInt32 %v", val)
|
||||||
}
|
}
|
||||||
w.WritePu64(val)
|
w.WritePu64(val)
|
||||||
}
|
}
|
||||||
@@ -64,7 +64,7 @@ func (w *BinWriter) WritePu32(val int64) {
|
|||||||
// WritePu64 writes an unsigned (actually, signed) long as unsigned varint
|
// WritePu64 writes an unsigned (actually, signed) long as unsigned varint
|
||||||
func (w *BinWriter) WritePu64(val int64) {
|
func (w *BinWriter) WritePu64(val int64) {
|
||||||
if val < 0 {
|
if val < 0 {
|
||||||
panic(fmt.Sprintf("Invalid UInt64 %v", val))
|
log.Fatalf("Invalid UInt64 %v", val)
|
||||||
}
|
}
|
||||||
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
|
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
|
||||||
}
|
}
|
||||||
@@ -123,7 +123,7 @@ func (r *BinReader) check(err error) bool {
|
|||||||
r.err = err
|
r.err = err
|
||||||
}
|
}
|
||||||
if err != io.EOF {
|
if err != io.EOF {
|
||||||
panic(fmt.Sprintf("Error parsing response: %v", err))
|
log.Fatalf("Error parsing response: %v", err)
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -80,18 +80,18 @@ var oauthConfig = &oauth2.Config{
|
|||||||
|
|
||||||
// Register with Fs
|
// Register with Fs
|
||||||
func init() {
|
func init() {
|
||||||
MrHashType = hash.RegisterHash("mailru", "MailruHash", 40, mrhash.New)
|
MrHashType = hash.RegisterHash("MailruHash", 40, mrhash.New)
|
||||||
fs.Register(&fs.RegInfo{
|
fs.Register(&fs.RegInfo{
|
||||||
Name: "mailru",
|
Name: "mailru",
|
||||||
Description: "Mail.ru Cloud",
|
Description: "Mail.ru Cloud",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "user",
|
Name: "user",
|
||||||
Help: "User name (usually email).",
|
Help: "User name (usually email)",
|
||||||
Required: true,
|
Required: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "pass",
|
Name: "pass",
|
||||||
Help: "Password.",
|
Help: "Password",
|
||||||
Required: true,
|
Required: true,
|
||||||
IsPassword: true,
|
IsPassword: true,
|
||||||
}, {
|
}, {
|
||||||
@@ -99,7 +99,6 @@ func init() {
|
|||||||
Default: true,
|
Default: true,
|
||||||
Advanced: false,
|
Advanced: false,
|
||||||
Help: `Skip full upload if there is another file with same data hash.
|
Help: `Skip full upload if there is another file with same data hash.
|
||||||
|
|
||||||
This feature is called "speedup" or "put by hash". It is especially efficient
|
This feature is called "speedup" or "put by hash". It is especially efficient
|
||||||
in case of generally available files like popular books, video or audio clips,
|
in case of generally available files like popular books, video or audio clips,
|
||||||
because files are searched by hash in all accounts of all mailru users.
|
because files are searched by hash in all accounts of all mailru users.
|
||||||
@@ -120,7 +119,6 @@ streaming or partial uploads), it will not even try this optimization.`,
|
|||||||
Default: "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf",
|
Default: "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf",
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
Help: `Comma separated list of file name patterns eligible for speedup (put by hash).
|
Help: `Comma separated list of file name patterns eligible for speedup (put by hash).
|
||||||
|
|
||||||
Patterns are case insensitive and can contain '*' or '?' meta characters.`,
|
Patterns are case insensitive and can contain '*' or '?' meta characters.`,
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "",
|
Value: "",
|
||||||
@@ -139,9 +137,8 @@ Patterns are case insensitive and can contain '*' or '?' meta characters.`,
|
|||||||
Name: "speedup_max_disk",
|
Name: "speedup_max_disk",
|
||||||
Default: fs.SizeSuffix(3 * 1024 * 1024 * 1024),
|
Default: fs.SizeSuffix(3 * 1024 * 1024 * 1024),
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
Help: `This option allows you to disable speedup (put by hash) for large files.
|
Help: `This option allows you to disable speedup (put by hash) for large files
|
||||||
|
(because preliminary hashing can exhaust you RAM or disk space)`,
|
||||||
Reason is that preliminary hashing can exhaust your RAM or disk space.`,
|
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "0",
|
Value: "0",
|
||||||
Help: "Completely disable speedup (put by hash).",
|
Help: "Completely disable speedup (put by hash).",
|
||||||
@@ -171,7 +168,7 @@ Reason is that preliminary hashing can exhaust your RAM or disk space.`,
|
|||||||
Name: "check_hash",
|
Name: "check_hash",
|
||||||
Default: true,
|
Default: true,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
Help: "What should copy do if file checksum is mismatched or invalid.",
|
Help: "What should copy do if file checksum is mismatched or invalid",
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "true",
|
Value: "true",
|
||||||
Help: "Fail with error.",
|
Help: "Fail with error.",
|
||||||
@@ -185,7 +182,6 @@ Reason is that preliminary hashing can exhaust your RAM or disk space.`,
|
|||||||
Advanced: true,
|
Advanced: true,
|
||||||
Hide: fs.OptionHideBoth,
|
Hide: fs.OptionHideBoth,
|
||||||
Help: `HTTP user agent used internally by client.
|
Help: `HTTP user agent used internally by client.
|
||||||
|
|
||||||
Defaults to "rclone/VERSION" or "--user-agent" provided on command line.`,
|
Defaults to "rclone/VERSION" or "--user-agent" provided on command line.`,
|
||||||
}, {
|
}, {
|
||||||
Name: "quirks",
|
Name: "quirks",
|
||||||
@@ -193,7 +189,6 @@ Defaults to "rclone/VERSION" or "--user-agent" provided on command line.`,
|
|||||||
Advanced: true,
|
Advanced: true,
|
||||||
Hide: fs.OptionHideBoth,
|
Hide: fs.OptionHideBoth,
|
||||||
Help: `Comma separated list of internal maintenance flags.
|
Help: `Comma separated list of internal maintenance flags.
|
||||||
|
|
||||||
This option must not be used by an ordinary user. It is intended only to
|
This option must not be used by an ordinary user. It is intended only to
|
||||||
facilitate remote troubleshooting of backend issues. Strict meaning of
|
facilitate remote troubleshooting of backend issues. Strict meaning of
|
||||||
flags is not documented and not guaranteed to persist between releases.
|
flags is not documented and not guaranteed to persist between releases.
|
||||||
@@ -1963,7 +1958,7 @@ func (o *Object) readMetaData(ctx context.Context, force bool) error {
|
|||||||
}
|
}
|
||||||
newObj, ok := entry.(*Object)
|
newObj, ok := entry.(*Object)
|
||||||
if !ok || dirSize >= 0 {
|
if !ok || dirSize >= 0 {
|
||||||
return fs.ErrorIsDir
|
return fs.ErrorNotAFile
|
||||||
}
|
}
|
||||||
if newObj.remote != o.remote {
|
if newObj.remote != o.remote {
|
||||||
return fmt.Errorf("File %q path has changed to %q", o.remote, newObj.remote)
|
return fmt.Errorf("File %q path has changed to %q", o.remote, newObj.remote)
|
||||||
|
|||||||
@@ -59,7 +59,7 @@ func init() {
|
|||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "user",
|
Name: "user",
|
||||||
Help: "User name.",
|
Help: "User name",
|
||||||
Required: true,
|
Required: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "pass",
|
Name: "pass",
|
||||||
@@ -303,7 +303,7 @@ func (f *Fs) findObject(rootNode *mega.Node, file string) (node *mega.Node, err
|
|||||||
if err == mega.ENOENT {
|
if err == mega.ENOENT {
|
||||||
return nil, fs.ErrorObjectNotFound
|
return nil, fs.ErrorObjectNotFound
|
||||||
} else if err == nil && node.GetType() != mega.FILE {
|
} else if err == nil && node.GetType() != mega.FILE {
|
||||||
return nil, fs.ErrorIsDir // all other node types are directories
|
return nil, fs.ErrorNotAFile
|
||||||
}
|
}
|
||||||
return node, err
|
return node, err
|
||||||
}
|
}
|
||||||
@@ -958,7 +958,7 @@ func (o *Object) Size() int64 {
|
|||||||
// setMetaData sets the metadata from info
|
// setMetaData sets the metadata from info
|
||||||
func (o *Object) setMetaData(info *mega.Node) (err error) {
|
func (o *Object) setMetaData(info *mega.Node) (err error) {
|
||||||
if info.GetType() != mega.FILE {
|
if info.GetType() != mega.FILE {
|
||||||
return fs.ErrorIsDir // all other node types are directories
|
return fs.ErrorNotAFile
|
||||||
}
|
}
|
||||||
o.info = info
|
o.info = info
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user