Mirror of https://github.com/rclone/rclone.git (synced 2026-01-26 14:23:22 +00:00)

Compare commits: 1 commit, v1.57.0...fix-sftp-d

Commit: 97ade36d8c

.github/PULL_REQUEST_TEMPLATE.md (vendored): 2 changes
@@ -22,7 +22,7 @@ Link issues and relevant forum posts here.

#### Checklist

- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-new-feature-or-bug-fix).
- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
- [ ] I have added tests for all changes in this PR if appropriate.
- [ ] I have added documentation for the changes if appropriate.
- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).
.github/workflows/build.yml (vendored): 234 changes

@@ -25,12 +25,12 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.14', 'go1.15', 'go1.16']
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.13', 'go1.14', 'go1.15']
include:
- job_name: linux
os: ubuntu-latest
go: '1.17.x'
go: '1.16.x'
gotags: cmount
build_flags: '-include "^linux/"'
check: true

@@ -41,7 +41,7 @@ jobs:
- job_name: mac_amd64
os: macOS-latest
go: '1.17.x'
go: '1.16.x'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true

@@ -50,14 +50,14 @@ jobs:
- job_name: mac_arm64
os: macOS-latest
go: '1.17.x'
go: '1.16.x'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows_amd64
os: windows-latest
go: '1.17.x'
go: '1.16.x'
gotags: cmount
build_flags: '-include "^windows/amd64" -cgo'
build_args: '-buildmode exe'

@@ -67,7 +67,7 @@ jobs:
- job_name: windows_386
os: windows-latest
go: '1.17.x'
go: '1.16.x'
gotags: cmount
goarch: '386'
cgo: '1'

@@ -78,11 +78,16 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '1.17.x'
go: '1.16.x'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.13
os: ubuntu-latest
go: '1.13.x'
quicktest: true
- job_name: go1.14
os: ubuntu-latest
go: '1.14.x'

@@ -95,12 +100,6 @@ jobs:
quicktest: true
racequicktest: true
- job_name: go1.16
os: ubuntu-latest
go: '1.16.x'
quicktest: true
racequicktest: true
name: ${{ matrix.job_name }}
runs-on: ${{ matrix.os }}

@@ -203,6 +202,13 @@ jobs:
librclone/python/test_rclone.py
if: matrix.librclonetest
- name: Code quality test
shell: bash
run: |
make build_dep
make check
if: matrix.check
- name: Compile all architectures test
shell: bash
run: |

@@ -222,126 +228,110 @@ jobs:
# Deploy binaries if enabled in config && not a PR && not a fork
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
lint:
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 30
name: "lint"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Code quality test
uses: golangci/golangci-lint-action@v2
with:
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
version: latest
android:
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 30
name: "android-all"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
# Upgrade together with NDK version
- name: Set up Go 1.14
uses: actions/setup-go@v1
with:
go-version: 1.14
# Upgrade together with NDK version
- name: Set up Go 1.16
uses: actions/setup-go@v1
with:
go-version: 1.16
# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
- name: Force NDK version
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;21.4.7075529" | grep -v = || true
# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
- name: Force NDK version
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;22.1.7171670" | grep -v = || true
- name: Go module cache
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Set global environment variables
shell: bash
run: |
echo "VERSION=$(make version)" >> $GITHUB_ENV
- name: build native rclone
run: |
make
- name: install gomobile
run: |
go get golang.org/x/mobile/cmd/gobind
go get golang.org/x/mobile/cmd/gomobile
env PATH=$PATH:~/go/bin gomobile init
- name: arm-v7a gomobile build
run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
- name: arm-v7a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
echo 'GOARM=7' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm-v7a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
- name: install gomobile
run: |
go get golang.org/x/mobile/cmd/gobind
go get golang.org/x/mobile/cmd/gomobile
env PATH=$PATH:~/go/bin gomobile init
- name: arm64-v8a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm-v7a gomobile build
run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
- name: arm64-v8a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
- name: arm-v7a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
echo 'GOARM=7' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm-v7a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
- name: x86 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm64-v8a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x86 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
- name: arm64-v8a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
- name: x64 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x86 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x64 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
- name: x86 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
- name: x64 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x64 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
- name: Upload artifacts
run: |
make ci_upload
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: github.head_ref == '' && github.repository == 'rclone/rclone'
@@ -37,23 +37,35 @@ jobs:
if: github.repository == 'rclone/rclone'
needs: build
runs-on: ubuntu-latest
name: Build docker plugin job
name: Build and publish docker volume plugin
steps:
- name: Checkout master
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Build and publish docker plugin
- name: Set plugin parameters
shell: bash
run: |
VER=${GITHUB_REF#refs/tags/}
PLUGIN_USER=rclone
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
for PLUGIN_ARCH in amd64 arm64 arm/v7 ;do
export PLUGIN_USER PLUGIN_ARCH
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
done
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
GITHUB_REF=${{ github.ref }}
PLUGIN_IMAGE_USER=rclone
PLUGIN_IMAGE_NAME=docker-volume-rclone
PLUGIN_IMAGE_TAG=${GITHUB_REF#refs/tags/}
PLUGIN_IMAGE=${PLUGIN_IMAGE_USER}/${PLUGIN_IMAGE_NAME}:${PLUGIN_IMAGE_TAG}
PLUGIN_IMAGE_LATEST=${PLUGIN_IMAGE_USER}/${PLUGIN_IMAGE_NAME}:latest
echo "PLUGIN_IMAGE_USER=${PLUGIN_IMAGE_USER}" >> $GITHUB_ENV
echo "PLUGIN_IMAGE_NAME=${PLUGIN_IMAGE_NAME}" >> $GITHUB_ENV
echo "PLUGIN_IMAGE_TAG=${PLUGIN_IMAGE_TAG}" >> $GITHUB_ENV
echo "PLUGIN_IMAGE=${PLUGIN_IMAGE}" >> $GITHUB_ENV
echo "PLUGIN_IMAGE_LATEST=${PLUGIN_IMAGE_LATEST}" >> $GITHUB_ENV
- name: Build image
shell: bash
run: |
make docker-plugin
- name: Push image
shell: bash
run: |
docker login -u ${{ secrets.DOCKER_HUB_USER }} -p ${{ secrets.DOCKER_HUB_PASSWORD }}
make docker-plugin-push PLUGIN_IMAGE=${PLUGIN_IMAGE}
make docker-plugin-push PLUGIN_IMAGE=${PLUGIN_IMAGE_LATEST}
.gitignore (vendored): 1 change

@@ -14,4 +14,3 @@ fuzz-build.zip
*.orig
*.rej
Thumbs.db
__pycache__
@@ -5,7 +5,7 @@ linters:
- deadcode
- errcheck
- goimports
- revive
- golint
- ineffassign
- structcheck
- varcheck

@@ -24,7 +24,3 @@ issues:
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0
run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m
@@ -48,12 +48,10 @@ Now [install Go](https://golang.org/doc/install) and verify your installation:
Great, you can now compile and execute your own version of rclone:
go build
go build
./rclone version
(Note that you can also replace `go build` with `make`, which will include a
more accurate version number in the executable as well as enable you to specify
more build options.) Finally make a branch to add your new feature
Finally make a branch to add your new feature
git checkout -b my-new-feature

@@ -275,28 +273,9 @@ If you add a new general flag (not for a backend), then document it in
alphabetical order.
If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field.
* Start with the most important information about the option,
as a single sentence on a single line.
* This text will be used for the command-line flag help.
* It will be combined with other information, such as any default value,
and the result will look odd if not written as a single sentence.
* It should end with a period/full stop character, which will be shown
in docs but automatically removed when producing the flag help.
* Try to keep it below 80 characters, to reduce text wrapping in the terminal.
* More details can be added in a new paragraph, after an empty line (`"\n\n"`).
* Like with docs generated from Markdown, a single line break is ignored
and two line breaks creates a new paragraph.
* This text will be shown to the user in `rclone config`
and in the docs (where it will be added by `make backenddocs`,
normally run some time before next release).
* To create options of enumeration type use the `Examples:` field.
* Each example value have their own `Help:` field, but they are treated
a bit different than the main option help text. They will be shown
as an unordered list, therefore a single line break is enough to
create a new list item. Also, for enumeration texts like name of
countries, it looks better without an ending period/full stop character.
the source file in the `Help:` field. The first line of this is used
for the flag help, the remainder is shown to the user in `rclone
config` and is added to the docs with `make backenddocs`.
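To make this convention concrete, here is a small, self-contained Go sketch (illustrative only, not rclone code) showing how a `Help:` string written this way splits into the one-line flag help and the longer text shown in `rclone config`; the sample Help value is the alias backend's string that appears later in this comparison.

```go
package main

import (
	"fmt"
	"strings"
)

// splitHelp is a hypothetical helper mirroring the convention above: the
// first line of an option's Help text becomes the command-line flag help
// (with the trailing full stop removed) and the remainder, after the blank
// line, is only shown in `rclone config` and the generated docs.
func splitHelp(help string) (flagHelp, configHelp string) {
	parts := strings.SplitN(help, "\n", 2)
	flagHelp = strings.TrimSuffix(strings.TrimSpace(parts[0]), ".")
	if len(parts) > 1 {
		configHelp = strings.TrimSpace(parts[1])
	}
	return flagHelp, configHelp
}

func main() {
	help := "Remote or path to alias.\n\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\"."
	flagHelp, configHelp := splitHelp(help)
	fmt.Println("flag help:  ", flagHelp)
	fmt.Println("config help:", configHelp)
}
```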
The only documentation you need to edit are the `docs/content/*.md`
files. The `MANUAL.*`, `rclone.1`, web site, etc. are all auto generated

@@ -305,9 +284,7 @@ website` targets in the Makefile if you are interested in how. You
don't need to run these when adding a feature.
Documentation for rclone sub commands is with their code, e.g.
`cmd/ls/ls.go`. Write flag help strings as a single sentence on a single
line, without a period/full stop character at the end, as it will be
combined unmodified with other information (such as any default value).
`cmd/ls/ls.go`.
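For sub-command help specifically, here is a stand-alone Go sketch in the general style of `cmd/ls/ls.go` (illustrative only; the command and flag shown are simplified stand-ins, not rclone's actual definitions), with the one-line `Short` help and the flag usage written as single sentences without a trailing full stop:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var humanReadable bool
	cmd := &cobra.Command{
		Use:   "ls remote:path",
		Short: "List the objects in the path with size and path", // single sentence, no trailing full stop
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("would list:", args)
			return nil
		},
	}
	// Flag help follows the same convention: one sentence, no trailing full stop.
	cmd.Flags().BoolVar(&humanReadable, "human-readable", false,
		"Print sizes in human readable format")
	_ = cmd.Execute()
}
```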
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy.
MANUAL.html (generated): 5883 changes (file diff suppressed because it is too large)
MANUAL.txt (generated): 6620 changes (file diff suppressed because it is too large)
Makefile: 45 changes

@@ -258,31 +258,34 @@ winzip:
zip -9 rclone-$(TAG).zip rclone.exe
# docker volume plugin
PLUGIN_USER ?= rclone
PLUGIN_TAG ?= latest
PLUGIN_BASE_TAG ?= latest
PLUGIN_ARCH ?= amd64
PLUGIN_IMAGE := $(PLUGIN_USER)/docker-volume-rclone:$(PLUGIN_TAG)
PLUGIN_BASE := $(PLUGIN_USER)/rclone:$(PLUGIN_BASE_TAG)
PLUGIN_IMAGE_USER ?= rclone
PLUGIN_IMAGE_TAG ?= latest
PLUGIN_IMAGE_NAME ?= docker-volume-rclone
PLUGIN_IMAGE ?= $(PLUGIN_IMAGE_USER)/$(PLUGIN_IMAGE_NAME):$(PLUGIN_IMAGE_TAG)
PLUGIN_BASE_IMAGE := rclone/rclone:latest
PLUGIN_BUILD_DIR := ./build/docker-plugin
PLUGIN_CONTRIB_DIR := ./contrib/docker-plugin/managed
PLUGIN_CONTRIB_DIR := ./cmd/serve/docker/contrib/plugin
PLUGIN_CONFIG := $(PLUGIN_CONTRIB_DIR)/config.json
PLUGIN_DOCKERFILE := $(PLUGIN_CONTRIB_DIR)/Dockerfile
PLUGIN_CONTAINER := docker-volume-rclone-dev-$(shell date +'%Y%m%d-%H%M%S')
docker-plugin: docker-plugin-rootfs docker-plugin-create
docker-plugin-image: rclone
docker build --no-cache --pull --build-arg BASE_IMAGE=${PLUGIN_BASE_IMAGE} -t ${PLUGIN_IMAGE} -f ${PLUGIN_DOCKERFILE} .
docker-plugin-rootfs: docker-plugin-image
mkdir -p ${PLUGIN_BUILD_DIR}/rootfs
docker create --name ${PLUGIN_CONTAINER} ${PLUGIN_IMAGE}
docker export ${PLUGIN_CONTAINER} | tar -x -C ${PLUGIN_BUILD_DIR}/rootfs
docker rm -vf ${PLUGIN_CONTAINER}
cp ${PLUGIN_CONFIG} ${PLUGIN_BUILD_DIR}/config.json
docker-plugin-create:
docker buildx inspect |grep -q /${PLUGIN_ARCH} || \
docker run --rm --privileged tonistiigi/binfmt --install all
rm -rf ${PLUGIN_BUILD_DIR}
docker buildx build \
--no-cache --pull \
--build-arg BASE_IMAGE=${PLUGIN_BASE} \
--platform linux/${PLUGIN_ARCH} \
--output ${PLUGIN_BUILD_DIR}/rootfs \
${PLUGIN_CONTRIB_DIR}
cp ${PLUGIN_CONTRIB_DIR}/config.json ${PLUGIN_BUILD_DIR}
docker plugin rm --force ${PLUGIN_IMAGE} 2>/dev/null || true
docker plugin rm -f ${PLUGIN_IMAGE} 2>/dev/null || true
docker plugin create ${PLUGIN_IMAGE} ${PLUGIN_BUILD_DIR}
docker-plugin-push:
docker-plugin-push: docker-plugin-create
docker plugin push ${PLUGIN_IMAGE}
docker plugin rm ${PLUGIN_IMAGE}
docker-plugin: docker-plugin-create docker-plugin-push
@@ -32,6 +32,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
@@ -20,7 +20,7 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
Help: "Remote or path to alias.\n\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
Required: true,
}},
}
@@ -18,7 +18,6 @@ import (
_ "github.com/rclone/rclone/backend/ftp"
_ "github.com/rclone/rclone/backend/googlecloudstorage"
_ "github.com/rclone/rclone/backend/googlephotos"
_ "github.com/rclone/rclone/backend/hasher"
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic"

@@ -38,7 +37,6 @@ import (
_ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sia"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/tardigrade"
@@ -1,6 +1,5 @@
// Test AmazonCloudDrive filesystem interface
//go:build acd
// +build acd
package amazonclouddrive_test
@@ -1,7 +1,6 @@
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// +build !plan9,!solaris,!js,go1.14
package azureblob

@@ -16,7 +15,6 @@ import (
"net/http"
"net/url"
"path"
"strconv"
"strings"
"sync"
"time"

@@ -75,7 +73,7 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "account",
Help: "Storage Account Name.\n\nLeave blank to use SAS URL or Emulator.",
Help: "Storage Account Name (leave blank to use SAS URL or Emulator)",
}, {
Name: "service_principal_file",
Help: `Path to file containing credentials for use with a service principal.

@@ -91,13 +89,13 @@ See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/a
`,
}, {
Name: "key",
Help: "Storage Account Key.\n\nLeave blank to use SAS URL or Emulator.",
Help: "Storage Account Key (leave blank to use SAS URL or Emulator)",
}, {
Name: "sas_url",
Help: "SAS URL for container level access only.\n\nLeave blank if using account/key or Emulator.",
Help: "SAS URL for container level access only\n(leave blank if using account/key or Emulator)",
}, {
Name: "use_msi",
Help: `Use a managed service identity to authenticate (only works in Azure).
Help: `Use a managed service identity to authenticate (only works in Azure)
When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/)
to authenticate to Azure Storage instead of a SAS token or account key.

@@ -110,27 +108,27 @@ msi_client_id, or msi_mi_res_id parameters.`,
Default: false,
}, {
Name: "msi_object_id",
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
Help: "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_mi_res_id specified.",
Advanced: true,
}, {
Name: "msi_client_id",
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
Help: "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_object_id or msi_mi_res_id specified.",
Advanced: true,
}, {
Name: "msi_mi_res_id",
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
Help: "Azure resource ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_object_id specified.",
Advanced: true,
}, {
Name: "use_emulator",
Help: "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
Help: "Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)",
Default: false,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
Help: "Endpoint for the service\nLeave blank normally.",
Advanced: true,
}, {
Name: "upload_cutoff",
Help: "Cutoff for switching to chunked upload (<= 256 MiB) (deprecated).",
Help: "Cutoff for switching to chunked upload (<= 256 MiB). (Deprecated)",
Advanced: true,
}, {
Name: "chunk_size",

@@ -201,7 +199,6 @@ to start uploading.`,
Default: memoryPoolFlushTime,
Advanced: true,
Help: `How often internal memory buffer pools will be flushed.
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
}, {

@@ -221,12 +218,12 @@ This option controls how often unused buffers will be removed from the pool.`,
encoder.EncodeRightPeriod),
}, {
Name: "public_access",
Help: "Public access level of a container: blob or container.",
Help: "Public access level of a container: blob, container.",
Default: string(azblob.PublicAccessNone),
Examples: []fs.OptionExample{
{
Value: string(azblob.PublicAccessNone),
Help: "The container and its blobs can be accessed only with an authorized request.\nIt's a default value.",
Help: "The container and its blobs can be accessed only with an authorized request. It's a default value",
}, {
Value: string(azblob.PublicAccessBlob),
Help: "Blob data within this container can be read via anonymous request.",

@@ -236,11 +233,6 @@ This option controls how often unused buffers will be removed from the pool.`,
},
},
Advanced: true,
}, {
Name: "no_head_object",
Help: `If set, do not do HEAD before GET when getting objects.`,
Default: false,
Advanced: true,
}},
})
}

@@ -266,7 +258,6 @@ type Options struct {
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
PublicAccess string `config:"public_access"`
NoHeadObject bool `config:"no_head_object"`
}
// Fs represents a remote azure server

@@ -765,7 +756,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItemInternal) (fs
if err != nil {
return nil, err
}
} else if !o.fs.opt.NoHeadObject {
} else {
err := o.readMetaData() // reads info and headers, returning an error
if err != nil {
return nil, err

@@ -1375,39 +1366,6 @@ func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetProper
return nil
}
func (o *Object) decodeMetaDataFromDownloadResponse(info *azblob.DownloadResponse) (err error) {
metadata := info.NewMetadata()
size := info.ContentLength()
if isDirectoryMarker(size, metadata, o.remote) {
return fs.ErrorNotAFile
}
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
// this as base64 encoded string.
o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
o.mimeType = info.ContentType()
o.size = size
o.modTime = info.LastModified()
o.accessTier = o.AccessTier()
o.setMetadata(metadata)
// If it was a Range request, the size is wrong, so correct it
if contentRange := info.ContentRange(); contentRange != "" {
slash := strings.IndexRune(contentRange, '/')
if slash >= 0 {
i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
if err == nil {
o.size = i
} else {
fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err)
}
} else {
fs.Debugf(o, "Failed to find length in %q", contentRange)
}
}
return nil
}
func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItemInternal) (err error) {
metadata := info.Metadata
size := *info.Properties.ContentLength

@@ -1538,10 +1496,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return nil, errors.Wrap(err, "failed to open for download")
}
err = o.decodeMetaDataFromDownloadResponse(downloadResponse)
if err != nil {
return nil, errors.Wrap(err, "failed to decode metadata for download")
}
in = downloadResponse.Body(azblob.RetryReaderOptions{})
return in, nil
}
@@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// +build !plan9,!solaris,!js,go1.14
package azureblob

@@ -1,7 +1,6 @@
// Test AzureBlob filesystem interface
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// +build !plan9,!solaris,!js,go1.14
package azureblob

@@ -1,7 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || solaris || js
// +build plan9 solaris js
// +build plan9 solaris js !go1.14
package azureblob

@@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// +build !plan9,!solaris,!js,go1.14
package azureblob

@@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// +build !plan9,!solaris,!js,go1.14
package azureblob
@@ -75,15 +75,15 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "account",
Help: "Account ID or Application Key ID.",
Help: "Account ID or Application Key ID",
Required: true,
}, {
Name: "key",
Help: "Application Key.",
Help: "Application Key",
Required: true,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
Help: "Endpoint for the service.\nLeave blank normally.",
Advanced: true,
}, {
Name: "test_mode",

@@ -103,7 +103,7 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
Advanced: true,
}, {
Name: "versions",
Help: "Include old versions in directory listings.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Help: "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Default: false,
Advanced: true,
}, {

@@ -121,7 +121,7 @@ This value should be set no larger than 4.657 GiB (== 5 GB).`,
Advanced: true,
}, {
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy.
Help: `Cutoff for switching to multipart copy
Any files larger than this that need to be server-side copied will be
copied in chunks of this size.

@@ -131,19 +131,17 @@ The minimum is 0 and the maximum is 4.6 GiB.`,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Upload chunk size.
Help: `Upload chunk size. Must fit in memory.
When uploading large files, chunk the file into this size.
Must fit in memory. These chunks are buffered in memory and there
might a maximum of "--transfers" chunks in progress at once.
5,000,000 Bytes is the minimum size.`,
When uploading large files, chunk the file into this size. Note that
these chunks are buffered in memory and there might a maximum of
"--transfers" chunks in progress at once. 5,000,000 Bytes is the
minimum size.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files.
Help: `Disable checksums for large (> upload cutoff) files
Normally rclone will calculate the SHA1 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
@@ -61,7 +61,7 @@ func (e *Error) Error() string {
var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"
// Types of things in Item
const (

@@ -90,12 +90,6 @@ type Item struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
} `json:"shared_link"`
OwnedBy struct {
Type string `json:"type"`
ID string `json:"id"`
Name string `json:"name"`
Login string `json:"login"`
} `json:"owned_by"`
}
// ModTime returns the modification time of the item

@@ -109,11 +103,10 @@ func (i *Item) ModTime() (t time.Time) {
// FolderItems is returned from the GetFolderItems call
type FolderItems struct {
TotalCount int `json:"total_count"`
Entries []Item `json:"entries"`
Offset int `json:"offset"`
Limit int `json:"limit"`
NextMarker *string `json:"next_marker,omitempty"`
TotalCount int `json:"total_count"`
Entries []Item `json:"entries"`
Offset int `json:"offset"`
Limit int `json:"limit"`
Order []struct {
By string `json:"by"`
Direction string `json:"direction"`
@@ -22,8 +22,6 @@ import (
"path"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/rclone/rclone/lib/encoder"

@@ -58,6 +56,7 @@ const (
decayConstant = 2 // bigger for slower decay, exponential
rootURL = "https://api.box.com/2.0"
uploadURL = "https://upload.box.com/api/2.0"
listChunks = 1000 // chunk size to read directory listings
minUploadCutoff = 50000000 // upload cutoff can be no lower than this
defaultUploadCutoff = 50 * 1024 * 1024
tokenURL = "https://api.box.com/oauth2/token"

@@ -110,19 +109,19 @@ func init() {
Advanced: true,
}, {
Name: "box_config_file",
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
Help: "Box App config.json location\nLeave blank normally." + env.ShellExpandHelp,
}, {
Name: "access_token",
Help: "Box App Primary Access Token\n\nLeave blank normally.",
Help: "Box App Primary Access Token\nLeave blank normally.",
}, {
Name: "box_sub_type",
Default: "user",
Examples: []fs.OptionExample{{
Value: "user",
Help: "Rclone should act on behalf of a user.",
Help: "Rclone should act on behalf of a user",
}, {
Value: "enterprise",
Help: "Rclone should act on behalf of a service account.",
Help: "Rclone should act on behalf of a service account",
}},
}, {
Name: "upload_cutoff",

@@ -134,16 +133,6 @@ func init() {
Help: "Max number of times to try committing a multipart file.",
Default: 100,
Advanced: true,
}, {
Name: "list_chunk",
Default: 1000,
Help: "Size of listing chunk 1-1000.",
Advanced: true,
}, {
Name: "owned_by",
Default: "",
Help: "Only show items owned by the login (email address) passed in.",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,

@@ -258,8 +247,6 @@ type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
RootFolderID string `config:"root_folder_id"`
AccessToken string `config:"access_token"`
ListChunk int `config:"list_chunk"`
OwnedBy string `config:"owned_by"`
}
// Fs represents a remote box

@@ -339,13 +326,6 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
authRetry = true
fs.Debugf(nil, "Should retry: %v", err)
}
// Box API errors which should be retries
if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "operation_blocked_temporary" {
fs.Debugf(nil, "Retrying API error %v", err)
return true, err
}
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

@@ -360,7 +340,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
return nil, err
}
found, err := f.listAll(ctx, directoryID, false, true, true, func(item *api.Item) bool {
found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
if strings.EqualFold(item.Name, leaf) {
info = item
return true

@@ -535,7 +515,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, true, func(item *api.Item) bool {
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
if strings.EqualFold(item.Name, leaf) {
pathIDOut = item.ID
return true

@@ -591,20 +571,17 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, activeOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/folders/" + dirID + "/items",
Parameters: fieldsValue(),
}
opts.Parameters.Set("limit", strconv.Itoa(f.opt.ListChunk))
opts.Parameters.Set("usemarker", "true")
var marker *string
opts.Parameters.Set("limit", strconv.Itoa(listChunks))
offset := 0
OUTER:
for {
if marker != nil {
opts.Parameters.Set("marker", *marker)
}
opts.Parameters.Set("offset", strconv.Itoa(offset))
var result api.FolderItems
var resp *http.Response

@@ -629,10 +606,7 @@ OUTER:
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
continue
}
if activeOnly && item.ItemStatus != api.ItemStatusActive {
continue
}
if f.opt.OwnedBy != "" && f.opt.OwnedBy != item.OwnedBy.Login {
if item.ItemStatus != api.ItemStatusActive {
continue
}
item.Name = f.opt.Enc.ToStandardName(item.Name)

@@ -641,8 +615,8 @@ OUTER:
break OUTER
}
}
marker = result.NextMarker
if marker == nil {
offset += result.Limit
if offset >= result.TotalCount {
break
}
}

@@ -664,7 +638,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool {
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
remote := path.Join(dir, info.Name)
if info.Type == api.ItemTypeFolder {
// cache the directory ID for later lookups

@@ -1118,36 +1092,45 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) (err error) {
var (
deleteErrors = int64(0)
concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
wg sync.WaitGroup
)
_, err = f.listAll(ctx, "trash", false, false, false, func(item *api.Item) bool {
if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
wg.Add(1)
concurrencyControl <- struct{}{}
go func() {
defer func() {
<-concurrencyControl
wg.Done()
}()
opts := rest.Opts{
Method: "GET",
Path: "/folders/trash/items",
Parameters: url.Values{
"fields": []string{"type", "id"},
},
}
opts.Parameters.Set("limit", strconv.Itoa(listChunks))
offset := 0
for {
opts.Parameters.Set("offset", strconv.Itoa(offset))
var result api.FolderItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't list trash")
}
for i := range result.Entries {
item := &result.Entries[i]
if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
err := f.deletePermanently(ctx, item.Type, item.ID)
if err != nil {
fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
atomic.AddInt64(&deleteErrors, 1)
return errors.Wrap(err, "failed to delete file")
}
}()
} else {
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
} else {
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
continue
}
}
offset += result.Limit
if offset >= result.TotalCount {
break
}
return false
})
wg.Wait()
if deleteErrors != 0 {
return errors.Errorf("failed to delete %d trash items", deleteErrors)
}
return err
return
}
// DirCacheFlush resets the directory cache - used in testing as an

@@ -1201,9 +1184,6 @@ func (o *Object) Size() int64 {
// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) {
if info.Type == api.ItemTypeFolder {
return fs.ErrorIsDir
}
if info.Type != api.ItemTypeFile {
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
}
backend/cache/cache.go (vendored): 32 changes

@@ -1,4 +1,3 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache

@@ -69,26 +68,26 @@ func init() {
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to cache.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Help: "Remote to cache.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "plex_url",
Help: "The URL of the Plex server.",
Help: "The URL of the Plex server",
}, {
Name: "plex_username",
Help: "The username of the Plex user.",
Help: "The username of the Plex user",
}, {
Name: "plex_password",
Help: "The password of the Plex user.",
Help: "The password of the Plex user",
IsPassword: true,
}, {
Name: "plex_token",
Help: "The plex token for authentication - auto set normally.",
Help: "The plex token for authentication - auto set normally",
Hide: fs.OptionHideBoth,
Advanced: true,
}, {
Name: "plex_insecure",
Help: "Skip all certificate verification when connecting to the Plex server.",
Help: "Skip all certificate verification when connecting to the Plex server",
Advanced: true,
}, {
Name: "chunk_size",

@@ -143,12 +142,12 @@ oldest chunks until it goes under this value.`,
}},
}, {
Name: "db_path",
Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
Help: "Directory to store file structure metadata DB.\n\nThe remote name is used as the DB file name.",
Default: filepath.Join(config.CacheDir, "cache-backend"),
Help: "Directory to store file structure metadata DB.\nThe remote name is used as the DB file name.",
Advanced: true,
}, {
Name: "chunk_path",
Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
Default: filepath.Join(config.CacheDir, "cache-backend"),
Help: `Directory to cache chunk files.
Path to where partial file data (chunks) are stored locally. The remote

@@ -168,7 +167,6 @@ then "--cache-chunk-path" will use the same path as "--cache-db-path".`,
Name: "chunk_clean_interval",
Default: DefCacheChunkCleanInterval,
Help: `How often should the cache perform cleanups of the chunk storage.
The default value should be ok for most people. If you find that the
cache goes over "cache-chunk-total-size" too often then try to lower
this value to force it to perform cleanups more often.`,

@@ -222,7 +220,7 @@ available on the local machine.`,
}, {
Name: "rps",
Default: int(DefCacheRps),
Help: `Limits the number of requests per second to the source FS (-1 to disable).
Help: `Limits the number of requests per second to the source FS (-1 to disable)
This setting places a hard limit on the number of requests per second
that cache will be doing to the cloud provider remote and try to

@@ -243,7 +241,7 @@ still pass.`,
}, {
Name: "writes",
Default: DefCacheWrites,
Help: `Cache file data on writes through the FS.
Help: `Cache file data on writes through the FS
If you need to read files immediately after you upload them through
cache you can enable this flag to have their data stored in the

@@ -264,7 +262,7 @@ provider`,
}, {
Name: "tmp_wait_time",
Default: DefCacheTmpWaitTime,
Help: `How long should files be stored in local cache before being uploaded.
Help: `How long should files be stored in local cache before being uploaded
This is the duration that a file must wait in the temporary location
_cache-tmp-upload-path_ before it is selected for upload.

@@ -275,7 +273,7 @@ to start the upload if a queue formed for this purpose.`,
}, {
Name: "db_wait_time",
Default: DefCacheDbWaitTime,
Help: `How long to wait for the DB to be available - 0 is unlimited.
Help: `How long to wait for the DB to be available - 0 is unlimited
Only one process can have the DB open at any one time, so rclone waits
for this duration for the DB to become available before it gives an

@@ -422,8 +420,8 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
dbPath := f.opt.DbPath
chunkPath := f.opt.ChunkPath
// if the dbPath is non default but the chunk path is default, we overwrite the last to follow the same one as dbPath
if dbPath != filepath.Join(config.GetCacheDir(), "cache-backend") &&
chunkPath == filepath.Join(config.GetCacheDir(), "cache-backend") {
if dbPath != filepath.Join(config.CacheDir, "cache-backend") &&
chunkPath == filepath.Join(config.CacheDir, "cache-backend") {
chunkPath = dbPath
}
if filepath.Ext(dbPath) != "" {
backend/cache/cache_internal_test.go (vendored): 17 changes

@@ -1,5 +1,5 @@
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
// +build !plan9,!js
// +build !race
package cache_test

@@ -16,7 +16,6 @@ import (
"os"
"path"
"path/filepath"
"runtime"
"runtime/debug"
"strings"
"testing"

@@ -294,9 +293,6 @@ func TestInternalCachedWrittenContentMatches(t *testing.T) {
}
func TestInternalDoubleWrittenContentMatches(t *testing.T) {
if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)

@@ -685,9 +681,6 @@ func TestInternalCacheWrites(t *testing.T) {
}
func TestInternalMaxChunkSizeRespected(t *testing.T) {
if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

@@ -926,9 +919,9 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
}
runInstance.rootIsCrypt = rootIsCrypt
runInstance.dbPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote+".db")
runInstance.chunkPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote)
runInstance.vfsCachePath = filepath.Join(config.GetCacheDir(), "vfs", remote)
runInstance.dbPath = filepath.Join(config.CacheDir, "cache-backend", cacheRemote+".db")
runInstance.chunkPath = filepath.Join(config.CacheDir, "cache-backend", cacheRemote)
runInstance.vfsCachePath = filepath.Join(config.CacheDir, "vfs", remote)
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
require.NoError(t, err)
backend/cache/cache_test.go (vendored): 4 changes

@@ -1,7 +1,7 @@
// Test Cache filesystem interface
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
// +build !plan9,!js
// +build !race
package cache_test
backend/cache/cache_unsupported.go (vendored): 1 change

@@ -1,7 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || js
// +build plan9 js
package cache
backend/cache/cache_upload_test.go (vendored): 4 changes

@@ -1,5 +1,5 @@
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
// +build !plan9,!js
// +build !race
package cache_test
backend/cache/directory.go (vendored): 1 change

@@ -1,4 +1,3 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
backend/cache/handle.go (vendored): 1 change

@@ -1,4 +1,3 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
backend/cache/object.go (vendored): 1 change

@@ -1,4 +1,3 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
backend/cache/plex.go (vendored): 1 change

@@ -1,4 +1,3 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
backend/cache/storage_memory.go (vendored): 1 change

@@ -1,4 +1,3 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
backend/cache/storage_persistent.go (vendored): 1 change

@@ -1,4 +1,3 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
@@ -150,7 +150,6 @@ func init() {
|
||||
Name: "remote",
|
||||
Required: true,
|
||||
Help: `Remote to chunk/unchunk.
|
||||
|
||||
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
|
||||
"myremote:bucket" or maybe "myremote:" (not recommended).`,
|
||||
}, {
|
||||
@@ -164,7 +163,6 @@ Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
|
||||
Hide: fs.OptionHideCommandLine,
|
||||
Default: `*.rclone_chunk.###`,
|
||||
Help: `String format of chunk file names.
|
||||
|
||||
The two placeholders are: base file name (*) and chunk number (#...).
|
||||
There must be one and only one asterisk and one or more consecutive hash characters.
|
||||
If chunk number has less digits than the number of hashes, it is left-padded by zeros.
|
||||
@@ -176,57 +174,48 @@ Possible chunk files are ignored if their name does not match given format.`,
|
||||
Hide: fs.OptionHideCommandLine,
|
||||
Default: 1,
|
||||
Help: `Minimum valid chunk number. Usually 0 or 1.
|
||||
|
||||
By default chunk numbers start from 1.`,
|
||||
}, {
|
||||
Name: "meta_format",
|
||||
Advanced: true,
|
||||
Hide: fs.OptionHideCommandLine,
|
||||
Default: "simplejson",
|
||||
Help: `Format of the metadata object or "none".
|
||||
|
||||
By default "simplejson".
|
||||
Help: `Format of the metadata object or "none". By default "simplejson".
|
||||
Metadata is a small JSON file named after the composite file.`,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "none",
|
||||
Help: `Do not use metadata files at all.
|
||||
Requires hash type "none".`,
|
||||
Help: `Do not use metadata files at all. Requires hash type "none".`,
|
||||
}, {
|
||||
Value: "simplejson",
|
||||
Help: `Simple JSON supports hash sums and chunk validation.
|
||||
|
||||
It has the following fields: ver, size, nchunks, md5, sha1.`,
|
||||
}},
|
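As a rough illustration of the "simplejson" metadata described above: the field names follow the help text (ver, size, nchunks, md5, sha1), but the JSON tags and struct layout here are assumptions, not the backend's actual code.

package main

import (
	"encoding/json"
	"fmt"
)

// metaSimpleJSON sketches a metadata object with the fields named in the
// help text above; the JSON tags are assumptions.
type metaSimpleJSON struct {
	Version  int    `json:"ver"`
	Size     int64  `json:"size"`
	ChunkNum int    `json:"nchunks"`
	MD5      string `json:"md5"`
	SHA1     string `json:"sha1"`
}

func main() {
	meta := metaSimpleJSON{Version: 1, Size: 12345, ChunkNum: 3,
		MD5: "9e107d9d372bb6826bd81d3542a419d6"}
	out, _ := json.Marshal(meta)
	fmt.Println(string(out)) // small JSON file stored next to the composite file
}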
||||
}, {
|
||||
Name: "hash_type",
|
||||
Advanced: false,
|
||||
Default: "md5",
|
||||
Help: `Choose how chunker handles hash sums.
|
||||
|
||||
All modes but "none" require metadata.`,
|
||||
Help: `Choose how chunker handles hash sums. All modes but "none" require metadata.`,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "none",
|
||||
Help: `Pass any hash supported by wrapped remote for non-chunked files.
|
||||
Return nothing otherwise.`,
|
||||
Help: `Pass any hash supported by wrapped remote for non-chunked files, return nothing otherwise`,
|
||||
}, {
|
||||
Value: "md5",
|
||||
Help: `MD5 for composite files.`,
|
||||
Help: `MD5 for composite files`,
|
||||
}, {
|
||||
Value: "sha1",
|
||||
Help: `SHA1 for composite files.`,
|
||||
Help: `SHA1 for composite files`,
|
||||
}, {
|
||||
Value: "md5all",
|
||||
Help: `MD5 for all files.`,
|
||||
Help: `MD5 for all files`,
|
||||
}, {
|
||||
Value: "sha1all",
|
||||
Help: `SHA1 for all files.`,
|
||||
Help: `SHA1 for all files`,
|
||||
}, {
|
||||
Value: "md5quick",
|
||||
Help: `Copying a file to chunker will request MD5 from the source.
|
||||
Falling back to SHA1 if unsupported.`,
|
||||
Help: `Copying a file to chunker will request MD5 from the source falling back to SHA1 if unsupported`,
|
||||
}, {
|
||||
Value: "sha1quick",
|
||||
Help: `Similar to "md5quick" but prefers SHA1 over MD5.`,
|
||||
Help: `Similar to "md5quick" but prefers SHA1 over MD5`,
|
||||
}},
|
||||
}, {
|
||||
Name: "fail_hard",
|
||||
@@ -443,10 +432,10 @@ func (f *Fs) setHashType(hashType string) error {
|
||||
f.hashFallback = true
|
||||
case "md5all":
|
||||
f.useMD5 = true
|
||||
f.hashAll = !f.base.Hashes().Contains(hash.MD5) || f.base.Features().SlowHash
|
||||
f.hashAll = !f.base.Hashes().Contains(hash.MD5)
|
||||
case "sha1all":
|
||||
f.useSHA1 = true
|
||||
f.hashAll = !f.base.Hashes().Contains(hash.SHA1) || f.base.Features().SlowHash
|
||||
f.hashAll = !f.base.Hashes().Contains(hash.SHA1)
|
||||
default:
|
||||
return fmt.Errorf("unsupported hash type '%s'", hashType)
|
||||
}
|
||||
@@ -823,7 +812,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
|
||||
tempEntries = append(tempEntries, wrapDir)
|
||||
default:
|
||||
if f.opt.FailHard {
|
||||
return nil, fmt.Errorf("unknown object type %T", entry)
|
||||
return nil, fmt.Errorf("Unknown object type %T", entry)
|
||||
}
|
||||
fs.Debugf(f, "unknown object type %T", entry)
|
||||
}
|
||||
@@ -1110,7 +1099,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
|
||||
|
||||
switch o.f.opt.MetaFormat {
|
||||
case "simplejson":
|
||||
if len(data) > maxMetadataSizeWritten {
|
||||
if data != nil && len(data) > maxMetadataSizeWritten {
|
||||
return "", nil // this was likely not a metadata object, return empty xactID but don't throw error
|
||||
}
|
||||
var metadata metaSimpleJSON
|
||||
@@ -1225,7 +1214,7 @@ func (f *Fs) put(
|
||||
// and skips the "EOF" read. Hence, switch to next limit here.
|
||||
if !(c.chunkLimit == 0 || c.chunkLimit == c.chunkSize || c.sizeTotal == -1 || c.done) {
|
||||
silentlyRemove(ctx, chunk)
|
||||
return nil, fmt.Errorf("destination ignored %d data bytes", c.chunkLimit)
|
||||
return nil, fmt.Errorf("Destination ignored %d data bytes", c.chunkLimit)
|
||||
}
|
||||
c.chunkLimit = c.chunkSize
|
||||
|
||||
@@ -1234,7 +1223,7 @@ func (f *Fs) put(
|
||||
|
||||
// Validate uploaded size
|
||||
if c.sizeTotal != -1 && c.readCount != c.sizeTotal {
|
||||
return nil, fmt.Errorf("incorrect upload size %d != %d", c.readCount, c.sizeTotal)
|
||||
return nil, fmt.Errorf("Incorrect upload size %d != %d", c.readCount, c.sizeTotal)
|
||||
}
|
||||
|
||||
// Check for input that looks like valid metadata
|
||||
@@ -1271,7 +1260,7 @@ func (f *Fs) put(
|
||||
sizeTotal += chunk.Size()
|
||||
}
|
||||
if sizeTotal != c.readCount {
|
||||
return nil, fmt.Errorf("incorrect chunks size %d != %d", sizeTotal, c.readCount)
|
||||
return nil, fmt.Errorf("Incorrect chunks size %d != %d", sizeTotal, c.readCount)
|
||||
}
|
||||
|
||||
// If previous object was chunked, remove its chunks
|
||||
@@ -2451,7 +2440,7 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1,
|
||||
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
|
||||
// Be strict about JSON format
|
||||
// to reduce possibility that a random small file resembles metadata.
|
||||
if len(data) > maxMetadataSizeWritten {
|
||||
if data != nil && len(data) > maxMetadataSizeWritten {
|
||||
return nil, false, ErrMetaTooBig
|
||||
}
|
||||
if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
|
||||
|
||||
@@ -12,8 +12,6 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/fspath"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
@@ -40,30 +38,6 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
|
||||
})
|
||||
}
|
||||
|
||||
type settings map[string]interface{}
|
||||
|
||||
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
|
||||
fsName := strings.Split(f.Name(), "{")[0] // strip off hash
|
||||
configMap := configmap.Simple{}
|
||||
for key, val := range opts {
|
||||
configMap[key] = fmt.Sprintf("%v", val)
|
||||
}
|
||||
rpath := fspath.JoinRootPath(f.Root(), path)
|
||||
remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), rpath)
|
||||
fixFs, err := fs.NewFs(ctx, remote)
|
||||
require.NoError(t, err)
|
||||
return fixFs
|
||||
}
|
||||
|
||||
var mtime1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||
|
||||
func testPutFile(ctx context.Context, t *testing.T, f fs.Fs, name, contents, message string, check bool) fs.Object {
|
||||
item := fstest.Item{Path: name, ModTime: mtime1}
|
||||
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
|
||||
assert.NotNil(t, obj, message)
|
||||
return obj
|
||||
}
|
||||
|
||||
// test chunk name parser
|
||||
func testChunkNameFormat(t *testing.T, f *Fs) {
|
||||
saveOpt := f.opt
|
||||
@@ -643,13 +617,22 @@ func testMetadataInput(t *testing.T, f *Fs) {
|
||||
}()
|
||||
f.opt.FailHard = false
|
||||
|
||||
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||
|
||||
putFile := func(f fs.Fs, name, contents, message string, check bool) fs.Object {
|
||||
item := fstest.Item{Path: name, ModTime: modTime}
|
||||
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
|
||||
assert.NotNil(t, obj, message)
|
||||
return obj
|
||||
}
|
||||
|
||||
runSubtest := func(contents, name string) {
|
||||
description := fmt.Sprintf("file with %s metadata", name)
|
||||
filename := path.Join(dir, name)
|
||||
require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")
|
||||
|
||||
part := testPutFile(ctx, t, f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
|
||||
_ = testPutFile(ctx, t, f, filename, contents, "upload "+description, false)
|
||||
part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
|
||||
_ = putFile(f, filename, contents, "upload "+description, false)
|
||||
|
||||
obj, err := f.NewObject(ctx, filename)
|
||||
assert.NoError(t, err, "access "+description)
|
||||
@@ -695,7 +678,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
|
||||
|
||||
// Test that chunker refuses to change on objects with future/unknown metadata
|
||||
func testFutureProof(t *testing.T, f *Fs) {
|
||||
if !f.useMeta {
|
||||
if f.opt.MetaFormat == "none" {
|
||||
t.Skip("this test requires metadata support")
|
||||
}
|
||||
|
||||
@@ -861,44 +844,6 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
|
||||
_ = operations.Purge(ctx, f.base, dir)
|
||||
}
|
||||
|
||||
// Test that md5all creates metadata even for small files
|
||||
func testMD5AllSlow(t *testing.T, f *Fs) {
|
||||
ctx := context.Background()
|
||||
fsResult := deriveFs(ctx, t, f, "md5all", settings{
|
||||
"chunk_size": "1P",
|
||||
"name_format": "*.#",
|
||||
"hash_type": "md5all",
|
||||
"transactions": "rename",
|
||||
"meta_format": "simplejson",
|
||||
})
|
||||
chunkFs, ok := fsResult.(*Fs)
|
||||
require.True(t, ok, "fs must be a chunker remote")
|
||||
baseFs := chunkFs.base
|
||||
if !baseFs.Features().SlowHash {
|
||||
t.Skipf("this test needs a base fs with slow hash, e.g. local")
|
||||
}
|
||||
|
||||
assert.True(t, chunkFs.useMD5, "must use md5")
|
||||
assert.True(t, chunkFs.hashAll, "must hash all files")
|
||||
|
||||
_ = testPutFile(ctx, t, chunkFs, "file", "-", "error", true)
|
||||
obj, err := chunkFs.NewObject(ctx, "file")
|
||||
require.NoError(t, err)
|
||||
sum, err := obj.Hash(ctx, hash.MD5)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "336d5ebc5436534e61d16e63ddfca327", sum)
|
||||
|
||||
list, err := baseFs.List(ctx, "")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, len(list))
|
||||
_, err = baseFs.NewObject(ctx, "file")
|
||||
assert.NoError(t, err, "metadata must be created")
|
||||
_, err = baseFs.NewObject(ctx, "file.1")
|
||||
assert.NoError(t, err, "first chunk must be created")
|
||||
|
||||
require.NoError(t, operations.Purge(ctx, baseFs, ""))
|
||||
}
|
||||
|
||||
// InternalTest dispatches all internal tests
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("PutLarge", func(t *testing.T) {
|
||||
@@ -931,9 +876,6 @@ func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("ChunkerServerSideMove", func(t *testing.T) {
|
||||
testChunkerServerSideMove(t, f)
|
||||
})
|
||||
t.Run("MD5AllSlow", func(t *testing.T) {
|
||||
testMD5AllSlow(t, f)
|
||||
})
|
||||
}
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
|
||||
|
||||
@@ -83,23 +83,23 @@ func init() {
|
||||
Name: "level",
|
||||
Help: `GZIP compression level (-2 to 9).
|
||||
|
||||
Generally -1 (default, equivalent to 5) is recommended.
|
||||
Levels 1 to 9 increase compression at the cost of speed. Going past 6
|
||||
generally offers very little return.
|
||||
|
||||
Level -2 uses Huffmann encoding only. Only use if you know what you
|
||||
are doing.
|
||||
Level 0 turns off compression.`,
|
||||
Generally -1 (default, equivalent to 5) is recommended.
|
||||
Levels 1 to 9 increase compressiong at the cost of speed.. Going past 6
|
||||
generally offers very little return.
|
||||
|
||||
Level -2 uses Huffmann encoding only. Only use if you now what you
|
||||
are doing
|
||||
Level 0 turns off compression.`,
|
||||
Default: sgzip.DefaultCompression,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "ram_cache_limit",
|
||||
Help: `Some remotes don't allow the upload of files with unknown size.
|
||||
In this case the compressed file will need to be cached to determine
|
||||
it's size.
|
||||
|
||||
Files smaller than this limit will be cached in RAM, files larger than
|
||||
this limit will be cached on disk.`,
|
||||
In this case the compressed file will need to be cached to determine
|
||||
it's size.
|
||||
|
||||
Files smaller than this limit will be cached in RAM, file larger than
|
||||
this limit will be cached on disk`,
|
||||
Default: fs.SizeSuffix(20 * 1024 * 1024),
|
||||
Advanced: true,
|
||||
}},
|
||||
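To make the level numbers in the help text above concrete, here is a small sketch using the standard library's compress/gzip; the compress backend uses its own sgzip package, so this is only an approximation of the behaviour.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"log"
)

func main() {
	data := bytes.Repeat([]byte("rclone compress example "), 1000)
	// Level 0 disables compression, 1-9 trade speed for ratio,
	// -1 picks the library default.
	for _, level := range []int{0, 1, 5, 9, gzip.DefaultCompression} {
		var buf bytes.Buffer
		w, err := gzip.NewWriterLevel(&buf, level)
		if err != nil {
			log.Fatal(err)
		}
		if _, err := w.Write(data); err != nil {
			log.Fatal(err)
		}
		if err := w.Close(); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("level %2d: %d -> %d bytes\n", level, len(data), buf.Len())
	}
}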
@@ -1260,7 +1260,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
|
||||
return o.Object.Open(ctx, options...)
|
||||
}
|
||||
// Get offset and limit from OpenOptions, pass the rest to the underlying remote
|
||||
var openOptions = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
|
||||
var openOptions []fs.OpenOption = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
|
||||
var offset, limit int64 = 0, -1
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
|
||||
@@ -30,7 +30,7 @@ func init() {
|
||||
CommandHelp: commandHelp,
|
||||
Options: []fs.Option{{
|
||||
Name: "remote",
|
||||
Help: "Remote to encrypt/decrypt.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
||||
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "filename_encryption",
|
||||
@@ -39,13 +39,13 @@ func init() {
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "standard",
|
||||
Help: "Encrypt the filenames.\nSee the docs for the details.",
|
||||
Help: "Encrypt the filenames see the docs for the details.",
|
||||
}, {
|
||||
Value: "obfuscate",
|
||||
Help: "Very simple filename obfuscation.",
|
||||
}, {
|
||||
Value: "off",
|
||||
Help: "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
|
||||
Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
@@ -71,7 +71,7 @@ NB If filename_encryption is "off" then this option will do nothing.`,
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "password2",
|
||||
Help: "Password or pass phrase for salt.\n\nOptional but recommended.\nShould be different to the previous password.",
|
||||
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "server_side_across_configs",
|
||||
@@ -363,11 +363,7 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
|
||||
// put implements Put or PutStream
|
||||
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
|
||||
if f.opt.NoDataEncryption {
|
||||
o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
|
||||
if err == nil && o != nil {
|
||||
o = f.newObject(o)
|
||||
}
|
||||
return o, err
|
||||
return put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
|
||||
}
|
||||
|
||||
// Encrypt the data into wrappedIn
|
||||
@@ -999,9 +995,6 @@ func (o *ObjectInfo) Size() int64 {
|
||||
if size < 0 {
|
||||
return size
|
||||
}
|
||||
if o.f.opt.NoDataEncryption {
|
||||
return size
|
||||
}
|
||||
return o.f.cipher.EncryptedSize(size)
|
||||
}
|
||||
|
||||
|
||||
@@ -32,7 +32,6 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/fspath"
|
||||
@@ -68,7 +67,7 @@ const (
|
||||
defaultScope = "drive"
|
||||
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
|
||||
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
|
||||
minChunkSize = fs.SizeSuffix(googleapi.MinUploadChunkSize)
|
||||
minChunkSize = 256 * fs.Kibi
|
||||
defaultChunkSize = 8 * fs.Mebi
|
||||
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
|
||||
listRGrouping = 50 // number of IDs to search at once when using ListR
|
||||
@@ -270,7 +269,7 @@ func init() {
|
||||
}},
|
||||
}, {
|
||||
Name: "root_folder_id",
|
||||
Help: `ID of the root folder.
|
||||
Help: `ID of the root folder
|
||||
Leave blank normally.
|
||||
|
||||
Fill in to access "Computers" folders (see docs), or for rclone to use
|
||||
@@ -278,15 +277,15 @@ a non root folder as its starting point.
|
||||
`,
|
||||
}, {
|
||||
Name: "service_account_file",
|
||||
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
|
||||
Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
|
||||
}, {
|
||||
Name: "service_account_credentials",
|
||||
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "team_drive",
|
||||
Help: "ID of the Shared Drive (Team Drive).",
|
||||
Help: "ID of the Shared Drive (Team Drive)",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
}, {
|
||||
@@ -297,12 +296,12 @@ a non root folder as its starting point.
|
||||
}, {
|
||||
Name: "use_trash",
|
||||
Default: true,
|
||||
Help: "Send files to the trash instead of deleting permanently.\n\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
|
||||
Help: "Send files to the trash instead of deleting permanently.\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "skip_gdocs",
|
||||
Default: false,
|
||||
Help: "Skip google documents in all listings.\n\nIf given, gdocs practically become invisible to rclone.",
|
||||
Help: "Skip google documents in all listings.\nIf given, gdocs practically become invisible to rclone.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "skip_checksum_gphotos",
|
||||
@@ -335,7 +334,7 @@ commands (copy, sync, etc.), and with all other commands too.`,
|
||||
}, {
|
||||
Name: "trashed_only",
|
||||
Default: false,
|
||||
Help: "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.",
|
||||
Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "starred_only",
|
||||
@@ -345,7 +344,7 @@ commands (copy, sync, etc.), and with all other commands too.`,
|
||||
}, {
|
||||
Name: "formats",
|
||||
Default: "",
|
||||
Help: "Deprecated: See export_formats.",
|
||||
Help: "Deprecated: see export_formats",
|
||||
Advanced: true,
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
}, {
|
||||
@@ -361,12 +360,12 @@ commands (copy, sync, etc.), and with all other commands too.`,
|
||||
}, {
|
||||
Name: "allow_import_name_change",
|
||||
Default: false,
|
||||
Help: "Allow the filetype to change when uploading Google docs.\n\nE.g. file.doc to file.docx. This will confuse sync and reupload every time.",
|
||||
Help: "Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_created_date",
|
||||
Default: false,
|
||||
Help: `Use file created date instead of modified date.
|
||||
Help: `Use file created date instead of modified date.,
|
||||
|
||||
Useful when downloading data and you want the creation date used in
|
||||
place of the last modified date.
|
||||
@@ -400,7 +399,7 @@ date is used.`,
|
||||
}, {
|
||||
Name: "list_chunk",
|
||||
Default: 1000,
|
||||
Help: "Size of listing chunk 100-1000, 0 to disable.",
|
||||
Help: "Size of listing chunk 100-1000. 0 to disable.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "impersonate",
|
||||
@@ -410,19 +409,17 @@ date is used.`,
|
||||
}, {
|
||||
Name: "alternate_export",
|
||||
Default: false,
|
||||
Help: "Deprecated: No longer needed.",
|
||||
Help: "Deprecated: no longer needed",
|
||||
Hide: fs.OptionHideBoth,
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Default: defaultChunkSize,
|
||||
Help: "Cutoff for switching to chunked upload.",
|
||||
Help: "Cutoff for switching to chunked upload",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Default: defaultChunkSize,
|
||||
Help: `Upload chunk size.
|
||||
|
||||
Must a power of 2 >= 256k.
|
||||
Help: `Upload chunk size. Must a power of 2 >= 256k.
|
||||
|
||||
Making this larger will improve performance, but note that each chunk
|
||||
is buffered in memory one per transfer.
|
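A quick illustration of the constraint stated above (a power of 2, at least 256k); the helper is hypothetical and not part of the backend.

package main

import "fmt"

// isValidChunkSize reports whether size is a power of two and >= 256 KiB,
// matching the constraint described in the help text above.
func isValidChunkSize(size int64) bool {
	const minSize = 256 * 1024
	return size >= minSize && size&(size-1) == 0
}

func main() {
	fmt.Println(isValidChunkSize(8 * 1024 * 1024)) // true  (8 MiB default)
	fmt.Println(isValidChunkSize(300 * 1024))      // false (not a power of two)
}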
||||
@@ -492,7 +489,7 @@ configurations.`,
|
||||
}, {
|
||||
Name: "disable_http2",
|
||||
Default: true,
|
||||
Help: `Disable drive using http2.
|
||||
Help: `Disable drive using http2
|
||||
|
||||
There is currently an unsolved issue with the google drive backend and
|
||||
HTTP/2. HTTP/2 is therefore disabled by default for the drive backend
|
||||
@@ -506,7 +503,7 @@ See: https://github.com/rclone/rclone/issues/3631
|
||||
}, {
|
||||
Name: "stop_on_upload_limit",
|
||||
Default: false,
|
||||
Help: `Make upload limit errors be fatal.
|
||||
Help: `Make upload limit errors be fatal
|
||||
|
||||
At the time of writing it is only possible to upload 750 GiB of data to
|
||||
Google Drive a day (this is an undocumented limit). When this limit is
|
||||
@@ -523,7 +520,7 @@ See: https://github.com/rclone/rclone/issues/3857
|
||||
}, {
|
||||
Name: "stop_on_download_limit",
|
||||
Default: false,
|
||||
Help: `Make download limit errors be fatal.
|
||||
Help: `Make download limit errors be fatal
|
||||
|
||||
At the time of writing it is only possible to download 10 TiB of data from
|
||||
Google Drive a day (this is an undocumented limit). When this limit is
|
||||
@@ -537,7 +534,7 @@ Google don't document so it may break in the future.
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "skip_shortcuts",
|
||||
Help: `If set skip shortcut files.
|
||||
Help: `If set skip shortcut files
|
||||
|
||||
Normally rclone dereferences shortcut files making them appear as if
|
||||
they are the original file (see [the shortcuts section](#shortcuts)).
|
||||
@@ -619,7 +616,6 @@ type Fs struct {
|
||||
client *http.Client // authorized client
|
||||
rootFolderID string // the id of the root folder
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
lastQuery string // Last query string to check in unit tests
|
||||
pacer *fs.Pacer // To pace the API calls
|
||||
exportExtensions []string // preferred extensions to download docs
|
||||
importMimeTypes []string // MIME types to convert to docs
|
||||
@@ -833,31 +829,11 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
|
||||
if filesOnly {
|
||||
query = append(query, fmt.Sprintf("mimeType!='%s'", driveFolderType))
|
||||
}
|
||||
|
||||
// Constrain query using filter if this remote is a sync/copy/walk source.
|
||||
if fi, use := filter.GetConfig(ctx), filter.GetUseFilter(ctx); fi != nil && use {
|
||||
queryByTime := func(op string, tm time.Time) {
|
||||
if tm.IsZero() {
|
||||
return
|
||||
}
|
||||
// https://developers.google.com/drive/api/v3/ref-search-terms#operators
|
||||
// Query times use RFC 3339 format, default timezone is UTC
|
||||
timeStr := tm.UTC().Format("2006-01-02T15:04:05")
|
||||
term := fmt.Sprintf("(modifiedTime %s '%s' or mimeType = '%s')", op, timeStr, driveFolderType)
|
||||
query = append(query, term)
|
||||
}
|
||||
queryByTime(">=", fi.ModTimeFrom)
|
||||
queryByTime("<=", fi.ModTimeTo)
|
||||
}
|
||||
|
||||
list := f.svc.Files.List()
|
||||
queryString := strings.Join(query, " and ")
|
||||
if queryString != "" {
|
||||
list.Q(queryString)
|
||||
// fs.Debugf(f, "list query: %q", queryString)
|
||||
if len(query) > 0 {
|
||||
list.Q(strings.Join(query, " and "))
|
||||
// fmt.Printf("list Query = %q\n", query)
|
||||
}
|
||||
f.lastQuery = queryString // for unit tests
|
||||
|
||||
if f.opt.ListChunk > 0 {
|
||||
list.PageSize(f.opt.ListChunk)
|
||||
}
|
||||
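For context on the query-building hunk above: the time clause appended to the Drive search is just the RFC 3339-style timestamp plus the folder MIME type, so directories are never filtered out. A standalone sketch (illustrative, not the backend's helper):

package main

import (
	"fmt"
	"time"
)

const driveFolderType = "application/vnd.google-apps.folder"

// timeTerm builds one query clause like the hunk above: keep files modified
// on the right side of tm, but always keep folders so directories still list.
func timeTerm(op string, tm time.Time) string {
	timeStr := tm.UTC().Format("2006-01-02T15:04:05")
	return fmt.Sprintf("(modifiedTime %s '%s' or mimeType = '%s')", op, timeStr, driveFolderType)
}

func main() {
	from := time.Date(2021, 10, 1, 12, 0, 0, 0, time.UTC)
	fmt.Println(timeTerm(">=", from))
	// (modifiedTime >= '2021-10-01T12:00:00' or mimeType = 'application/vnd.google-apps.folder')
}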
@@ -1376,7 +1352,7 @@ func (f *Fs) newObjectWithExportInfo(
|
||||
}
|
||||
switch {
|
||||
case info.MimeType == driveFolderType:
|
||||
return nil, fs.ErrorIsDir
|
||||
return nil, fs.ErrorNotAFile
|
||||
case info.MimeType == shortcutMimeType:
|
||||
// We can only get here if f.opt.SkipShortcuts is set
|
||||
// and not from a listing. This is unlikely.
|
||||
@@ -2147,7 +2123,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
|
||||
// Don't retry, return a retry error instead
|
||||
err = f.pacer.CallNoRetry(func() (bool, error) {
|
||||
info, err = f.svc.Files.Create(createInfo).
|
||||
Media(in, googleapi.ContentType(srcMimeType), googleapi.ChunkSize(0)).
|
||||
Media(in, googleapi.ContentType(srcMimeType)).
|
||||
Fields(partialFields).
|
||||
SupportsAllDrives(true).
|
||||
KeepRevisionForever(f.opt.KeepRevisionForever).
|
||||
@@ -2924,7 +2900,7 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
|
||||
}
|
||||
isDir = true
|
||||
} else if srcObj, err := srcFs.NewObject(ctx, srcPath); err != nil {
|
||||
if err != fs.ErrorIsDir {
|
||||
if err != fs.ErrorNotAFile {
|
||||
return nil, errors.Wrap(err, "can't find source")
|
||||
}
|
||||
// source was a directory
|
||||
@@ -2944,7 +2920,7 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
|
||||
if err != fs.ErrorObjectNotFound {
|
||||
if err == nil {
|
||||
err = errors.New("existing file")
|
||||
} else if err == fs.ErrorIsDir {
|
||||
} else if err == fs.ErrorNotAFile {
|
||||
err = errors.New("existing directory")
|
||||
}
|
||||
return nil, errors.Wrap(err, "not overwriting shortcut target")
|
||||
@@ -3164,7 +3140,7 @@ account.
|
||||
|
||||
Usage:
|
||||
|
||||
rclone backend [-o config] drives drive:
|
||||
rclone backend drives drive:
|
||||
|
||||
This will return a JSON list of objects like this
|
||||
|
||||
@@ -3181,22 +3157,6 @@ This will return a JSON list of objects like this
|
||||
}
|
||||
]
|
||||
|
||||
With the -o config parameter it will output the list in a format
|
||||
suitable for adding to a config file to make aliases for all the
|
||||
drives found.
|
||||
|
||||
[My Drive]
|
||||
type = alias
|
||||
remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
|
||||
|
||||
[Test Drive]
|
||||
type = alias
|
||||
remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
|
||||
|
||||
Adding this to the rclone config file will cause those team drives to
|
||||
be accessible with the aliases shown. This may require manual editing
|
||||
of the names.
|
||||
|
||||
`,
|
||||
}, {
|
||||
Name: "untrash",
|
||||
@@ -3308,21 +3268,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
||||
}
|
||||
return f.makeShortcut(ctx, arg[0], dstFs, arg[1])
|
||||
case "drives":
|
||||
drives, err := f.listTeamDrives(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, ok := opt["config"]; ok {
|
||||
lines := []string{}
|
||||
for _, drive := range drives {
|
||||
lines = append(lines, "")
|
||||
lines = append(lines, fmt.Sprintf("[%s]", drive.Name))
|
||||
lines = append(lines, fmt.Sprintf("type = alias"))
|
||||
lines = append(lines, fmt.Sprintf("remote = %s,team_drive=%s,root_folder_id=:", f.name, drive.Id))
|
||||
}
|
||||
return lines, nil
|
||||
}
|
||||
return drives, nil
|
||||
return f.listTeamDrives(ctx)
|
||||
case "untrash":
|
||||
dir := ""
|
||||
if len(arg) > 0 {
|
||||
@@ -3670,7 +3616,7 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
|
||||
// Don't retry, return a retry error instead
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
|
||||
Media(in, googleapi.ContentType(uploadMimeType), googleapi.ChunkSize(0)).
|
||||
Media(in, googleapi.ContentType(uploadMimeType)).
|
||||
Fields(partialFields).
|
||||
SupportsAllDrives(true).
|
||||
KeepRevisionForever(o.fs.opt.KeepRevisionForever).
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
@@ -18,10 +17,8 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
_ "github.com/rclone/rclone/backend/local"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/sync"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
@@ -464,81 +461,6 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
|
||||
func (f *Fs) InternalTestAgeQuery(t *testing.T) {
|
||||
opt := &filter.Opt{}
|
||||
err := opt.MaxAge.Set("1h")
|
||||
assert.NoError(t, err)
|
||||
flt, err := filter.NewFilter(opt)
|
||||
assert.NoError(t, err)
|
||||
|
||||
defCtx := context.Background()
|
||||
fltCtx := filter.ReplaceConfig(defCtx, flt)
|
||||
|
||||
testCtx1 := fltCtx
|
||||
testCtx2 := filter.SetUseFilter(testCtx1, true)
|
||||
testCtx3, testCancel := context.WithCancel(testCtx2)
|
||||
testCtx4 := filter.SetUseFilter(testCtx3, false)
|
||||
testCancel()
|
||||
assert.False(t, filter.GetUseFilter(testCtx1))
|
||||
assert.True(t, filter.GetUseFilter(testCtx2))
|
||||
assert.True(t, filter.GetUseFilter(testCtx3))
|
||||
assert.False(t, filter.GetUseFilter(testCtx4))
|
||||
|
||||
subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), "agequery-testdir")
|
||||
subFsResult, err := fs.NewFs(defCtx, subRemote)
|
||||
require.NoError(t, err)
|
||||
subFs, isDriveFs := subFsResult.(*Fs)
|
||||
require.True(t, isDriveFs)
|
||||
|
||||
tempDir1, err := ioutil.TempDir("", "rclone-drive-agequery1-test")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
_ = os.RemoveAll(tempDir1)
|
||||
}()
|
||||
tempFs1, err := fs.NewFs(defCtx, tempDir1)
|
||||
require.NoError(t, err)
|
||||
|
||||
tempDir2, err := ioutil.TempDir("", "rclone-drive-agequery2-test")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
_ = os.RemoveAll(tempDir2)
|
||||
}()
|
||||
tempFs2, err := fs.NewFs(defCtx, tempDir2)
|
||||
require.NoError(t, err)
|
||||
|
||||
file1 := fstest.Item{ModTime: time.Now(), Path: "agequery.txt"}
|
||||
_, _ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true)
|
||||
|
||||
// validate sync/copy
|
||||
const timeQuery = "(modifiedTime >= '"
|
||||
|
||||
assert.NoError(t, sync.CopyDir(defCtx, subFs, tempFs1, false))
|
||||
assert.NotContains(t, subFs.lastQuery, timeQuery)
|
||||
|
||||
assert.NoError(t, sync.CopyDir(fltCtx, subFs, tempFs1, false))
|
||||
assert.Contains(t, subFs.lastQuery, timeQuery)
|
||||
|
||||
assert.NoError(t, sync.CopyDir(fltCtx, tempFs2, subFs, false))
|
||||
assert.Contains(t, subFs.lastQuery, timeQuery)
|
||||
|
||||
assert.NoError(t, sync.CopyDir(defCtx, tempFs2, subFs, false))
|
||||
assert.NotContains(t, subFs.lastQuery, timeQuery)
|
||||
|
||||
// validate list/walk
|
||||
devNull, errOpen := os.OpenFile(os.DevNull, os.O_WRONLY, 0)
|
||||
require.NoError(t, errOpen)
|
||||
defer func() {
|
||||
_ = devNull.Close()
|
||||
}()
|
||||
|
||||
assert.NoError(t, operations.List(defCtx, subFs, devNull))
|
||||
assert.NotContains(t, subFs.lastQuery, timeQuery)
|
||||
|
||||
assert.NoError(t, operations.List(fltCtx, subFs, devNull))
|
||||
assert.Contains(t, subFs.lastQuery, timeQuery)
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
// These tests all depend on each other so run them as nested tests
|
||||
t.Run("DocumentImport", func(t *testing.T) {
|
||||
@@ -556,7 +478,6 @@ func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("Shortcuts", f.InternalTestShortcuts)
|
||||
t.Run("UnTrash", f.InternalTestUnTrash)
|
||||
t.Run("CopyID", f.InternalTestCopyID)
|
||||
t.Run("AgeQuery", f.InternalTestAgeQuery)
|
||||
}
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
|
||||
|
||||
@@ -12,8 +12,8 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/async"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
@@ -147,14 +147,8 @@ func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *f
|
||||
}
|
||||
var batchStatus *files.UploadSessionFinishBatchJobStatus
|
||||
sleepTime := 100 * time.Millisecond
|
||||
const maxSleepTime = 1 * time.Second
|
||||
startTime := time.Now()
|
||||
try := 1
|
||||
for {
|
||||
remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
|
||||
if remaining < 0 {
|
||||
break
|
||||
}
|
||||
const maxTries = 120
|
||||
for try := 1; try <= maxTries; try++ {
|
||||
err = b.f.pacer.Call(func() (bool, error) {
|
||||
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
|
||||
AsyncJobId: launchBatchStatus.AsyncJobId,
|
||||
@@ -162,25 +156,23 @@ func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *f
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
|
||||
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d/%d", sleepTime, err, try, maxTries)
|
||||
} else {
|
||||
if batchStatus.Tag == "complete" {
|
||||
fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
|
||||
return batchStatus.Complete, nil
|
||||
}
|
||||
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
|
||||
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d/%d", sleepTime, batchStatus.Tag, try, maxTries)
|
||||
}
|
||||
time.Sleep(sleepTime)
|
||||
sleepTime *= 2
|
||||
if sleepTime > maxSleepTime {
|
||||
sleepTime = maxSleepTime
|
||||
if sleepTime > time.Second {
|
||||
sleepTime = time.Second
|
||||
}
|
||||
try++
|
||||
}
|
||||
if err == nil {
|
||||
err = errors.New("batch didn't complete")
|
||||
}
|
||||
return nil, errors.Wrapf(err, "wait for batch failed after %d tries in %v", try, time.Since(startTime))
|
||||
return nil, errors.Wrapf(err, "wait for batch failed after %d tries", maxTries)
|
||||
}
|
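The wait loop in the hunk above polls the batch status with a sleep that doubles up to a one-second cap, and in the variant with batch_commit_timeout gives up after an overall deadline. A generic sketch of that pattern, with hypothetical names:

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollWithBackoff sketches the wait loop from the hunk above: call check
// until it reports done, sleeping 100ms, 200ms, 400ms... capped at maxSleep,
// and giving up once the overall deadline has passed.
func pollWithBackoff(deadline, maxSleep time.Duration, check func() (done bool, err error)) error {
	sleep := 100 * time.Millisecond
	start := time.Now()
	for time.Since(start) < deadline {
		done, err := check()
		if err == nil && done {
			return nil
		}
		time.Sleep(sleep)
		sleep *= 2
		if sleep > maxSleep {
			sleep = maxSleep
		}
	}
	return errors.New("batch didn't complete before the deadline")
}

func main() {
	tries := 0
	err := pollWithBackoff(2*time.Second, time.Second, func() (bool, error) {
		tries++
		return tries >= 3, nil // pretend the batch completes on the third poll
	})
	fmt.Println(tries, err)
}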
||||
|
||||
// commit a batch
|
||||
|
||||
@@ -31,13 +31,13 @@ import (
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/auth"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/common"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/sharing"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/team"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/users"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/backend/dropbox/dbhash"
|
||||
"github.com/rclone/rclone/fs"
|
||||
@@ -66,7 +66,7 @@ const (
|
||||
//
|
||||
// Speed vs chunk size uploading a 1 GiB file on 2017-11-22
|
||||
//
|
||||
// Chunk Size MiB, Speed MiB/s, % of max
|
||||
// Chunk Size MiB, Speed MiByte/s, % of max
|
||||
// 1 1.364 11%
|
||||
// 2 2.443 19%
|
||||
// 4 4.288 33%
|
||||
@@ -154,7 +154,7 @@ func init() {
|
||||
},
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "chunk_size",
|
||||
Help: fmt.Sprintf(`Upload chunk size (< %v).
|
||||
Help: fmt.Sprintf(`Upload chunk size. (< %v).
|
||||
|
||||
Any files larger than this will be uploaded in chunks of this size.
|
||||
|
||||
@@ -252,7 +252,7 @@ maximise throughput.
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "batch_timeout",
|
||||
Help: `Max time to allow an idle upload batch before uploading.
|
||||
Help: `Max time to allow an idle upload batch before uploading
|
||||
|
||||
If an upload batch is idle for more than this long then it will be
|
||||
uploaded.
|
||||
@@ -266,11 +266,6 @@ default based on the batch_mode in use.
|
||||
`,
|
||||
Default: fs.Duration(0),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "batch_commit_timeout",
|
||||
Help: `Max time to wait for a batch to finish comitting`,
|
||||
Default: fs.Duration(10 * time.Minute),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -290,16 +285,15 @@ default based on the batch_mode in use.
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
Impersonate string `config:"impersonate"`
|
||||
SharedFiles bool `config:"shared_files"`
|
||||
SharedFolders bool `config:"shared_folders"`
|
||||
BatchMode string `config:"batch_mode"`
|
||||
BatchSize int `config:"batch_size"`
|
||||
BatchTimeout fs.Duration `config:"batch_timeout"`
|
||||
BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
|
||||
AsyncBatch bool `config:"async_batch"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
Impersonate string `config:"impersonate"`
|
||||
SharedFiles bool `config:"shared_files"`
|
||||
SharedFolders bool `config:"shared_folders"`
|
||||
BatchMode string `config:"batch_mode"`
|
||||
BatchSize int `config:"batch_size"`
|
||||
BatchTimeout fs.Duration `config:"batch_timeout"`
|
||||
AsyncBatch bool `config:"async_batch"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
// Fs represents a remote dropbox server
|
||||
@@ -580,7 +574,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
|
||||
// headerGenerator for dropbox sdk
|
||||
func (f *Fs) headerGenerator(hostType string, namespace string, route string) map[string]string {
|
||||
func (f *Fs) headerGenerator(hostType string, style string, namespace string, route string) map[string]string {
|
||||
if f.ns == "" {
|
||||
return map[string]string{}
|
||||
}
|
||||
@@ -630,9 +624,6 @@ func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *fi
|
||||
}
|
||||
fileInfo, ok := entry.(*files.FileMetadata)
|
||||
if !ok {
|
||||
if _, ok = entry.(*files.FolderMetadata); ok {
|
||||
return nil, fs.ErrorIsDir
|
||||
}
|
||||
return nil, fs.ErrorNotAFile
|
||||
}
|
||||
return fileInfo, nil
|
||||
@@ -794,7 +785,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
|
||||
fs: f,
|
||||
url: entry.PreviewUrl,
|
||||
remote: entryPath,
|
||||
modTime: *entry.TimeInvited,
|
||||
modTime: entry.TimeInvited,
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1169,7 +1160,14 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
}
|
||||
if expire < fs.DurationOff {
|
||||
expiryTime := time.Now().Add(time.Duration(expire)).UTC().Round(time.Second)
|
||||
createArg.Settings.Expires = &expiryTime
|
||||
createArg.Settings.Expires = expiryTime
|
||||
}
|
||||
// FIXME note we can't set Settings for non enterprise dropbox
|
||||
// because of https://github.com/dropbox/dropbox-sdk-go-unofficial/issues/75
|
||||
// however this only goes wrong when we set Expires, so as a
|
||||
// work-around remove Settings unless expire is set.
|
||||
if expire == fs.DurationOff {
|
||||
createArg.Settings = nil
|
||||
}
|
||||
|
||||
var linkRes sharing.IsSharedLinkMetadata
|
||||
@@ -1743,8 +1741,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))
|
||||
commitInfo.Mode.Tag = "overwrite"
|
||||
// The Dropbox API only accepts timestamps in UTC with second precision.
|
||||
clientModified := src.ModTime(ctx).UTC().Round(time.Second)
|
||||
commitInfo.ClientModified = &clientModified
|
||||
commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
|
||||
// Don't attempt to create filenames that are too long
|
||||
if cErr := checkPathLength(commitInfo.Path); cErr != nil {
|
||||
return cErr
|
||||
@@ -1769,7 +1766,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
// This will only happen if we are uploading async batches
|
||||
if entry == nil {
|
||||
o.bytes = size
|
||||
o.modTime = *commitInfo.ClientModified
|
||||
o.modTime = commitInfo.ClientModified
|
||||
o.hash = "" // we don't have this
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -37,21 +37,21 @@ func init() {
|
||||
Description: "1Fichier",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
|
||||
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
|
||||
Name: "api_key",
|
||||
}, {
|
||||
Help: "If you want to download a shared folder, add this parameter.",
|
||||
Help: "If you want to download a shared folder, add this parameter",
|
||||
Name: "shared_folder",
|
||||
Required: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Help: "If you want to download a shared file that is password protected, add this parameter.",
|
||||
Help: "If you want to download a shared file that is password protected, add this parameter",
|
||||
Name: "file_password",
|
||||
Required: false,
|
||||
Advanced: true,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Help: "If you want to list the files in a shared folder that is password protected, add this parameter.",
|
||||
Help: "If you want to list the files in a shared folder that is password protected, add this parameter",
|
||||
Name: "folder_password",
|
||||
Required: false,
|
||||
Advanced: true,
|
||||
|
||||
@@ -69,29 +69,11 @@ func (i *Int) UnmarshalJSON(data []byte) error {
|
||||
return json.Unmarshal(data, (*int)(i))
|
||||
}
|
||||
|
||||
// String represents an string which can be represented in JSON as a
|
||||
// quoted string or an integer.
|
||||
type String string
|
||||
|
||||
// MarshalJSON turns a String into JSON
|
||||
func (s *String) MarshalJSON() (out []byte, err error) {
|
||||
return json.Marshal((*string)(s))
|
||||
}
|
||||
|
||||
// UnmarshalJSON turns JSON into a String
|
||||
func (s *String) UnmarshalJSON(data []byte) error {
|
||||
err := json.Unmarshal(data, (*string)(s))
|
||||
if err != nil {
|
||||
*s = String(data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Status return returned in all status responses
|
||||
type Status struct {
|
||||
Code string `json:"status"`
|
||||
Message string `json:"statusmessage"`
|
||||
TaskID String `json:"taskid"`
|
||||
TaskID string `json:"taskid"`
|
||||
// Warning string `json:"warning"` // obsolete
|
||||
}
|
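The String type in the hunk above exists because the taskid field can arrive either as a quoted string or as a bare number. A self-contained sketch of that tolerant decoding (the surrounding struct is illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

// String accepts either a JSON string or a bare number, as in the hunk above.
type String string

// UnmarshalJSON falls back to the raw bytes when the value is not quoted.
func (s *String) UnmarshalJSON(data []byte) error {
	if err := json.Unmarshal(data, (*string)(s)); err != nil {
		*s = String(data)
	}
	return nil
}

type status struct {
	TaskID String `json:"taskid"`
}

func main() {
	for _, in := range []string{`{"taskid":"123"}`, `{"taskid":123}`} {
		var st status
		if err := json.Unmarshal([]byte(in), &st); err != nil {
			fmt.Println("error:", err)
			continue
		}
		fmt.Printf("%q\n", st.TaskID) // "123" in both cases
	}
}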
||||
|
||||
|
||||
@@ -65,7 +65,7 @@ func init() {
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "url",
|
||||
Help: "URL of the Enterprise File Fabric to connect to.",
|
||||
Help: "URL of the Enterprise File Fabric to connect to",
|
||||
Required: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "https://storagemadeeasy.com",
|
||||
@@ -79,15 +79,14 @@ func init() {
|
||||
}},
|
||||
}, {
|
||||
Name: "root_folder_id",
|
||||
Help: `ID of the root folder.
|
||||
|
||||
Help: `ID of the root folder
|
||||
Leave blank normally.
|
||||
|
||||
Fill in to make rclone start with directory of a given ID.
|
||||
`,
|
||||
}, {
|
||||
Name: "permanent_token",
|
||||
Help: `Permanent Authentication Token.
|
||||
Help: `Permanent Authentication Token
|
||||
|
||||
A Permanent Authentication Token can be created in the Enterprise File
|
||||
Fabric, on the users Dashboard under Security, there is an entry
|
||||
@@ -100,7 +99,7 @@ For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
|
||||
`,
|
||||
}, {
|
||||
Name: "token",
|
||||
Help: `Session Token.
|
||||
Help: `Session Token
|
||||
|
||||
This is a session token which rclone caches in the config file. It is
|
||||
usually valid for 1 hour.
|
||||
@@ -110,14 +109,14 @@ Don't set this value - rclone will set it automatically.
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "token_expiry",
|
||||
Help: `Token expiry time.
|
||||
Help: `Token expiry time
|
||||
|
||||
Don't set this value - rclone will set it automatically.
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "version",
|
||||
Help: `Version read from the file fabric.
|
||||
Help: `Version read from the file fabric
|
||||
|
||||
Don't set this value - rclone will set it automatically.
|
||||
`,
|
||||
@@ -223,14 +222,13 @@ var retryStatusCodes = []struct {
|
||||
// delete in that folder. Please try again later or use
|
||||
// another name. (error_background)
|
||||
code: "error_background",
|
||||
sleep: 1 * time.Second,
|
||||
sleep: 6 * time.Second,
|
||||
},
|
||||
}
|
||||
|
||||
// shouldRetry returns a boolean as to whether this resp and err
|
||||
// deserve to be retried. It returns the err as a convenience
|
||||
// try should be the number of the tries so far, counting up from 1
|
||||
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, status api.OKError, try int) (bool, error) {
|
||||
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, status api.OKError) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
@@ -246,10 +244,9 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
|
||||
for _, retryCode := range retryStatusCodes {
|
||||
if code == retryCode.code {
|
||||
if retryCode.sleep > 0 {
|
||||
// make this thread only sleep exponentially increasing extra time
|
||||
sleepTime := retryCode.sleep << (try - 1)
|
||||
fs.Debugf(f, "Sleeping for %v to wait for %q error to clear", sleepTime, retryCode.code)
|
||||
time.Sleep(sleepTime)
|
||||
// make this thread only sleep extra time
|
||||
fs.Debugf(f, "Sleeping for %v to wait for %q error to clear", retryCode.sleep, retryCode.code)
|
||||
time.Sleep(retryCode.sleep)
|
||||
}
|
||||
return true, err
|
||||
}
|
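The retry hunk above scales the per-code sleep by the attempt number with a left shift, i.e. 1s, 2s, 4s, ... for tries 1, 2, 3. A tiny illustrative helper:

package main

import (
	"fmt"
	"time"
)

// backoffFor mirrors the shift in the hunk above: the base sleep for a
// retryable status code is doubled for every additional attempt.
func backoffFor(base time.Duration, try int) time.Duration {
	return base << (try - 1)
}

func main() {
	for try := 1; try <= 4; try++ {
		fmt.Println(try, backoffFor(time.Second, try))
	}
	// 1 1s, 2 2s, 3 4s, 4 8s
}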
||||
@@ -403,13 +400,11 @@ func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKEr
|
||||
ContentType: "application/x-www-form-urlencoded",
|
||||
Options: options,
|
||||
}
|
||||
try := 0
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
try++
|
||||
// Refresh the body each retry
|
||||
opts.Body = strings.NewReader(data.Encode())
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, result)
|
||||
return f.shouldRetry(ctx, resp, err, result, try)
|
||||
return f.shouldRetry(ctx, resp, err, result)
|
||||
})
|
||||
if err != nil {
|
||||
return resp, err
|
||||
@@ -844,7 +839,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
}
|
||||
|
||||
// Wait for the the background task to complete if necessary
|
||||
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) {
|
||||
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID string) (err error) {
|
||||
if taskID == "" || taskID == "0" {
|
||||
// No task to wait for
|
||||
return nil
|
||||
@@ -1094,7 +1089,7 @@ func (o *Object) Size() int64 {
|
||||
// setMetaData sets the metadata from info
|
||||
func (o *Object) setMetaData(info *api.Item) (err error) {
|
||||
if info.Type != api.ItemTypeFile {
|
||||
return fs.ErrorIsDir
|
||||
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
|
||||
}
|
||||
o.hasMetaData = true
|
||||
o.size = info.Size
|
||||
@@ -1283,11 +1278,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
var contentLength = size
|
||||
opts.ContentLength = &contentLength // NB CallJSON scribbles on this which is naughty
|
||||
}
|
||||
try := 0
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
try++
|
||||
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &uploader)
|
||||
return o.fs.shouldRetry(ctx, resp, err, nil, try)
|
||||
return o.fs.shouldRetry(ctx, resp, err, nil)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to upload")
|
||||
|
||||
@@ -48,23 +48,26 @@ func init() {
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "host",
|
||||
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
|
||||
Help: "FTP host to connect to",
|
||||
Required: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "ftp.example.com",
|
||||
Help: "Connect to ftp.example.com",
|
||||
}},
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "FTP username, leave blank for current username, " + currentUser + ".",
|
||||
Help: "FTP username, leave blank for current username, " + currentUser,
|
||||
}, {
|
||||
Name: "port",
|
||||
Help: "FTP port, leave blank to use default (21).",
|
||||
Help: "FTP port, leave blank to use default (21)",
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "FTP password.",
|
||||
Help: "FTP password",
|
||||
IsPassword: true,
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "tls",
|
||||
Help: `Use Implicit FTPS (FTP over TLS).
|
||||
|
||||
Help: `Use Implicit FTPS (FTP over TLS)
|
||||
When using implicit FTP over TLS the client connects using TLS
|
||||
right from the start which breaks compatibility with
|
||||
non-TLS-aware servers. This is usually served over port 990 rather
|
||||
@@ -72,41 +75,35 @@ than port 21. Cannot be used in combination with explicit FTP.`,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "explicit_tls",
|
||||
Help: `Use Explicit FTPS (FTP over TLS).
|
||||
|
||||
Help: `Use Explicit FTPS (FTP over TLS)
|
||||
When using explicit FTP over TLS the client explicitly requests
|
||||
security from the server in order to upgrade a plain text connection
|
||||
to an encrypted one. Cannot be used in combination with implicit FTP.`,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "concurrency",
|
||||
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited.",
|
||||
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
|
||||
Default: 0,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_check_certificate",
|
||||
Help: "Do not verify the TLS certificate of the server.",
|
||||
Help: "Do not verify the TLS certificate of the server",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "disable_epsv",
|
||||
Help: "Disable using EPSV even if server advertises support.",
|
||||
Help: "Disable using EPSV even if server advertises support",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "disable_mlsd",
|
||||
Help: "Disable using MLSD even if server advertises support.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "writing_mdtm",
|
||||
Help: "Use MDTM to set modification time (VsFtpd quirk)",
|
||||
Help: "Disable using MLSD even if server advertises support",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "idle_timeout",
|
||||
Default: fs.Duration(60 * time.Second),
|
||||
Help: `Max time before closing idle connections.
|
||||
Help: `Max time before closing idle connections
|
||||
|
||||
If no connections have been returned to the connection pool in the time
|
||||
given, rclone will empty the connection pool.
|
||||
@@ -119,43 +116,17 @@ Set to 0 to keep connections indefinitely.
|
||||
Help: "Maximum time to wait for a response to close.",
|
||||
Default: fs.Duration(60 * time.Second),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "tls_cache_size",
|
||||
Help: `Size of TLS session cache for all control and data connections.
|
||||
|
||||
TLS cache allows to resume TLS sessions and reuse PSK between connections.
|
||||
Increase if default size is not enough resulting in TLS resumption errors.
|
||||
Enabled by default. Use 0 to disable.`,
|
||||
Default: 32,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "disable_tls13",
|
||||
Help: "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "shut_timeout",
|
||||
Help: "Maximum time to wait for data connection closing status.",
|
||||
Default: fs.Duration(60 * time.Second),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
// The FTP protocol can't handle trailing spaces
|
||||
// (for instance, pureftpd turns them into '_')
|
||||
// The FTP protocol can't handle trailing spaces (for instance
|
||||
// pureftpd turns them into _)
|
||||
//
|
||||
// proftpd can't handle '*' in file names
|
||||
// pureftpd can't handle '[', ']' or '*'
|
||||
Default: (encoder.Display |
|
||||
encoder.EncodeRightSpace),
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "Asterisk,Ctl,Dot,Slash",
|
||||
Help: "ProFTPd can't handle '*' in file names",
|
||||
}, {
|
||||
Value: "BackSlash,Ctl,Del,Dot,RightSpace,Slash,SquareBracket",
|
||||
Help: "PureFTPd can't handle '[]' or '*' in file names",
|
||||
}, {
|
||||
Value: "Ctl,LeftPeriod,Slash",
|
||||
Help: "VsFTPd can't handle file names starting with dot",
|
||||
}},
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -168,16 +139,12 @@ type Options struct {
|
||||
Port string `config:"port"`
|
||||
TLS bool `config:"tls"`
|
||||
ExplicitTLS bool `config:"explicit_tls"`
|
||||
TLSCacheSize int `config:"tls_cache_size"`
|
||||
DisableTLS13 bool `config:"disable_tls13"`
|
||||
Concurrency int `config:"concurrency"`
|
||||
SkipVerifyTLSCert bool `config:"no_check_certificate"`
|
||||
DisableEPSV bool `config:"disable_epsv"`
|
||||
DisableMLSD bool `config:"disable_mlsd"`
|
||||
WritingMDTM bool `config:"writing_mdtm"`
|
||||
IdleTimeout fs.Duration `config:"idle_timeout"`
|
||||
CloseTimeout fs.Duration `config:"close_timeout"`
|
||||
ShutTimeout fs.Duration `config:"shut_timeout"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
@@ -198,9 +165,6 @@ type Fs struct {
|
||||
tokens *pacer.TokenDispenser
|
||||
tlsConf *tls.Config
|
||||
pacer *fs.Pacer // pacer for FTP connections
|
||||
fGetTime bool // true if the ftp library accepts GetTime
|
||||
fSetTime bool // true if the ftp library accepts SetTime
|
||||
fLstTime bool // true if the List call returns precise time
|
||||
}
|
||||
|
||||
// Object describes an FTP file
|
||||
@@ -215,7 +179,6 @@ type FileInfo struct {
|
||||
Name string
|
||||
Size uint64
|
||||
ModTime time.Time
|
||||
precise bool // true if the time is precise
|
||||
IsDir bool
|
||||
}
|
||||
|
||||
@@ -327,12 +290,6 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
||||
if f.opt.DisableMLSD {
|
||||
ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
|
||||
}
|
||||
if f.opt.ShutTimeout != 0 && f.opt.ShutTimeout != fs.DurationOff {
|
||||
ftpConfig = append(ftpConfig, ftp.DialWithShutTimeout(time.Duration(f.opt.ShutTimeout)))
|
||||
}
|
||||
if f.opt.WritingMDTM {
|
||||
ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
|
||||
}
|
||||
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
|
||||
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
|
||||
}
|
||||
@@ -470,12 +427,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
||||
ServerName: opt.Host,
|
||||
InsecureSkipVerify: opt.SkipVerifyTLSCert,
|
||||
}
|
||||
if opt.TLSCacheSize > 0 {
|
||||
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
|
||||
}
|
||||
if opt.DisableTLS13 {
|
||||
tlsConfig.MaxVersion = tls.VersionTLS12
|
||||
}
|
||||
}
|
||||
u := protocol + path.Join(dialAddr+"/", root)
|
||||
ci := fs.GetConfig(ctx)
|
||||
@@ -504,12 +455,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "NewFs")
|
||||
}
|
||||
f.fGetTime = c.IsGetTimeSupported()
|
||||
f.fSetTime = c.IsSetTimeSupported()
|
||||
f.fLstTime = c.IsTimePreciseInList()
|
||||
if !f.fLstTime && f.fGetTime {
|
||||
f.features.SlowModTime = true
|
||||
}
|
||||
f.putFtpConnection(&c, nil)
|
||||
if root != "" {
|
||||
// Check to see if the root is actually an existing file
|
||||
@@ -628,12 +573,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
o.info = &FileInfo{
|
||||
info := &FileInfo{
|
||||
Name: remote,
|
||||
Size: entry.Size,
|
||||
ModTime: entry.Time,
|
||||
precise: f.fLstTime,
|
||||
}
|
||||
o.info = info
|
||||
|
||||
return o, nil
|
||||
}
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
@@ -728,7 +674,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
Name: newremote,
|
||||
Size: object.Size,
|
||||
ModTime: object.Time,
|
||||
precise: f.fLstTime,
|
||||
}
|
||||
o.info = info
|
||||
entries = append(entries, o)
|
||||
@@ -742,18 +687,8 @@ func (f *Fs) Hashes() hash.Set {
return 0
}

// Precision shows whether modified time is supported or not depending on the
// FTP server capabilities, namely whether FTP server:
// - accepts the MDTM command to get file time (fGetTime)
// or supports MLSD returning precise file time in the list (fLstTime)
// - accepts the MFMT command to set file time (fSetTime)
// or non-standard form of the MDTM command (fSetTime, too)
// used by VsFtpd for the same purpose (WritingMDTM)
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
// Precision shows Modified Time not supported
func (f *Fs) Precision() time.Duration {
if (f.fGetTime || f.fLstTime) && f.fSetTime {
return time.Second
}
return fs.ModTimeNotSupported
}

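A short hedged example of what this precision reporting means for callers (the control flow below is illustrative only, not part of the change):

if f.Precision() == fs.ModTimeNotSupported {
	// The server can neither read nor write precise times, so sync logic
	// should fall back to size-only comparison.
} else {
	// Safe to compare modification times rounded to f.Precision().
}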
@@ -805,7 +740,6 @@ func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err erro
|
||||
Name: remote,
|
||||
Size: file.Size,
|
||||
ModTime: file.Time,
|
||||
precise: f.fLstTime,
|
||||
IsDir: file.Type == ftp.EntryTypeFolder,
|
||||
}
|
||||
return info, nil
|
||||
@@ -991,41 +925,12 @@ func (o *Object) Size() int64 {
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
if !o.info.precise && o.fs.fGetTime {
|
||||
c, err := o.fs.getFtpConnection(ctx)
|
||||
if err == nil {
|
||||
path := path.Join(o.fs.root, o.remote)
|
||||
path = o.fs.opt.Enc.FromStandardPath(path)
|
||||
modTime, err := c.GetTime(path)
|
||||
if err == nil && o.info != nil {
|
||||
o.info.ModTime = modTime
|
||||
o.info.precise = true
|
||||
}
|
||||
o.fs.putFtpConnection(&c, err)
|
||||
}
|
||||
}
|
||||
return o.info.ModTime
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the object
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
if !o.fs.fSetTime {
|
||||
fs.Errorf(o.fs, "SetModTime is not supported")
|
||||
return nil
|
||||
}
|
||||
c, err := o.fs.getFtpConnection(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
path := path.Join(o.fs.root, o.remote)
|
||||
path = o.fs.opt.Enc.FromStandardPath(path)
|
||||
err = c.SetTime(path, modTime.In(time.UTC))
|
||||
if err == nil && o.info != nil {
|
||||
o.info.ModTime = modTime
|
||||
o.info.precise = true
|
||||
}
|
||||
o.fs.putFtpConnection(&c, err)
|
||||
return err
|
||||
return nil
|
||||
}
|
||||
|
||||
// Storable returns a boolean as to whether this object is storable
|
||||
@@ -1058,11 +963,7 @@ func (f *ftpReadCloser) Close() error {
|
||||
errchan <- f.rc.Close()
|
||||
}()
|
||||
// Wait for Close for up to 60 seconds by default
|
||||
closeTimeout := f.f.opt.CloseTimeout
|
||||
if closeTimeout == 0 {
|
||||
closeTimeout = fs.DurationOff
|
||||
}
|
||||
timer := time.NewTimer(time.Duration(closeTimeout))
|
||||
timer := time.NewTimer(time.Duration(f.f.opt.CloseTimeout))
|
||||
select {
|
||||
case err = <-errchan:
|
||||
timer.Stop()
|
||||
@@ -1161,15 +1062,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
if err != nil {
|
||||
_ = c.Quit() // toss this connection to avoid sync errors
|
||||
// recycle connection in advance to let remove() find free token
|
||||
o.fs.putFtpConnection(nil, err)
|
||||
remove()
|
||||
o.fs.putFtpConnection(nil, err)
|
||||
return errors.Wrap(err, "update stor")
|
||||
}
|
||||
o.fs.putFtpConnection(&c, nil)
|
||||
if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
|
||||
return errors.Wrap(err, "SetModTime")
|
||||
}
|
||||
o.info, err = o.fs.getInfo(ctx, path)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "update getinfo")
|
||||
|
||||
@@ -1,115 +0,0 @@
|
||||
package ftp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type settings map[string]interface{}
|
||||
|
||||
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
|
||||
fsName := strings.Split(f.Name(), "{")[0] // strip off hash
|
||||
configMap := configmap.Simple{}
|
||||
for key, val := range opts {
|
||||
configMap[key] = fmt.Sprintf("%v", val)
|
||||
}
|
||||
remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), f.Root())
|
||||
fixFs, err := fs.NewFs(ctx, remote)
|
||||
require.NoError(t, err)
|
||||
return fixFs
|
||||
}
|
||||
|
||||
// test that big file uploads do not cause network i/o timeout
|
||||
func (f *Fs) testUploadTimeout(t *testing.T) {
|
||||
const (
|
||||
fileSize = 100000000 // 100 MiB
|
||||
idleTimeout = 40 * time.Millisecond // small because test server is local
|
||||
maxTime = 5 * time.Second // prevent test hangup
|
||||
)
|
||||
|
||||
if testing.Short() {
|
||||
t.Skip("not running with -short")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
ci := fs.GetConfig(ctx)
|
||||
saveLowLevelRetries := ci.LowLevelRetries
|
||||
saveTimeout := ci.Timeout
|
||||
defer func() {
|
||||
ci.LowLevelRetries = saveLowLevelRetries
|
||||
ci.Timeout = saveTimeout
|
||||
}()
|
||||
ci.LowLevelRetries = 1
|
||||
ci.Timeout = idleTimeout
|
||||
|
||||
upload := func(concurrency int, shutTimeout time.Duration) (obj fs.Object, err error) {
|
||||
fixFs := deriveFs(ctx, t, f, settings{
|
||||
"concurrency": concurrency,
|
||||
"shut_timeout": shutTimeout,
|
||||
})
|
||||
|
||||
// Make test object
|
||||
fileTime := fstest.Time("2020-03-08T09:30:00.000000000Z")
|
||||
meta := object.NewStaticObjectInfo("upload-timeout.test", fileTime, int64(fileSize), true, nil, nil)
|
||||
data := readers.NewPatternReader(int64(fileSize))
|
||||
|
||||
// Run upload and ensure maximum time
|
||||
done := make(chan bool)
|
||||
deadline := time.After(maxTime)
|
||||
go func() {
|
||||
obj, err = fixFs.Put(ctx, data, meta)
|
||||
done <- true
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
case <-deadline:
|
||||
t.Fatalf("Upload got stuck for %v !", maxTime)
|
||||
}
|
||||
|
||||
return obj, err
|
||||
}
|
||||
|
||||
// non-zero shut_timeout should fix i/o errors
|
||||
obj, err := upload(f.opt.Concurrency, time.Second)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, obj)
|
||||
if obj != nil {
|
||||
_ = obj.Remove(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
// rclone must support precise time with ProFtpd and PureFtpd out of the box.
|
||||
// The VsFtpd server does not support the MFMT command to set file time like
|
||||
// other servers but by default supports the MDTM command in the non-standard
|
||||
// two-argument form for the same purpose.
|
||||
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
|
||||
func (f *Fs) testTimePrecision(t *testing.T) {
|
||||
name := f.Name()
|
||||
if pos := strings.Index(name, "{"); pos != -1 {
|
||||
name = name[:pos]
|
||||
}
|
||||
switch name {
|
||||
case "TestFTPProftpd", "TestFTPPureftpd", "TestFTPVsftpd":
|
||||
assert.LessOrEqual(t, f.Precision(), time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
// InternalTest dispatches all internal tests
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("UploadTimeout", f.testUploadTimeout)
|
||||
t.Run("TimePrecision", f.testTimePrecision)
|
||||
}
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
|
||||
@@ -9,27 +9,25 @@ import (
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against rclone FTP server
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestFTPRclone:",
|
||||
NilObject: (*ftp.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
// TestIntegrationProftpd runs integration tests against proFTPd
|
||||
func TestIntegrationProftpd(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("skipping as -remote is set")
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestFTPProftpd:",
|
||||
NilObject: (*ftp.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
// TestIntegrationPureftpd runs integration tests against pureFTPd
|
||||
func TestIntegrationPureftpd(t *testing.T) {
|
||||
func TestIntegration2(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("skipping as -remote is set")
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestFTPRclone:",
|
||||
NilObject: (*ftp.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
func TestIntegration3(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("skipping as -remote is set")
|
||||
}
|
||||
@@ -39,13 +37,12 @@ func TestIntegrationPureftpd(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
// TestIntegrationVsftpd runs integration tests against vsFTPd
|
||||
func TestIntegrationVsftpd(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("skipping as -remote is set")
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestFTPVsftpd:",
|
||||
NilObject: (*ftp.Object)(nil),
|
||||
})
|
||||
}
|
||||
// func TestIntegration4(t *testing.T) {
|
||||
// if *fstest.RemoteName != "" {
|
||||
// t.Skip("skipping as -remote is set")
|
||||
// }
|
||||
// fstests.Run(t, &fstests.Opt{
|
||||
// RemoteName: "TestFTPVsftpd:",
|
||||
// NilObject: (*ftp.Object)(nil),
|
||||
// })
|
||||
// }
|
||||
|
||||
@@ -89,58 +89,58 @@ func init() {
|
||||
},
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "project_number",
|
||||
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
|
||||
Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
|
||||
}, {
|
||||
Name: "service_account_file",
|
||||
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
|
||||
Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
|
||||
}, {
|
||||
Name: "service_account_credentials",
|
||||
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Hide: fs.OptionHideBoth,
|
||||
}, {
|
||||
Name: "anonymous",
|
||||
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
|
||||
Help: "Access public buckets and objects without credentials\nSet to 'true' if you just want to download files and don't configure credentials.",
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "object_acl",
|
||||
Help: "Access Control List for new objects.",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "authenticatedRead",
|
||||
Help: "Object owner gets OWNER access.\nAll Authenticated Users get READER access.",
|
||||
Help: "Object owner gets OWNER access, and all Authenticated Users get READER access.",
|
||||
}, {
|
||||
Value: "bucketOwnerFullControl",
|
||||
Help: "Object owner gets OWNER access.\nProject team owners get OWNER access.",
|
||||
Help: "Object owner gets OWNER access, and project team owners get OWNER access.",
|
||||
}, {
|
||||
Value: "bucketOwnerRead",
|
||||
Help: "Object owner gets OWNER access.\nProject team owners get READER access.",
|
||||
Help: "Object owner gets OWNER access, and project team owners get READER access.",
|
||||
}, {
|
||||
Value: "private",
|
||||
Help: "Object owner gets OWNER access.\nDefault if left blank.",
|
||||
Help: "Object owner gets OWNER access [default if left blank].",
|
||||
}, {
|
||||
Value: "projectPrivate",
|
||||
Help: "Object owner gets OWNER access.\nProject team members get access according to their roles.",
|
||||
Help: "Object owner gets OWNER access, and project team members get access according to their roles.",
|
||||
}, {
|
||||
Value: "publicRead",
|
||||
Help: "Object owner gets OWNER access.\nAll Users get READER access.",
|
||||
Help: "Object owner gets OWNER access, and all Users get READER access.",
|
||||
}},
|
||||
}, {
|
||||
Name: "bucket_acl",
|
||||
Help: "Access Control List for new buckets.",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "authenticatedRead",
|
||||
Help: "Project team owners get OWNER access.\nAll Authenticated Users get READER access.",
|
||||
Help: "Project team owners get OWNER access, and all Authenticated Users get READER access.",
|
||||
}, {
|
||||
Value: "private",
|
||||
Help: "Project team owners get OWNER access.\nDefault if left blank.",
|
||||
Help: "Project team owners get OWNER access [default if left blank].",
|
||||
}, {
|
||||
Value: "projectPrivate",
|
||||
Help: "Project team members get access according to their roles.",
|
||||
}, {
|
||||
Value: "publicRead",
|
||||
Help: "Project team owners get OWNER access.\nAll Users get READER access.",
|
||||
Help: "Project team owners get OWNER access, and all Users get READER access.",
|
||||
}, {
|
||||
Value: "publicReadWrite",
|
||||
Help: "Project team owners get OWNER access.\nAll Users get WRITER access.",
|
||||
Help: "Project team owners get OWNER access, and all Users get WRITER access.",
|
||||
}},
|
||||
}, {
|
||||
Name: "bucket_policy_only",
|
||||
@@ -163,64 +163,64 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||
Help: "Location for the newly created buckets.",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Empty for default location (US)",
|
||||
Help: "Empty for default location (US).",
|
||||
}, {
|
||||
Value: "asia",
|
||||
Help: "Multi-regional location for Asia",
|
||||
Help: "Multi-regional location for Asia.",
|
||||
}, {
|
||||
Value: "eu",
|
||||
Help: "Multi-regional location for Europe",
|
||||
Help: "Multi-regional location for Europe.",
|
||||
}, {
|
||||
Value: "us",
|
||||
Help: "Multi-regional location for United States",
|
||||
Help: "Multi-regional location for United States.",
|
||||
}, {
|
||||
Value: "asia-east1",
|
||||
Help: "Taiwan",
|
||||
Help: "Taiwan.",
|
||||
}, {
|
||||
Value: "asia-east2",
|
||||
Help: "Hong Kong",
|
||||
Help: "Hong Kong.",
|
||||
}, {
|
||||
Value: "asia-northeast1",
|
||||
Help: "Tokyo",
|
||||
Help: "Tokyo.",
|
||||
}, {
|
||||
Value: "asia-south1",
|
||||
Help: "Mumbai",
|
||||
Help: "Mumbai.",
|
||||
}, {
|
||||
Value: "asia-southeast1",
|
||||
Help: "Singapore",
|
||||
Help: "Singapore.",
|
||||
}, {
|
||||
Value: "australia-southeast1",
|
||||
Help: "Sydney",
|
||||
Help: "Sydney.",
|
||||
}, {
|
||||
Value: "europe-north1",
|
||||
Help: "Finland",
|
||||
Help: "Finland.",
|
||||
}, {
|
||||
Value: "europe-west1",
|
||||
Help: "Belgium",
|
||||
Help: "Belgium.",
|
||||
}, {
|
||||
Value: "europe-west2",
|
||||
Help: "London",
|
||||
Help: "London.",
|
||||
}, {
|
||||
Value: "europe-west3",
|
||||
Help: "Frankfurt",
|
||||
Help: "Frankfurt.",
|
||||
}, {
|
||||
Value: "europe-west4",
|
||||
Help: "Netherlands",
|
||||
Help: "Netherlands.",
|
||||
}, {
|
||||
Value: "us-central1",
|
||||
Help: "Iowa",
|
||||
Help: "Iowa.",
|
||||
}, {
|
||||
Value: "us-east1",
|
||||
Help: "South Carolina",
|
||||
Help: "South Carolina.",
|
||||
}, {
|
||||
Value: "us-east4",
|
||||
Help: "Northern Virginia",
|
||||
Help: "Northern Virginia.",
|
||||
}, {
|
||||
Value: "us-west1",
|
||||
Help: "Oregon",
|
||||
Help: "Oregon.",
|
||||
}, {
|
||||
Value: "us-west2",
|
||||
Help: "California",
|
||||
Help: "California.",
|
||||
}},
|
||||
}, {
|
||||
Name: "storage_class",
|
||||
|
||||
@@ -20,7 +20,6 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/backend/googlephotos/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
@@ -29,7 +28,6 @@ import (
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/log"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
@@ -132,7 +130,7 @@ you want to read the media.`,
|
||||
}, {
|
||||
Name: "start_year",
|
||||
Default: 2000,
|
||||
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year.`,
|
||||
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "include_archived",
|
||||
@@ -151,24 +149,16 @@ listings and transferred.
|
||||
Without this flag, archived media will not be visible in directory
|
||||
listings and won't be transferred.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
Default: (encoder.Base |
|
||||
encoder.EncodeCrLf |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}}...),
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
ReadOnly bool `config:"read_only"`
|
||||
ReadSize bool `config:"read_size"`
|
||||
StartYear int `config:"start_year"`
|
||||
IncludeArchived bool `config:"include_archived"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
ReadOnly bool `config:"read_only"`
|
||||
ReadSize bool `config:"read_size"`
|
||||
StartYear int `config:"start_year"`
|
||||
IncludeArchived bool `config:"include_archived"`
|
||||
}
|
||||
|
||||
// Fs represents a remote storage server
|
||||
@@ -506,9 +496,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
|
||||
lastID = newAlbums[len(newAlbums)-1].ID
|
||||
}
|
||||
for i := range newAlbums {
|
||||
anAlbum := newAlbums[i]
|
||||
anAlbum.Title = f.opt.Enc.FromStandardPath(anAlbum.Title)
|
||||
all.add(&anAlbum)
|
||||
all.add(&newAlbums[i])
|
||||
}
|
||||
if result.NextPageToken == "" {
|
||||
break
|
||||
|
||||
@@ -1,179 +0,0 @@
|
||||
package hasher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/fspath"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/lib/kv"
|
||||
)
|
||||
|
||||
// Command the backend to run a named command
|
||||
//
|
||||
// The command run is name
|
||||
// args may be used to read arguments from
|
||||
// opts may be used to read optional arguments from
|
||||
//
|
||||
// The result should be capable of being JSON encoded
|
||||
// If it is a string or a []string it will be shown to the user
|
||||
// otherwise it will be JSON encoded and shown to the user like that
|
||||
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
||||
switch name {
|
||||
case "drop":
|
||||
return nil, f.db.Stop(true)
|
||||
case "dump", "fulldump":
|
||||
return nil, f.dbDump(ctx, name == "fulldump", "")
|
||||
case "import", "stickyimport":
|
||||
sticky := name == "stickyimport"
|
||||
if len(arg) != 2 {
|
||||
return nil, errors.New("please provide checksum type and path to sum file")
|
||||
}
|
||||
return nil, f.dbImport(ctx, arg[0], arg[1], sticky)
|
||||
default:
|
||||
return nil, fs.ErrorCommandNotFound
|
||||
}
|
||||
}
|
||||
|
||||
var commandHelp = []fs.CommandHelp{{
|
||||
Name: "drop",
|
||||
Short: "Drop cache",
|
||||
Long: `Completely drop checksum cache.
|
||||
Usage Example:
|
||||
rclone backend drop hasher:
|
||||
`,
|
||||
}, {
|
||||
Name: "dump",
|
||||
Short: "Dump the database",
|
||||
Long: "Dump cache records covered by the current remote",
|
||||
}, {
|
||||
Name: "fulldump",
|
||||
Short: "Full dump of the database",
|
||||
Long: "Dump all cache records in the database",
|
||||
}, {
|
||||
Name: "import",
|
||||
Short: "Import a SUM file",
|
||||
Long: `Amend hash cache from a SUM file and bind checksums to files by size/time.
|
||||
Usage Example:
|
||||
rclone backend import hasher:subdir md5 /path/to/sum.md5
|
||||
`,
|
||||
}, {
|
||||
Name: "stickyimport",
|
||||
Short: "Perform fast import of a SUM file",
|
||||
Long: `Fill hash cache from a SUM file without verifying file fingerprints.
|
||||
Usage Example:
|
||||
rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5
|
||||
`,
|
||||
}}
|
||||
|
||||
func (f *Fs) dbDump(ctx context.Context, full bool, root string) error {
|
||||
if root == "" {
|
||||
remoteFs, err := cache.Get(ctx, f.opt.Remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
root = fspath.JoinRootPath(remoteFs.Root(), f.Root())
|
||||
}
|
||||
op := &kvDump{
|
||||
full: full,
|
||||
root: root,
|
||||
path: f.db.Path(),
|
||||
fs: f,
|
||||
}
|
||||
err := f.db.Do(false, op)
|
||||
if err == kv.ErrEmpty {
|
||||
fs.Infof(op.path, "empty")
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bool) error {
|
||||
var hashType hash.Type
|
||||
if err := hashType.Set(hashName); err != nil {
|
||||
return err
|
||||
}
|
||||
if hashType == hash.None {
|
||||
return errors.New("please provide a valid hash type")
|
||||
}
|
||||
if !f.suppHashes.Contains(hashType) {
|
||||
return errors.New("unsupported hash type")
|
||||
}
|
||||
if !f.keepHashes.Contains(hashType) {
|
||||
fs.Infof(nil, "Need not import hashes of this type")
|
||||
return nil
|
||||
}
|
||||
|
||||
_, sumPath, err := fspath.SplitFs(sumRemote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sumFs, err := cache.Get(ctx, sumRemote)
|
||||
switch err {
|
||||
case fs.ErrorIsFile:
|
||||
// ok
|
||||
case nil:
|
||||
return errors.Errorf("not a file: %s", sumRemote)
|
||||
default:
|
||||
return err
|
||||
}
|
||||
|
||||
sumObj, err := sumFs.NewObject(ctx, path.Base(sumPath))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "cannot open sum file")
|
||||
}
|
||||
hashes, err := operations.ParseSumFile(ctx, sumObj)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to parse sum file")
|
||||
}
|
||||
|
||||
if sticky {
|
||||
rootPath := f.Fs.Root()
|
||||
for remote, hashVal := range hashes {
|
||||
key := path.Join(rootPath, remote)
|
||||
hashSums := operations.HashSums{hashName: hashVal}
|
||||
if err := f.putRawHashes(ctx, key, anyFingerprint, hashSums); err != nil {
|
||||
fs.Errorf(nil, "%s: failed to import: %v", remote, err)
|
||||
}
|
||||
}
|
||||
fs.Infof(nil, "Summary: %d checksum(s) imported", len(hashes))
|
||||
return nil
|
||||
}
|
||||
|
||||
const longImportThreshold = 100
|
||||
if len(hashes) > longImportThreshold {
|
||||
fs.Infof(nil, "Importing %d checksums. Please wait...", len(hashes))
|
||||
}
|
||||
|
||||
doneCount := 0
|
||||
err = operations.ListFn(ctx, f, func(obj fs.Object) {
|
||||
remote := obj.Remote()
|
||||
hash := hashes[remote]
|
||||
hashes[remote] = "" // mark as handled
|
||||
o, ok := obj.(*Object)
|
||||
if ok && hash != "" {
|
||||
if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
|
||||
fs.Errorf(nil, "%s: failed to import: %v", remote, err)
|
||||
}
|
||||
accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
|
||||
doneCount++
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Import failed: %v", err)
|
||||
}
|
||||
skipCount := 0
|
||||
for remote, emptyOrDone := range hashes {
|
||||
if emptyOrDone != "" {
|
||||
fs.Infof(nil, "Skip vanished object: %s", remote)
|
||||
skipCount++
|
||||
}
|
||||
}
|
||||
fs.Infof(nil, "Summary: %d imported, %d skipped", doneCount, skipCount)
|
||||
return err
|
||||
}
|
||||
@@ -1,508 +0,0 @@
|
||||
// Package hasher implements a checksum handling overlay backend
|
||||
package hasher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/fspath"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/kv"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "hasher",
|
||||
Description: "Better checksums for other remotes",
|
||||
NewFs: NewFs,
|
||||
CommandHelp: commandHelp,
|
||||
Options: []fs.Option{{
|
||||
Name: "remote",
|
||||
Required: true,
|
||||
Help: "Remote to cache checksums for (e.g. myRemote:path).",
|
||||
}, {
|
||||
Name: "hashes",
|
||||
Default: fs.CommaSepList{"md5", "sha1"},
|
||||
Advanced: false,
|
||||
Help: "Comma separated list of supported checksum types.",
|
||||
}, {
|
||||
Name: "max_age",
|
||||
Advanced: false,
|
||||
Default: fs.DurationOff,
|
||||
Help: "Maximum time to keep checksums in cache (0 = no cache, off = cache forever).",
|
||||
}, {
|
||||
Name: "auto_size",
|
||||
Advanced: true,
|
||||
Default: fs.SizeSuffix(0),
|
||||
Help: "Auto-update checksum for files smaller than this size (disabled by default).",
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
Hashes fs.CommaSepList `config:"hashes"`
AutoSize fs.SizeSuffix `config:"auto_size"`
MaxAge fs.Duration `config:"max_age"`
}
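From a caller's point of view these options can also be supplied on the fly via a connection string; a hedged sketch (the path, hash list and surrounding program are placeholders, and registration of the hasher backend, e.g. via the backend/all import, is assumed):

ctx := context.Background()
f, err := fs.NewFs(ctx, ":hasher,remote='/tmp/data',hashes='md5,sha1',max_age=off:")
if err != nil {
	log.Fatal(err)
}
fmt.Println("supported hashes:", f.Hashes())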
|
||||
// Fs represents a wrapped fs.Fs
|
||||
type Fs struct {
|
||||
fs.Fs
|
||||
name string
|
||||
root string
|
||||
wrapper fs.Fs
|
||||
features *fs.Features
|
||||
opt *Options
|
||||
db *kv.DB
|
||||
// fingerprinting
|
||||
fpTime bool // true if using time in fingerprints
|
||||
fpHash hash.Type // hash type to use in fingerprints or None
|
||||
// hash types triaged by groups
|
||||
suppHashes hash.Set // all supported checksum types
|
||||
passHashes hash.Set // passed directly to the base without caching
|
||||
slowHashes hash.Set // passed to the base and then cached
|
||||
autoHashes hash.Set // calculated in-house and cached
|
||||
keepHashes hash.Set // checksums to keep in cache (slow + auto)
|
||||
}
|
||||
|
||||
var warnExperimental sync.Once
|
||||
|
||||
// NewFs constructs an Fs from the remote:path string
|
||||
func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs.Fs, error) {
|
||||
if !kv.Supported() {
|
||||
return nil, errors.New("hasher is not supported on this OS")
|
||||
}
|
||||
warnExperimental.Do(func() {
|
||||
fs.Infof(nil, "Hasher is EXPERIMENTAL!")
|
||||
})
|
||||
|
||||
opt := &Options{}
|
||||
err := configstruct.Set(cmap, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if strings.HasPrefix(opt.Remote, fsname+":") {
|
||||
return nil, errors.New("can't point remote at itself")
|
||||
}
|
||||
remotePath := fspath.JoinRootPath(opt.Remote, rpath)
|
||||
baseFs, err := cache.Get(ctx, remotePath)
|
||||
if err != nil && err != fs.ErrorIsFile {
|
||||
return nil, errors.Wrapf(err, "failed to derive base remote %q", opt.Remote)
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
Fs: baseFs,
|
||||
name: fsname,
|
||||
root: rpath,
|
||||
opt: opt,
|
||||
}
|
||||
baseFeatures := baseFs.Features()
|
||||
f.fpTime = baseFs.Precision() != fs.ModTimeNotSupported
|
||||
|
||||
if baseFeatures.SlowHash {
|
||||
f.slowHashes = f.Fs.Hashes()
|
||||
} else {
|
||||
f.passHashes = f.Fs.Hashes()
|
||||
f.fpHash = f.passHashes.GetOne()
|
||||
}
|
||||
|
||||
f.suppHashes = f.passHashes
|
||||
f.suppHashes.Add(f.slowHashes.Array()...)
|
||||
|
||||
for _, hashName := range opt.Hashes {
|
||||
var ht hash.Type
|
||||
if err := ht.Set(hashName); err != nil {
|
||||
return nil, errors.Errorf("invalid token %q in hash string %q", hashName, opt.Hashes.String())
|
||||
}
|
||||
if !f.slowHashes.Contains(ht) {
|
||||
f.autoHashes.Add(ht)
|
||||
}
|
||||
f.keepHashes.Add(ht)
|
||||
f.suppHashes.Add(ht)
|
||||
}
|
||||
|
||||
fs.Debugf(f, "Groups by usage: cached %s, passed %s, auto %s, slow %s, supported %s",
|
||||
f.keepHashes, f.passHashes, f.autoHashes, f.slowHashes, f.suppHashes)
|
||||
|
||||
var nilSet hash.Set
|
||||
if f.keepHashes == nilSet {
|
||||
return nil, errors.New("configured hash_names have nothing to keep in cache")
|
||||
}
|
||||
|
||||
if f.opt.MaxAge > 0 {
|
||||
gob.Register(hashRecord{})
|
||||
db, err := kv.Start(ctx, "hasher", f.Fs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.db = db
|
||||
}
|
||||
|
||||
stubFeatures := &fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
IsLocal: true,
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
}
|
||||
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
|
||||
|
||||
cache.PinUntilFinalized(f.Fs, f)
|
||||
return f, err
|
||||
}
|
||||
|
||||
//
|
||||
// Filesystem
|
||||
//
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string { return f.name }
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string { return f.root }
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features { return f.features }
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set { return f.suppHashes }
|
||||
|
||||
// String returns a description of the FS
|
||||
// The "hasher::" prefix is a distinctive feature.
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("hasher::%s:%s", f.name, f.root)
|
||||
}
|
||||
|
||||
// UnWrap returns the Fs that this Fs is wrapping
|
||||
func (f *Fs) UnWrap() fs.Fs { return f.Fs }
|
||||
|
||||
// WrapFs returns the Fs that is wrapping this Fs
|
||||
func (f *Fs) WrapFs() fs.Fs { return f.wrapper }
|
||||
|
||||
// SetWrapper sets the Fs that is wrapping this Fs
|
||||
func (f *Fs) SetWrapper(wrapper fs.Fs) { f.wrapper = wrapper }
|
||||
|
||||
// Wrap base entries into hasher entries.
|
||||
func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries, err error) {
|
||||
hashEntries = baseEntries[:0] // work inplace
|
||||
for _, entry := range baseEntries {
|
||||
switch x := entry.(type) {
|
||||
case fs.Object:
|
||||
hashEntries = append(hashEntries, f.wrapObject(x, nil))
|
||||
default:
|
||||
hashEntries = append(hashEntries, entry) // trash in - trash out
|
||||
}
|
||||
}
|
||||
return hashEntries, nil
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
if entries, err = f.Fs.List(ctx, dir); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.wrapEntries(entries)
|
||||
}
|
||||
|
||||
// ListR lists the objects and directories recursively into out.
|
||||
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
||||
return f.Fs.Features().ListR(ctx, dir, func(baseEntries fs.DirEntries) error {
|
||||
hashEntries, err := f.wrapEntries(baseEntries)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return callback(hashEntries)
|
||||
})
|
||||
}
|
||||
|
||||
// Purge a directory
|
||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
if do := f.Fs.Features().Purge; do != nil {
|
||||
if err := do(ctx, dir); err != nil {
|
||||
return err
|
||||
}
|
||||
err := f.db.Do(true, &kvPurge{
|
||||
dir: path.Join(f.Fs.Root(), dir),
|
||||
})
|
||||
if err != nil {
|
||||
fs.Errorf(f, "Failed to purge some hashes: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return fs.ErrorCantPurge
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with undeterminate size.
|
||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
if do := f.Fs.Features().PutStream; do != nil {
|
||||
_ = f.pruneHash(src.Remote())
|
||||
oResult, err := do(ctx, in, src, options...)
|
||||
return f.wrapObject(oResult, err), err
|
||||
}
|
||||
return nil, errors.New("PutStream not supported")
|
||||
}
|
||||
|
||||
// PutUnchecked uploads the object, allowing duplicates.
|
||||
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
if do := f.Fs.Features().PutUnchecked; do != nil {
|
||||
_ = f.pruneHash(src.Remote())
|
||||
oResult, err := do(ctx, in, src, options...)
|
||||
return f.wrapObject(oResult, err), err
|
||||
}
|
||||
return nil, errors.New("PutUnchecked not supported")
|
||||
}
|
||||
|
||||
// pruneHash deletes hash for a path
|
||||
func (f *Fs) pruneHash(remote string) error {
|
||||
return f.db.Do(true, &kvPrune{
|
||||
key: path.Join(f.Fs.Root(), remote),
|
||||
})
|
||||
}
|
||||
|
||||
// CleanUp the trash in the Fs
|
||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
if do := f.Fs.Features().CleanUp; do != nil {
|
||||
return do(ctx)
|
||||
}
|
||||
return errors.New("CleanUp not supported")
|
||||
}
|
||||
|
||||
// About gets quota information from the Fs
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
if do := f.Fs.Features().About; do != nil {
|
||||
return do(ctx)
|
||||
}
|
||||
return nil, errors.New("About not supported")
|
||||
}
|
||||
|
||||
// ChangeNotify calls the passed function with a path that has had changes.
|
||||
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
|
||||
if do := f.Fs.Features().ChangeNotify; do != nil {
|
||||
do(ctx, notifyFunc, pollIntervalChan)
|
||||
}
|
||||
}
|
||||
|
||||
// UserInfo returns info about the connected user
|
||||
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
|
||||
if do := f.Fs.Features().UserInfo; do != nil {
|
||||
return do(ctx)
|
||||
}
|
||||
return nil, fs.ErrorNotImplemented
|
||||
}
|
||||
|
||||
// Disconnect the current user
|
||||
func (f *Fs) Disconnect(ctx context.Context) error {
|
||||
if do := f.Fs.Features().Disconnect; do != nil {
|
||||
return do(ctx)
|
||||
}
|
||||
return fs.ErrorNotImplemented
|
||||
}
|
||||
|
||||
// MergeDirs merges the contents of all the directories passed
|
||||
// in into the first one and rmdirs the other directories.
|
||||
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
|
||||
if do := f.Fs.Features().MergeDirs; do != nil {
|
||||
return do(ctx, dirs)
|
||||
}
|
||||
return errors.New("MergeDirs not supported")
|
||||
}
|
||||
|
||||
// DirCacheFlush resets the directory cache - used in testing
|
||||
// as an optional interface
|
||||
func (f *Fs) DirCacheFlush() {
|
||||
if do := f.Fs.Features().DirCacheFlush; do != nil {
|
||||
do()
|
||||
}
|
||||
}
|
||||
|
||||
// PublicLink generates a public link to the remote path (usually readable by anyone)
|
||||
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
|
||||
if do := f.Fs.Features().PublicLink; do != nil {
|
||||
return do(ctx, remote, expire, unlink)
|
||||
}
|
||||
return "", errors.New("PublicLink not supported")
|
||||
}
|
||||
|
||||
// Copy src to this remote using server-side copy operations.
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
do := f.Fs.Features().Copy
|
||||
if do == nil {
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
o, ok := src.(*Object)
|
||||
if !ok {
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
oResult, err := do(ctx, o.Object, remote)
|
||||
return f.wrapObject(oResult, err), err
|
||||
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
do := f.Fs.Features().Move
|
||||
if do == nil {
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
o, ok := src.(*Object)
|
||||
if !ok {
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
oResult, err := do(ctx, o.Object, remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_ = f.db.Do(true, &kvMove{
|
||||
src: path.Join(f.Fs.Root(), src.Remote()),
|
||||
dst: path.Join(f.Fs.Root(), remote),
|
||||
dir: false,
|
||||
fs: f,
|
||||
})
|
||||
return f.wrapObject(oResult, nil), nil
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote using server-side move operations.
|
||||
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
|
||||
do := f.Fs.Features().DirMove
|
||||
if do == nil {
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
err := do(ctx, srcFs.Fs, srcRemote, dstRemote)
|
||||
if err == nil {
|
||||
_ = f.db.Do(true, &kvMove{
|
||||
src: path.Join(srcFs.Fs.Root(), srcRemote),
|
||||
dst: path.Join(f.Fs.Root(), dstRemote),
|
||||
dir: true,
|
||||
fs: f,
|
||||
})
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Shutdown the backend, closing any background tasks and any cached connections.
|
||||
func (f *Fs) Shutdown(ctx context.Context) (err error) {
|
||||
err = f.db.Stop(false)
|
||||
if do := f.Fs.Features().Shutdown; do != nil {
|
||||
if err2 := do(ctx); err2 != nil {
|
||||
err = err2
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
o, err := f.Fs.NewObject(ctx, remote)
|
||||
return f.wrapObject(o, err), err
|
||||
}
|
||||
|
||||
//
|
||||
// Object
|
||||
//
|
||||
|
||||
// Object represents a composite file wrapping one or more data chunks
|
||||
type Object struct {
|
||||
fs.Object
|
||||
f *Fs
|
||||
}
|
||||
|
||||
// Wrap base object into hasher object
|
||||
func (f *Fs) wrapObject(o fs.Object, err error) *Object {
|
||||
if err != nil || o == nil {
|
||||
return nil
|
||||
}
|
||||
return &Object{Object: o, f: f}
|
||||
}
|
||||
|
||||
// Fs returns read only access to the Fs that this object is part of
|
||||
func (o *Object) Fs() fs.Info { return o.f }
|
||||
|
||||
// UnWrap returns the wrapped Object
|
||||
func (o *Object) UnWrap() fs.Object { return o.Object }
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.Object.String()
|
||||
}
|
||||
|
||||
// ID returns the ID of the Object if possible
|
||||
func (o *Object) ID() string {
|
||||
if doer, ok := o.Object.(fs.IDer); ok {
|
||||
return doer.ID()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// GetTier returns the Tier of the Object if possible
|
||||
func (o *Object) GetTier() string {
|
||||
if doer, ok := o.Object.(fs.GetTierer); ok {
|
||||
return doer.GetTier()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// SetTier set the Tier of the Object if possible
|
||||
func (o *Object) SetTier(tier string) error {
|
||||
if doer, ok := o.Object.(fs.SetTierer); ok {
|
||||
return doer.SetTier(tier)
|
||||
}
|
||||
return errors.New("SetTier not supported")
|
||||
}
|
||||
|
||||
// MimeType of an Object if known, "" otherwise
|
||||
func (o *Object) MimeType(ctx context.Context) string {
|
||||
if doer, ok := o.Object.(fs.MimeTyper); ok {
|
||||
return doer.MimeType(ctx)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.Commander = (*Fs)(nil)
|
||||
_ fs.PutUncheckeder = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.CleanUpper = (*Fs)(nil)
|
||||
_ fs.UnWrapper = (*Fs)(nil)
|
||||
_ fs.ListRer = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Wrapper = (*Fs)(nil)
|
||||
_ fs.MergeDirser = (*Fs)(nil)
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.UserInfoer = (*Fs)(nil)
|
||||
_ fs.Disconnecter = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.ObjectUnWrapper = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
_ fs.SetTierer = (*Object)(nil)
|
||||
_ fs.GetTierer = (*Object)(nil)
|
||||
_ fs.MimeTyper = (*Object)(nil)
|
||||
)
|
||||
@@ -1,78 +0,0 @@
|
||||
package hasher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/rclone/rclone/lib/kv"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func putFile(ctx context.Context, t *testing.T, f fs.Fs, name, data string) fs.Object {
|
||||
mtime1 := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||
item := fstest.Item{Path: name, ModTime: mtime1}
|
||||
_, o := fstests.PutTestContents(ctx, t, f, &item, data, true)
|
||||
require.NotNil(t, o)
|
||||
return o
|
||||
}
|
||||
|
||||
func (f *Fs) testUploadFromCrypt(t *testing.T) {
|
||||
// make a temporary local remote
|
||||
tempRoot, err := fstest.LocalRemote()
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
_ = os.RemoveAll(tempRoot)
|
||||
}()
|
||||
|
||||
// make a temporary crypt remote
|
||||
ctx := context.Background()
|
||||
pass := obscure.MustObscure("crypt")
|
||||
remote := fmt.Sprintf(":crypt,remote=%s,password=%s:", tempRoot, pass)
|
||||
cryptFs, err := fs.NewFs(ctx, remote)
|
||||
require.NoError(t, err)
|
||||
|
||||
// make a test file on the crypt remote
|
||||
const dirName = "from_crypt_1"
|
||||
const fileName = dirName + "/file_from_crypt_1"
|
||||
const longTime = fs.ModTimeNotSupported
|
||||
src := putFile(ctx, t, cryptFs, fileName, "doggy froggy")
|
||||
|
||||
// ensure that hash does not exist yet
|
||||
_ = f.pruneHash(fileName)
|
||||
hashType := f.keepHashes.GetOne()
|
||||
hash, err := f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, hash)
|
||||
|
||||
// upload file to hasher
|
||||
in, err := src.Open(ctx)
|
||||
require.NoError(t, err)
|
||||
dst, err := f.Put(ctx, in, src)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, dst)
|
||||
|
||||
// check that hash was created
|
||||
hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, hash)
|
||||
//t.Logf("hash is %q", hash)
|
||||
_ = operations.Purge(ctx, f, dirName)
|
||||
}
|
||||
|
||||
// InternalTest dispatches all internal tests
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
if !kv.Supported() {
|
||||
t.Skip("hasher is not supported on this OS")
|
||||
}
|
||||
t.Run("UploadFromCrypt", f.testUploadFromCrypt)
|
||||
}
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
|
||||
@@ -1,38 +0,0 @@
|
||||
package hasher_test
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/hasher"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/rclone/rclone/lib/kv"
|
||||
|
||||
_ "github.com/rclone/rclone/backend/all" // for integration tests
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
if !kv.Supported() {
|
||||
t.Skip("hasher is not supported on this OS")
|
||||
}
|
||||
opt := fstests.Opt{
|
||||
RemoteName: *fstest.RemoteName,
|
||||
NilObject: (*hasher.Object)(nil),
|
||||
UnimplementableFsMethods: []string{
|
||||
"OpenWriterAt",
|
||||
},
|
||||
UnimplementableObjectMethods: []string{},
|
||||
}
|
||||
if *fstest.RemoteName == "" {
|
||||
tempDir := filepath.Join(os.TempDir(), "rclone-hasher-test")
|
||||
opt.ExtraConfig = []fstests.ExtraConfigItem{
|
||||
{Name: "TestHasher", Key: "type", Value: "hasher"},
|
||||
{Name: "TestHasher", Key: "remote", Value: tempDir},
|
||||
}
|
||||
opt.RemoteName = "TestHasher:"
|
||||
}
|
||||
fstests.Run(t, &opt)
|
||||
}
|
||||
@@ -1,315 +0,0 @@
|
||||
package hasher
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/lib/kv"
|
||||
)
|
||||
|
||||
const (
|
||||
timeFormat = "2006-01-02T15:04:05.000000000-0700"
|
||||
anyFingerprint = "*"
|
||||
)
|
||||
|
||||
type hashMap map[hash.Type]string
|
||||
|
||||
type hashRecord struct {
|
||||
Fp string // fingerprint
|
||||
Hashes operations.HashSums
|
||||
Created time.Time
|
||||
}
|
||||
|
||||
func (r *hashRecord) encode(key string) ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
if err := gob.NewEncoder(&buf).Encode(r); err != nil {
|
||||
fs.Debugf(key, "hasher encoding %v: %v", r, err)
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func (r *hashRecord) decode(key string, data []byte) error {
|
||||
if err := gob.NewDecoder(bytes.NewBuffer(data)).Decode(r); err != nil {
|
||||
fs.Debugf(key, "hasher decoding %q failed: %v", data, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
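A tiny round-trip sketch of this record encoding (the key and digest values are invented for illustration; anyFingerprint is the constant defined above, and operations.HashSums is the hash-name-to-digest map used throughout):

rec := hashRecord{
	Fp:      anyFingerprint, // a "sticky" record that matches any fingerprint
	Hashes:  operations.HashSums{"md5": "d41d8cd98f00b204e9800998ecf8427e"},
	Created: time.Now(),
}
data, err := rec.encode("books/war_and_peace.txt")
if err == nil {
	var back hashRecord
	_ = back.decode("books/war_and_peace.txt", data)
}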
|
||||
// kvPrune: prune a single hash
|
||||
type kvPrune struct {
|
||||
key string
|
||||
}
|
||||
|
||||
func (op *kvPrune) Do(ctx context.Context, b kv.Bucket) error {
|
||||
return b.Delete([]byte(op.key))
|
||||
}
|
||||
|
||||
// kvPurge: delete a subtree
|
||||
type kvPurge struct {
|
||||
dir string
|
||||
}
|
||||
|
||||
func (op *kvPurge) Do(ctx context.Context, b kv.Bucket) error {
|
||||
dir := op.dir
|
||||
if !strings.HasSuffix(dir, "/") {
|
||||
dir += "/"
|
||||
}
|
||||
var items []string
|
||||
cur := b.Cursor()
|
||||
bkey, _ := cur.Seek([]byte(dir))
|
||||
for bkey != nil {
|
||||
key := string(bkey)
|
||||
if !strings.HasPrefix(key, dir) {
|
||||
break
|
||||
}
|
||||
items = append(items, key[len(dir):])
|
||||
bkey, _ = cur.Next()
|
||||
}
|
||||
nerr := 0
|
||||
for _, sub := range items {
|
||||
if err := b.Delete([]byte(dir + sub)); err != nil {
|
||||
nerr++
|
||||
}
|
||||
}
|
||||
fs.Debugf(dir, "%d hashes purged, %d failed", len(items)-nerr, nerr)
|
||||
return nil
|
||||
}
|
||||
|
||||
// kvMove: assign hashes to new path
|
||||
type kvMove struct {
|
||||
src string
|
||||
dst string
|
||||
dir bool
|
||||
fs *Fs
|
||||
}
|
||||
|
||||
func (op *kvMove) Do(ctx context.Context, b kv.Bucket) error {
|
||||
src, dst := op.src, op.dst
|
||||
if !op.dir {
|
||||
err := moveHash(b, src, dst)
|
||||
fs.Debugf(op.fs, "moving cached hash %s to %s (err: %v)", src, dst, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(src, "/") {
|
||||
src += "/"
|
||||
}
|
||||
if !strings.HasSuffix(dst, "/") {
|
||||
dst += "/"
|
||||
}
|
||||
|
||||
var items []string
|
||||
cur := b.Cursor()
|
||||
bkey, _ := cur.Seek([]byte(src))
|
||||
for bkey != nil {
|
||||
key := string(bkey)
|
||||
if !strings.HasPrefix(key, src) {
|
||||
break
|
||||
}
|
||||
items = append(items, key[len(src):])
|
||||
bkey, _ = cur.Next()
|
||||
}
|
||||
|
||||
nerr := 0
|
||||
for _, suffix := range items {
|
||||
srcKey, dstKey := src+suffix, dst+suffix
|
||||
err := moveHash(b, srcKey, dstKey)
|
||||
fs.Debugf(op.fs, "Rename cache record %s -> %s (err: %v)", srcKey, dstKey, err)
|
||||
if err != nil {
|
||||
nerr++
|
||||
}
|
||||
}
|
||||
fs.Debugf(op.fs, "%d hashes moved, %d failed", len(items)-nerr, nerr)
|
||||
return nil
|
||||
}
|
||||
|
||||
func moveHash(b kv.Bucket, src, dst string) error {
|
||||
data := b.Get([]byte(src))
|
||||
err := b.Delete([]byte(src))
|
||||
if err != nil || len(data) == 0 {
|
||||
return err
|
||||
}
|
||||
return b.Put([]byte(dst), data)
|
||||
}
|
||||
|
||||
// kvGet: get single hash from database
|
||||
type kvGet struct {
|
||||
key string
|
||||
fp string
|
||||
hash string
|
||||
val string
|
||||
age time.Duration
|
||||
}
|
||||
|
||||
func (op *kvGet) Do(ctx context.Context, b kv.Bucket) error {
|
||||
data := b.Get([]byte(op.key))
|
||||
if len(data) == 0 {
|
||||
return errors.New("no record")
|
||||
}
|
||||
var r hashRecord
|
||||
if err := r.decode(op.key, data); err != nil {
|
||||
return errors.New("invalid record")
|
||||
}
|
||||
if !(r.Fp == anyFingerprint || op.fp == anyFingerprint || r.Fp == op.fp) {
|
||||
return errors.New("fingerprint changed")
|
||||
}
|
||||
if time.Since(r.Created) > op.age {
|
||||
return errors.New("record timed out")
|
||||
}
|
||||
if r.Hashes != nil {
|
||||
op.val = r.Hashes[op.hash]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// kvPut: set hashes for an object by key
|
||||
type kvPut struct {
|
||||
key string
|
||||
fp string
|
||||
hashes operations.HashSums
|
||||
age time.Duration
|
||||
}
|
||||
|
||||
func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) {
|
||||
data := b.Get([]byte(op.key))
|
||||
var r hashRecord
|
||||
if len(data) > 0 {
|
||||
err = r.decode(op.key, data)
|
||||
if err != nil || r.Fp != op.fp || time.Since(r.Created) > op.age {
|
||||
r.Hashes = nil
|
||||
}
|
||||
}
|
||||
if len(r.Hashes) == 0 {
|
||||
r.Created = time.Now()
|
||||
r.Hashes = operations.HashSums{}
|
||||
r.Fp = op.fp
|
||||
}
|
||||
|
||||
for hashType, hashVal := range op.hashes {
|
||||
r.Hashes[hashType] = hashVal
|
||||
}
|
||||
if data, err = r.encode(op.key); err != nil {
|
||||
return errors.Wrap(err, "marshal failed")
|
||||
}
|
||||
if err = b.Put([]byte(op.key), data); err != nil {
|
||||
return errors.Wrap(err, "put failed")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// kvDump: dump the database.
|
||||
// Note: long dump can cause concurrent operations to fail.
|
||||
type kvDump struct {
|
||||
full bool
|
||||
root string
|
||||
path string
|
||||
fs *Fs
|
||||
num int
|
||||
total int
|
||||
}
|
||||
|
||||
func (op *kvDump) Do(ctx context.Context, b kv.Bucket) error {
|
||||
f, baseRoot, dbPath := op.fs, op.root, op.path
|
||||
|
||||
if op.full {
|
||||
total := 0
|
||||
num := 0
|
||||
_ = b.ForEach(func(bkey, data []byte) error {
|
||||
total++
|
||||
key := string(bkey)
|
||||
include := (baseRoot == "" || key == baseRoot || strings.HasPrefix(key, baseRoot+"/"))
|
||||
var r hashRecord
|
||||
if err := r.decode(key, data); err != nil {
|
||||
fs.Errorf(nil, "%s: invalid record: %v", key, err)
|
||||
return nil
|
||||
}
|
||||
fmt.Println(f.dumpLine(&r, key, include, nil))
|
||||
if include {
|
||||
num++
|
||||
}
|
||||
return nil
|
||||
})
|
||||
fs.Infof(dbPath, "%d records out of %d", num, total)
|
||||
op.num, op.total = num, total // for unit tests
|
||||
return nil
|
||||
}
|
||||
|
||||
num := 0
|
||||
cur := b.Cursor()
|
||||
var bkey, data []byte
|
||||
if baseRoot != "" {
|
||||
bkey, data = cur.Seek([]byte(baseRoot))
|
||||
} else {
|
||||
bkey, data = cur.First()
|
||||
}
|
||||
for bkey != nil {
|
||||
key := string(bkey)
|
||||
if !(baseRoot == "" || key == baseRoot || strings.HasPrefix(key, baseRoot+"/")) {
|
||||
break
|
||||
}
|
||||
var r hashRecord
|
||||
if err := r.decode(key, data); err != nil {
|
||||
fs.Errorf(nil, "%s: invalid record: %v", key, err)
|
||||
continue
|
||||
}
|
||||
if key = strings.TrimPrefix(key[len(baseRoot):], "/"); key == "" {
|
||||
key = "/"
|
||||
}
|
||||
fmt.Println(f.dumpLine(&r, key, true, nil))
|
||||
num++
|
||||
bkey, data = cur.Next()
|
||||
}
|
||||
fs.Infof(dbPath, "%d records", num)
|
||||
op.num = num // for unit tests
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) dumpLine(r *hashRecord, path string, include bool, err error) string {
|
||||
var status string
|
||||
switch {
|
||||
case !include:
|
||||
status = "ext"
|
||||
case err != nil:
|
||||
status = "bad"
|
||||
case r.Fp == anyFingerprint:
|
||||
status = "stk"
|
||||
default:
|
||||
status = "ok "
|
||||
}
|
||||
|
||||
var hashes []string
|
||||
for _, hashType := range f.keepHashes.Array() {
|
||||
hashName := hashType.String()
|
||||
hashVal := r.Hashes[hashName]
|
||||
if hashVal == "" || err != nil {
|
||||
hashVal = "-"
|
||||
}
|
||||
hashVal = fmt.Sprintf("%-*s", hash.Width(hashType), hashVal)
|
||||
hashes = append(hashes, hashName+":"+hashVal)
|
||||
}
|
||||
hashesStr := strings.Join(hashes, " ")
|
||||
|
||||
age := time.Since(r.Created).Round(time.Second)
|
||||
if age > 24*time.Hour {
|
||||
age = age.Round(time.Hour)
|
||||
}
|
||||
if err != nil {
|
||||
age = 0
|
||||
}
|
||||
ageStr := age.String()
|
||||
if strings.HasSuffix(ageStr, "h0m0s") {
|
||||
ageStr = strings.TrimSuffix(ageStr, "0m0s")
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s %s %9s %s", status, hashesStr, ageStr, path)
|
||||
}
|
||||
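For reference, the format string above yields dump lines shaped roughly like this (digests, ages and paths invented for illustration; the status column is one of "ok ", "stk", "bad" or "ext"):

// ok  md5:0cc175b9c0f1b6a831c399e269772661        5h docs/readme.txt
// stk md5:-                                       10s images/logo.png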
@@ -1,305 +0,0 @@
package hasher

import (
"context"
"fmt"
"io"
"io/ioutil"
"path"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
)

// obtain hash for an object
func (o *Object) getHash(ctx context.Context, hashType hash.Type) (string, error) {
maxAge := time.Duration(o.f.opt.MaxAge)
if maxAge <= 0 {
return "", nil
}
fp := o.fingerprint(ctx)
if fp == "" {
return "", errors.New("fingerprint failed")
}
return o.f.getRawHash(ctx, hashType, o.Remote(), fp, maxAge)
}

// obtain hash for a path
func (f *Fs) getRawHash(ctx context.Context, hashType hash.Type, remote, fp string, age time.Duration) (string, error) {
key := path.Join(f.Fs.Root(), remote)
op := &kvGet{
key: key,
fp: fp,
hash: hashType.String(),
age: age,
}
err := f.db.Do(false, op)
return op.val, err
}

// put new hashes for an object
func (o *Object) putHashes(ctx context.Context, rawHashes hashMap) error {
if o.f.opt.MaxAge <= 0 {
return nil
}
fp := o.fingerprint(ctx)
if fp == "" {
return nil
}
key := path.Join(o.f.Fs.Root(), o.Remote())
hashes := operations.HashSums{}
for hashType, hashVal := range rawHashes {
hashes[hashType.String()] = hashVal
}
return o.f.putRawHashes(ctx, key, fp, hashes)
}

// set hashes for a path without any validation
func (f *Fs) putRawHashes(ctx context.Context, key, fp string, hashes operations.HashSums) error {
return f.db.Do(true, &kvPut{
key: key,
fp: fp,
hashes: hashes,
age: time.Duration(f.opt.MaxAge),
})
}

// Hash returns the selected checksum of the file or "" if unavailable.
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (hashVal string, err error) {
f := o.f
if f.passHashes.Contains(hashType) {
fs.Debugf(o, "pass %s", hashType)
return o.Object.Hash(ctx, hashType)
}
if !f.suppHashes.Contains(hashType) {
fs.Debugf(o, "unsupp %s", hashType)
return "", hash.ErrUnsupported
}
if hashVal, err = o.getHash(ctx, hashType); err != nil {
fs.Debugf(o, "getHash: %v", err)
err = nil
hashVal = ""
}
if hashVal != "" {
fs.Debugf(o, "cached %s = %q", hashType, hashVal)
return hashVal, nil
}
if f.slowHashes.Contains(hashType) {
fs.Debugf(o, "slow %s", hashType)
hashVal, err = o.Object.Hash(ctx, hashType)
if err == nil && hashVal != "" && f.keepHashes.Contains(hashType) {
if err = o.putHashes(ctx, hashMap{hashType: hashVal}); err != nil {
fs.Debugf(o, "putHashes: %v", err)
err = nil
}
}
return hashVal, err
}
if f.autoHashes.Contains(hashType) && o.Size() < int64(f.opt.AutoSize) {
_ = o.updateHashes(ctx)
if hashVal, err = o.getHash(ctx, hashType); err != nil {
fs.Debugf(o, "auto %s = %q (%v)", hashType, hashVal, err)
err = nil
}
}
return hashVal, err
}

// updateHashes performs implicit "rclone hashsum --download" and updates cache.
func (o *Object) updateHashes(ctx context.Context) error {
r, err := o.Open(ctx)
if err != nil {
fs.Infof(o, "update failed (open): %v", err)
return err
}
defer func() {
_ = r.Close()
}()
if _, err = io.Copy(ioutil.Discard, r); err != nil {
fs.Infof(o, "update failed (copy): %v", err)
return err
}
return nil
}

// Update the object with the given data, time and size.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
_ = o.f.pruneHash(src.Remote())
return o.Object.Update(ctx, in, src, options...)
}

// Remove an object.
func (o *Object) Remove(ctx context.Context) error {
_ = o.f.pruneHash(o.Remote())
return o.Object.Remove(ctx)
}

// SetModTime sets the modification time of the file.
// Also prunes the cache entry when modtime changes so that
// touching a file will trigger checksum recalculation even
// on backends that don't provide modTime with fingerprint.
func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
if mtime != o.Object.ModTime(ctx) {
_ = o.f.pruneHash(o.Remote())
}
return o.Object.SetModTime(ctx, mtime)
}

// Open opens the file for read.
// Full reads will also update object hashes.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (r io.ReadCloser, err error) {
size := o.Size()
var offset, limit int64 = 0, -1
for _, option := range options {
switch opt := option.(type) {
case *fs.SeekOption:
offset = opt.Offset
case *fs.RangeOption:
offset, limit = opt.Decode(size)
}
}
if offset < 0 {
return nil, errors.New("invalid offset")
}
if limit < 0 {
limit = size - offset
}
if r, err = o.Object.Open(ctx, options...); err != nil {
return nil, err
}
if offset != 0 || limit < size {
// It's a partial read
return r, err
}
return o.f.newHashingReader(ctx, r, func(sums hashMap) {
if err := o.putHashes(ctx, sums); err != nil {
fs.Infof(o, "auto hashing error: %v", err)
}
})
}

// Put data into the remote path with given modTime and size
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
var (
o *Object
common hash.Set
rehash bool
hashes hashMap
)
if fsrc := src.Fs(); fsrc != nil {
common = fsrc.Hashes().Overlap(f.keepHashes)
// Rehash if source does not have all required hashes or hashing is slow
rehash = fsrc.Features().SlowHash || common != f.keepHashes
}

wrapIn := in
if rehash {
r, err := f.newHashingReader(ctx, in, func(sums hashMap) {
hashes = sums
})
fs.Debugf(src, "Rehash in-fly due to incomplete or slow source set %v (err: %v)", common, err)
if err == nil {
wrapIn = r
} else {
rehash = false
}
}

_ = f.pruneHash(src.Remote())
oResult, err := f.Fs.Put(ctx, wrapIn, src, options...)
o = f.wrapObject(oResult, err)
if o == nil {
return nil, err
}

if !rehash {
hashes = hashMap{}
for _, ht := range common.Array() {
if h, e := src.Hash(ctx, ht); e == nil && h != "" {
hashes[ht] = h
}
}
}
if len(hashes) > 0 {
err := o.putHashes(ctx, hashes)
fs.Debugf(o, "Applied %d source hashes, err: %v", len(hashes), err)
}
return o, err
}

type hashingReader struct {
rd io.Reader
hasher *hash.MultiHasher
fun func(hashMap)
}

func (f *Fs) newHashingReader(ctx context.Context, rd io.Reader, fun func(hashMap)) (*hashingReader, error) {
hasher, err := hash.NewMultiHasherTypes(f.keepHashes)
if err != nil {
return nil, err
}
hr := &hashingReader{
rd: rd,
hasher: hasher,
fun: fun,
}
return hr, nil
}

func (r *hashingReader) Read(p []byte) (n int, err error) {
n, err = r.rd.Read(p)
if err != nil && err != io.EOF {
r.hasher = nil
}
if r.hasher != nil {
if _, errHash := r.hasher.Write(p[:n]); errHash != nil {
r.hasher = nil
err = errHash
}
}
if err == io.EOF && r.hasher != nil {
r.fun(r.hasher.Sums())
r.hasher = nil
}
return
}

func (r *hashingReader) Close() error {
if rc, ok := r.rd.(io.ReadCloser); ok {
return rc.Close()
}
return nil
}

// Return object fingerprint or empty string in case of errors
//
// Note that we can't use the generic `fs.Fingerprint` here because
// this fingerprint is used to pick _derived hashes_ that are slow
// to calculate or completely unsupported by the base remote.
//
// The hasher fingerprint must be based on `fsHash`, the first _fast_
// hash supported _by the underlying remote_ (if there is one),
// while `fs.Fingerprint` would select a hash _produced by hasher_
// creating unresolvable fingerprint loop.
func (o *Object) fingerprint(ctx context.Context) string {
size := o.Object.Size()
timeStr := "-"
if o.f.fpTime {
timeStr = o.Object.ModTime(ctx).UTC().Format(timeFormat)
if timeStr == "" {
return ""
}
}
hashStr := "-"
if o.f.fpHash != hash.None {
var err error
hashStr, err = o.Object.Hash(ctx, o.f.fpHash)
if hashStr == "" || err != nil {
return ""
}
}
return fmt.Sprintf("%d,%s,%s", size, timeStr, hashStr)
}
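The long comment above fingerprint is the crux of this file: a cached hash is only trusted while the object's size, modification time and one fast base-remote hash stay the same. A minimal, self-contained Go sketch of that idea follows (hypothetical names and values for illustration only, not rclone's actual API):

package main

import "fmt"

// cacheEntry pairs a stored hash with the fingerprint it was computed under,
// mirroring the "%d,%s,%s" (size, modtime, fast hash) format used above.
type cacheEntry struct {
	fingerprint string
	sha1        string
}

// cachedHash returns the stored hash only while the fingerprint still matches;
// any change in size, modtime or base hash makes the entry stale.
func cachedHash(cache map[string]cacheEntry, remote, currentFp string) (string, bool) {
	e, ok := cache[remote]
	if !ok || e.fingerprint != currentFp {
		return "", false // missing or stale - the caller must rehash
	}
	return e.sha1, true
}

func main() {
	cache := map[string]cacheEntry{
		"docs/a.txt": {fingerprint: "120,2021-10-01T10:00:00Z,abcd", sha1: "3f786850e387550fdab836ed7e6dc881de23001b"},
	}
	fmt.Println(cachedHash(cache, "docs/a.txt", "120,2021-10-01T10:00:00Z,abcd")) // hit: fingerprint unchanged
	fmt.Println(cachedHash(cache, "docs/a.txt", "121,2021-10-01T10:05:00Z,abcd")) // miss: size/modtime changed
}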
@@ -1,4 +1,3 @@
|
||||
//go:build !plan9
|
||||
// +build !plan9
|
||||
|
||||
package hdfs
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
//go:build !plan9
|
||||
// +build !plan9
|
||||
|
||||
package hdfs
|
||||
@@ -19,28 +18,35 @@ func init() {
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "namenode",
|
||||
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
|
||||
Help: "hadoop name node and port",
|
||||
Required: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "namenode:8020",
|
||||
Help: "Connect to host namenode at port 8020",
|
||||
}},
|
||||
}, {
|
||||
Name: "username",
|
||||
Help: "Hadoop user name.",
|
||||
Help: "hadoop user name",
|
||||
Required: false,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "root",
|
||||
Help: "Connect to hdfs as root.",
|
||||
Help: "Connect to hdfs as root",
|
||||
}},
|
||||
}, {
|
||||
Name: "service_principal_name",
|
||||
Help: `Kerberos service principal name for the namenode.
|
||||
Help: `Kerberos service principal name for the namenode
|
||||
|
||||
Enables KERBEROS authentication. Specifies the Service Principal Name
|
||||
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
|
||||
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
|
||||
(SERVICE/FQDN) for the namenode.`,
|
||||
Required: false,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "hdfs/namenode.hadoop.docker",
|
||||
Help: "Namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.",
|
||||
}},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "data_transfer_protection",
|
||||
Help: `Kerberos data transfer protection: authentication|integrity|privacy.
|
||||
Help: `Kerberos data transfer protection: authentication|integrity|privacy
|
||||
|
||||
Specifies whether or not authentication, data signature integrity
|
||||
checks, and wire encryption is required when communicating the the
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
// Test HDFS filesystem interface
|
||||
|
||||
//go:build !plan9
|
||||
// +build !plan9
|
||||
|
||||
package hdfs_test
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
// Build for hdfs for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
//go:build plan9
|
||||
// +build plan9
|
||||
|
||||
package hdfs
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
//go:build !plan9
|
||||
// +build !plan9
|
||||
|
||||
package hdfs
|
||||
|
||||
@@ -38,13 +38,20 @@ func init() {
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "url",
|
||||
Help: "URL of http host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
|
||||
Help: "URL of http host to connect to",
|
||||
Required: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "https://example.com",
|
||||
Help: "Connect to example.com",
|
||||
}, {
|
||||
Value: "https://user:pass@example.com",
|
||||
Help: "Connect to example.com using a username and password",
|
||||
}},
|
||||
}, {
|
||||
Name: "headers",
|
||||
Help: `Set HTTP headers for all transactions.
|
||||
Help: `Set HTTP headers for all transactions
|
||||
|
||||
Use this to set additional HTTP headers for all transactions.
|
||||
Use this to set additional HTTP headers for all transactions
|
||||
|
||||
The input format is comma separated list of key,value pairs. Standard
|
||||
[CSV encoding](https://godoc.org/encoding/csv) may be used.
|
||||
@@ -57,7 +64,7 @@ You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_slash",
|
||||
Help: `Set this if the site doesn't end directories with /.
|
||||
Help: `Set this if the site doesn't end directories with /
|
||||
|
||||
Use this if your target website does not use / on the end of
|
||||
directories.
|
||||
@@ -73,7 +80,7 @@ directories.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_head",
|
||||
Help: `Don't use HEAD requests to find file sizes in dir listing.
|
||||
Help: `Don't use HEAD requests to find file sizes in dir listing
|
||||
|
||||
If your site is being very slow to load then you can try this option.
|
||||
Normally rclone does a HEAD request for each potential file in a
|
||||
|
||||
@@ -368,7 +368,6 @@ type JottaFile struct {
|
||||
XMLName xml.Name
|
||||
Name string `xml:"name,attr"`
|
||||
Deleted Flag `xml:"deleted,attr"`
|
||||
PublicURI string `xml:"publicURI"`
|
||||
PublicSharePath string `xml:"publicSharePath"`
|
||||
State string `xml:"currentRevision>state"`
|
||||
CreatedAt Time `xml:"currentRevision>created"`
|
||||
|
||||
@@ -86,7 +86,7 @@ func init() {
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "trashed_only",
|
||||
Help: "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.",
|
||||
Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
@@ -122,15 +122,15 @@ func init() {
|
||||
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
switch config.State {
|
||||
case "":
|
||||
return fs.ConfigChooseFixed("auth_type_done", "config_type", `Authentication type.`, []fs.OptionExample{{
|
||||
return fs.ConfigChooseFixed("auth_type_done", "config_type", `Authentication type`, []fs.OptionExample{{
|
||||
Value: "standard",
|
||||
Help: "Standard authentication.\nUse this if you're a normal Jottacloud user.",
|
||||
Help: "Standard authentication - use this if you're a normal Jottacloud user.",
|
||||
}, {
|
||||
Value: "legacy",
|
||||
Help: "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
|
||||
Help: "Legacy authentication - this is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
|
||||
}, {
|
||||
Value: "telia",
|
||||
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud.",
|
||||
Help: "Telia Cloud authentication - use this if you are using Telia Cloud.",
|
||||
}})
|
||||
case "auth_type_done":
|
||||
// Jump to next state according to config chosen
|
||||
@@ -599,9 +599,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Jo
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "read metadata failed")
|
||||
}
|
||||
if result.XMLName.Local == "folder" {
|
||||
return nil, fs.ErrorIsDir
|
||||
} else if result.XMLName.Local != "file" {
|
||||
if result.XMLName.Local != "file" {
|
||||
return nil, fs.ErrorNotAFile
|
||||
}
|
||||
return &result, nil
|
||||
@@ -764,7 +762,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
// Renew the token in the background
|
||||
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
||||
_, err := f.readMetaDataForPath(ctx, "")
|
||||
if err == fs.ErrorNotAFile || err == fs.ErrorIsDir {
|
||||
if err == fs.ErrorNotAFile {
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
@@ -786,7 +784,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
_, err := f.NewObject(context.TODO(), remote)
|
||||
if err != nil {
|
||||
if uErr := errors.Cause(err); uErr == fs.ErrorObjectNotFound || uErr == fs.ErrorNotAFile || uErr == fs.ErrorIsDir {
|
||||
if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
|
||||
// File doesn't exist so return old f
|
||||
f.root = root
|
||||
return f, nil
|
||||
@@ -809,10 +807,8 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Jot
|
||||
}
|
||||
var err error
|
||||
if info != nil {
|
||||
if !f.validFile(info) {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
err = o.setMetaData(info) // sets the info
|
||||
// Set info
|
||||
err = o.setMetaData(info)
|
||||
} else {
|
||||
err = o.readMetaData(ctx, false) // reads info and meta, returning an error
|
||||
}
|
||||
@@ -884,27 +880,37 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
return nil, errors.Wrap(err, "couldn't list files")
|
||||
}
|
||||
|
||||
if !f.validFolder(&result) {
|
||||
if bool(result.Deleted) && !f.opt.TrashedOnly {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
|
||||
for i := range result.Folders {
|
||||
item := &result.Folders[i]
|
||||
if f.validFolder(item) {
|
||||
remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
|
||||
d := fs.NewDir(remote, time.Time(item.ModifiedAt))
|
||||
entries = append(entries, d)
|
||||
if !f.opt.TrashedOnly && bool(item.Deleted) {
|
||||
continue
|
||||
}
|
||||
remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
|
||||
d := fs.NewDir(remote, time.Time(item.ModifiedAt))
|
||||
entries = append(entries, d)
|
||||
}
|
||||
|
||||
for i := range result.Files {
|
||||
item := &result.Files[i]
|
||||
if f.validFile(item) {
|
||||
remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
|
||||
if o, err := f.newObjectWithInfo(ctx, remote, item); err == nil {
|
||||
entries = append(entries, o)
|
||||
if f.opt.TrashedOnly {
|
||||
if !item.Deleted || item.State != "COMPLETED" {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if item.Deleted || item.State != "COMPLETED" {
|
||||
continue
|
||||
}
|
||||
}
|
||||
remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
|
||||
o, err := f.newObjectWithInfo(ctx, remote, item)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
entries = append(entries, o)
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
@@ -921,7 +927,7 @@ func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolde
|
||||
startPathLength := len(startPath)
|
||||
for i := range startFolder.Folders {
|
||||
folder := &startFolder.Folders[i]
|
||||
if !f.validFolder(folder) {
|
||||
if folder.Deleted {
|
||||
return nil
|
||||
}
|
||||
folderPath := f.opt.Enc.ToStandardPath(path.Join(folder.Path, folder.Name))
|
||||
@@ -939,16 +945,17 @@ func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolde
|
||||
}
|
||||
for i := range folder.Files {
|
||||
file := &folder.Files[i]
|
||||
if f.validFile(file) {
|
||||
remoteFile := path.Join(remoteDir, f.opt.Enc.ToStandardName(file.Name))
|
||||
o, err := f.newObjectWithInfo(ctx, remoteFile, file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = fn(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if file.Deleted || file.State != "COMPLETED" {
|
||||
continue
|
||||
}
|
||||
remoteFile := path.Join(remoteDir, f.opt.Enc.ToStandardName(file.Name))
|
||||
o, err := f.newObjectWithInfo(ctx, remoteFile, file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = fn(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1261,23 +1268,15 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
return "", errors.Wrap(err, "couldn't create public link")
|
||||
}
|
||||
if unlink {
|
||||
if result.PublicURI != "" {
|
||||
return "", errors.Errorf("couldn't remove public link - %q", result.PublicURI)
|
||||
if result.PublicSharePath != "" {
|
||||
return "", errors.Errorf("couldn't remove public link - %q", result.PublicSharePath)
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
if result.PublicURI == "" {
|
||||
return "", errors.New("couldn't create public link - no uri received")
|
||||
if result.PublicSharePath == "" {
|
||||
return "", errors.New("couldn't create public link - no link path received")
|
||||
}
|
||||
if result.PublicSharePath != "" {
|
||||
webLink := joinPath(baseURL, result.PublicSharePath)
|
||||
fs.Debugf(nil, "Web link: %s", webLink)
|
||||
} else {
|
||||
fs.Debugf(nil, "No web link received")
|
||||
}
|
||||
directLink := joinPath(baseURL, fmt.Sprintf("opin/io/downloadPublic/%s/%s", f.user, result.PublicURI))
|
||||
fs.Debugf(nil, "Direct link: %s", directLink)
|
||||
return directLink, nil
|
||||
return joinPath(baseURL, result.PublicSharePath), nil
|
||||
}
|
||||
|
||||
// About gets quota information
|
||||
@@ -1297,21 +1296,6 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// UserInfo fetches info about the current user
|
||||
func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err error) {
|
||||
cust, err := getCustomerInfo(ctx, f.apiSrv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return map[string]string{
|
||||
"Username": cust.Username,
|
||||
"Email": cust.Email,
|
||||
"Name": cust.Name,
|
||||
"AccountType": cust.AccountType,
|
||||
"SubscriptionType": cust.SubscriptionType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CleanUp empties the trash
|
||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
opts := rest.Opts{
|
||||
@@ -1382,25 +1366,6 @@ func (o *Object) MimeType(ctx context.Context) string {
|
||||
return o.mimeType
|
||||
}
|
||||
|
||||
// validFile checks if info indicates file is valid
|
||||
func (f *Fs) validFile(info *api.JottaFile) bool {
|
||||
if info.State != "COMPLETED" {
|
||||
return false // File is incomplete or corrupt
|
||||
}
|
||||
if !info.Deleted {
|
||||
return !f.opt.TrashedOnly // Regular file; return false if TrashedOnly, else true
|
||||
}
|
||||
return f.opt.TrashedOnly // Deleted file; return true if TrashedOnly, else false
|
||||
}
|
||||
|
||||
// validFolder checks if info indicates folder is valid
|
||||
func (f *Fs) validFolder(info *api.JottaFolder) bool {
|
||||
// Returns true if folder is not deleted.
|
||||
// If TrashedOnly option then always returns true, because a folder not
|
||||
// in trash must be traversed to get to files/subfolders that are.
|
||||
return !bool(info.Deleted) || f.opt.TrashedOnly
|
||||
}
|
||||
|
||||
// setMetaData sets the metadata from info
|
||||
func (o *Object) setMetaData(info *api.JottaFile) (err error) {
|
||||
o.hasMetaData = true
|
||||
@@ -1420,7 +1385,7 @@ func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !o.fs.validFile(info) {
|
||||
if bool(info.Deleted) && !o.fs.opt.TrashedOnly {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
return o.setMetaData(info)
|
||||
@@ -1441,50 +1406,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
// make sure metadata is available, we need its current size and md5
|
||||
err := o.readMetaData(ctx, false)
|
||||
if err != nil {
|
||||
fs.Logf(o, "Failed to read metadata: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// prepare allocate request with existing metadata but changed timestamps
|
||||
var resp *http.Response
|
||||
var options []fs.OpenOption
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "files/v1/allocate",
|
||||
Options: options,
|
||||
ExtraHeaders: make(map[string]string),
|
||||
}
|
||||
fileDate := api.Time(modTime).APIString()
|
||||
var request = api.AllocateFileRequest{
|
||||
Bytes: o.size,
|
||||
Created: fileDate,
|
||||
Modified: fileDate,
|
||||
Md5: o.md5,
|
||||
Path: path.Join(o.fs.opt.Mountpoint, o.fs.opt.Enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
|
||||
}
|
||||
|
||||
// send it
|
||||
var response api.AllocateFileResponse
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, &request, &response)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// check response
|
||||
if response.State != "COMPLETED" {
|
||||
// could be the file was modified (size/md5 changed) between readMetaData and the allocate request
|
||||
return errors.New("metadata did not match")
|
||||
}
|
||||
|
||||
// update local metadata
|
||||
o.modTime = modTime
|
||||
return nil
|
||||
return fs.ErrorCantSetModTime
|
||||
}
|
||||
|
||||
// Storable returns a boolean showing whether this object storable
|
||||
@@ -1717,7 +1639,6 @@ var (
|
||||
_ fs.ListRer = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.UserInfoer = (*Fs)(nil)
|
||||
_ fs.CleanUpper = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.MimeTyper = (*Object)(nil)
|
||||
|
||||
@@ -32,29 +32,29 @@ func init() {
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "endpoint",
|
||||
Help: "The Koofr API endpoint to use.",
|
||||
Help: "The Koofr API endpoint to use",
|
||||
Default: "https://app.koofr.net",
|
||||
Required: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "mountid",
|
||||
Help: "Mount ID of the mount to use.\n\nIf omitted, the primary mount is used.",
|
||||
Help: "Mount ID of the mount to use. If omitted, the primary mount is used.",
|
||||
Required: false,
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "setmtime",
|
||||
Help: "Does the backend support setting modification time.\n\nSet this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
|
||||
Help: "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
|
||||
Default: true,
|
||||
Required: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "Your Koofr user name.",
|
||||
Help: "Your Koofr user name",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
|
||||
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
|
||||
IsPassword: true,
|
||||
Required: true,
|
||||
}, {
|
||||
@@ -344,7 +344,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (obj fs.Object, err e
|
||||
return nil, translateErrorsObject(err)
|
||||
}
|
||||
if info.Type == "dir" {
|
||||
return nil, fs.ErrorIsDir
|
||||
return nil, fs.ErrorNotAFile
|
||||
}
|
||||
return &Object{
|
||||
fs: f,
|
||||
@@ -608,25 +608,5 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
if err != nil {
|
||||
return "", translateErrorsDir(err)
|
||||
}
|
||||
|
||||
// URL returned by API looks like following:
|
||||
//
|
||||
// https://app.koofr.net/links/35d9fb92-74a3-4930-b4ed-57f123bfb1a6
|
||||
//
|
||||
// Direct url looks like following:
|
||||
//
|
||||
// https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F
|
||||
//
|
||||
// I am not sure about meaning of "path" parameter; in my expriments
|
||||
// it is always "%2F", and omitting it or putting any other value
|
||||
// results in 404.
|
||||
//
|
||||
// There is one more quirk: direct link to file in / returns that file,
|
||||
// direct link to file somewhere else in hierarchy returns zip archive
|
||||
// with one member.
|
||||
link := linkData.URL
|
||||
link = strings.ReplaceAll(link, "/links", "/content/links")
|
||||
link += "/files/get?path=%2F"
|
||||
|
||||
return link, nil
|
||||
return linkData.ShortURL, nil
|
||||
}
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
//go:build darwin || dragonfly || freebsd || linux
|
||||
// +build darwin dragonfly freebsd linux
|
||||
|
||||
package local
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package local
|
||||
|
||||
11
backend/local/encode_darwin.go
Normal file
@@ -0,0 +1,11 @@
|
||||
//+build darwin
|
||||
|
||||
package local
|
||||
|
||||
import "github.com/rclone/rclone/lib/encoder"
|
||||
|
||||
// This is the encoding used by the local backend for macOS
|
||||
//
|
||||
// macOS can't store invalid UTF-8, it converts them into %XX encoding
|
||||
const defaultEnc = (encoder.Base |
|
||||
encoder.EncodeInvalidUtf8)
|
||||
8
backend/local/encode_other.go
Normal file
@@ -0,0 +1,8 @@
|
||||
//+build !windows,!darwin
|
||||
|
||||
package local
|
||||
|
||||
import "github.com/rclone/rclone/lib/encoder"
|
||||
|
||||
// This is the encoding used by the local backend for non windows platforms
|
||||
const defaultEnc = encoder.Base
|
||||
@@ -1,9 +1,10 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
//+build windows
|
||||
|
||||
package encoder
|
||||
package local
|
||||
|
||||
// OS is the encoding used by the local backend for windows platforms
|
||||
import "github.com/rclone/rclone/lib/encoder"
|
||||
|
||||
// This is the encoding used by the local backend for windows platforms
|
||||
//
|
||||
// List of replaced characters:
|
||||
// < (less than) -> '<' // FULLWIDTH LESS-THAN SIGN
|
||||
@@ -23,10 +24,10 @@ package encoder
|
||||
// Also encode invalid UTF-8 bytes as Go can't convert them to UTF-16.
|
||||
//
|
||||
// https://docs.microsoft.com/de-de/windows/desktop/FileIO/naming-a-file#naming-conventions
|
||||
const OS = (Base |
|
||||
EncodeWin |
|
||||
EncodeBackSlash |
|
||||
EncodeCtl |
|
||||
EncodeRightSpace |
|
||||
EncodeRightPeriod |
|
||||
EncodeInvalidUtf8)
|
||||
const defaultEnc = (encoder.Base |
|
||||
encoder.EncodeWin |
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeCtl |
|
||||
encoder.EncodeRightSpace |
|
||||
encoder.EncodeRightPeriod |
|
||||
encoder.EncodeInvalidUtf8)
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build !linux
|
||||
// +build !linux
|
||||
//+build !linux
|
||||
|
||||
package local
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build linux
|
||||
// +build linux
|
||||
//+build linux
|
||||
|
||||
package local
|
||||
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
//go:build windows || plan9 || js
|
||||
// +build windows plan9 js
|
||||
|
||||
package local
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
//go:build !windows && !plan9 && !js
|
||||
// +build !windows,!plan9,!js
|
||||
|
||||
package local
|
||||
|
||||
@@ -44,11 +44,11 @@ func init() {
|
||||
CommandHelp: commandHelp,
|
||||
Options: []fs.Option{{
|
||||
Name: "nounc",
|
||||
Help: "Disable UNC (long path names) conversion on Windows.",
|
||||
Help: "Disable UNC (long path names) conversion on Windows",
|
||||
Advanced: runtime.GOOS != "windows",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "true",
|
||||
Help: "Disables long file names.",
|
||||
Help: "Disables long file names",
|
||||
}},
|
||||
}, {
|
||||
Name: "copy_links",
|
||||
@@ -59,7 +59,7 @@ func init() {
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "links",
|
||||
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
|
||||
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "l",
|
||||
@@ -67,7 +67,6 @@ func init() {
|
||||
}, {
|
||||
Name: "skip_links",
|
||||
Help: `Don't warn about skipped symlinks.
|
||||
|
||||
This flag disables warning messages on skipped symlinks or junction
|
||||
points, as you explicitly acknowledge that they should be skipped.`,
|
||||
Default: false,
|
||||
@@ -75,21 +74,21 @@ points, as you explicitly acknowledge that they should be skipped.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "zero_size_links",
|
||||
Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
|
||||
Help: `Assume the Stat size of links is zero (and read them instead) (Deprecated)
|
||||
|
||||
Rclone used to use the Stat size of links as the link size, but this fails in quite a few places:
|
||||
Rclone used to use the Stat size of links as the link size, but this fails in quite a few places
|
||||
|
||||
- Windows
|
||||
- On some virtual filesystems (such ash LucidLink)
|
||||
- Android
|
||||
|
||||
So rclone now always reads the link.
|
||||
So rclone now always reads the link
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "unicode_normalization",
|
||||
Help: `Apply unicode NFC normalization to paths and filenames.
|
||||
Help: `Apply unicode NFC normalization to paths and filenames
|
||||
|
||||
This flag can be used to normalize file names into unicode NFC form
|
||||
that are read from the local filesystem.
|
||||
@@ -107,7 +106,7 @@ routine so this flag shouldn't normally be used.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_check_updated",
|
||||
Help: `Don't check to see if the files change during upload.
|
||||
Help: `Don't check to see if the files change during upload
|
||||
|
||||
Normally rclone checks the size and modification time of files as they
|
||||
are being uploaded and aborts with a message which starts "can't copy
|
||||
@@ -153,7 +152,7 @@ to override the default choice.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "case_insensitive",
|
||||
Help: `Force the filesystem to report itself as case insensitive.
|
||||
Help: `Force the filesystem to report itself as case insensitive
|
||||
|
||||
Normally the local backend declares itself as case insensitive on
|
||||
Windows/macOS and case sensitive for everything else. Use this flag
|
||||
@@ -162,7 +161,7 @@ to override the default choice.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_preallocate",
|
||||
Help: `Disable preallocation of disk space for transferred files.
|
||||
Help: `Disable preallocation of disk space for transferred files
|
||||
|
||||
Preallocation of disk space helps prevent filesystem fragmentation.
|
||||
However, some virtual filesystem layers (such as Google Drive File
|
||||
@@ -173,7 +172,7 @@ Use this flag to disable preallocation.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_sparse",
|
||||
Help: `Disable sparse files for multi-thread downloads.
|
||||
Help: `Disable sparse files for multi-thread downloads
|
||||
|
||||
On Windows platforms rclone will make sparse files when doing
|
||||
multi-thread downloads. This avoids long pauses on large files where
|
||||
@@ -183,7 +182,7 @@ cause disk fragmentation and can be slow to work with.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_set_modtime",
|
||||
Help: `Disable setting modtime.
|
||||
Help: `Disable setting modtime
|
||||
|
||||
Normally rclone updates modification time of files after they are done
|
||||
uploading. This can cause permissions issues on Linux platforms when
|
||||
@@ -196,7 +195,7 @@ enabled, rclone will no longer update the modtime after copying a file.`,
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
Default: encoder.OS,
|
||||
Default: defaultEnc,
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
@@ -402,7 +401,7 @@ func (f *Fs) newObjectWithInfo(remote string, info os.FileInfo) (fs.Object, erro
|
||||
|
||||
}
|
||||
if o.mode.IsDir() {
|
||||
return nil, fs.ErrorIsDir
|
||||
return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
@@ -570,8 +569,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
||||
|
||||
// Mkdir creates the directory if it doesn't exist
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
|
||||
localPath := f.localPath(dir)
|
||||
err := file.MkdirAll(localPath, 0777)
|
||||
err := os.MkdirAll(localPath, 0777)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -765,7 +765,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
|
||||
// Create parent of destination
|
||||
dstParentPath := filepath.Dir(dstPath)
|
||||
err = file.MkdirAll(dstParentPath, 0777)
|
||||
err = os.MkdirAll(dstParentPath, 0777)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1099,7 +1099,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
// mkdirAll makes all the directories needed to store the object
|
||||
func (o *Object) mkdirAll() error {
|
||||
dir := filepath.Dir(o.path)
|
||||
return file.MkdirAll(dir, 0777)
|
||||
return os.MkdirAll(dir, 0777)
|
||||
}
|
||||
|
||||
type nopWriterCloser struct {
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
// Device reading functions
|
||||
|
||||
//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris
|
||||
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
|
||||
|
||||
package local
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
// Device reading functions
|
||||
|
||||
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
|
||||
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
|
||||
|
||||
package local
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
//+build !windows
|
||||
|
||||
package local
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
//+build windows
|
||||
|
||||
package local
|
||||
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
//go:build !windows && !plan9 && !js
|
||||
// +build !windows,!plan9,!js
|
||||
|
||||
package local
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
//go:build windows || plan9 || js
|
||||
// +build windows plan9 js
|
||||
|
||||
package local
|
||||
|
||||
@@ -3,8 +3,6 @@ package local
|
||||
import (
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
)
|
||||
|
||||
// Test Windows character replacements
|
||||
@@ -23,7 +21,7 @@ func TestCleanWindows(t *testing.T) {
|
||||
t.Skipf("windows only")
|
||||
}
|
||||
for _, test := range testsWindows {
|
||||
got := cleanRootPath(test[0], true, encoder.OS)
|
||||
got := cleanRootPath(test[0], true, defaultEnc)
|
||||
expect := test[1]
|
||||
if got != expect {
|
||||
t.Fatalf("got %q, expected %q", got, expect)
|
||||
|
||||
@@ -87,11 +87,11 @@ func init() {
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "user",
|
||||
Help: "User name (usually email).",
|
||||
Help: "User name (usually email)",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "Password.",
|
||||
Help: "Password",
|
||||
Required: true,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
@@ -99,7 +99,6 @@ func init() {
|
||||
Default: true,
|
||||
Advanced: false,
|
||||
Help: `Skip full upload if there is another file with same data hash.
|
||||
|
||||
This feature is called "speedup" or "put by hash". It is especially efficient
|
||||
in case of generally available files like popular books, video or audio clips,
|
||||
because files are searched by hash in all accounts of all mailru users.
|
||||
@@ -120,7 +119,6 @@ streaming or partial uploads), it will not even try this optimization.`,
|
||||
Default: "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf",
|
||||
Advanced: true,
|
||||
Help: `Comma separated list of file name patterns eligible for speedup (put by hash).
|
||||
|
||||
Patterns are case insensitive and can contain '*' or '?' meta characters.`,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
@@ -139,9 +137,8 @@ Patterns are case insensitive and can contain '*' or '?' meta characters.`,
|
||||
Name: "speedup_max_disk",
|
||||
Default: fs.SizeSuffix(3 * 1024 * 1024 * 1024),
|
||||
Advanced: true,
|
||||
Help: `This option allows you to disable speedup (put by hash) for large files.
|
||||
|
||||
Reason is that preliminary hashing can exhaust your RAM or disk space.`,
|
||||
Help: `This option allows you to disable speedup (put by hash) for large files
|
||||
(because preliminary hashing can exhaust you RAM or disk space)`,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "0",
|
||||
Help: "Completely disable speedup (put by hash).",
|
||||
@@ -171,7 +168,7 @@ Reason is that preliminary hashing can exhaust your RAM or disk space.`,
|
||||
Name: "check_hash",
|
||||
Default: true,
|
||||
Advanced: true,
|
||||
Help: "What should copy do if file checksum is mismatched or invalid.",
|
||||
Help: "What should copy do if file checksum is mismatched or invalid",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "true",
|
||||
Help: "Fail with error.",
|
||||
@@ -185,7 +182,6 @@ Reason is that preliminary hashing can exhaust your RAM or disk space.`,
|
||||
Advanced: true,
|
||||
Hide: fs.OptionHideBoth,
|
||||
Help: `HTTP user agent used internally by client.
|
||||
|
||||
Defaults to "rclone/VERSION" or "--user-agent" provided on command line.`,
|
||||
}, {
|
||||
Name: "quirks",
|
||||
@@ -193,7 +189,6 @@ Defaults to "rclone/VERSION" or "--user-agent" provided on command line.`,
|
||||
Advanced: true,
|
||||
Hide: fs.OptionHideBoth,
|
||||
Help: `Comma separated list of internal maintenance flags.
|
||||
|
||||
This option must not be used by an ordinary user. It is intended only to
|
||||
facilitate remote troubleshooting of backend issues. Strict meaning of
|
||||
flags is not documented and not guaranteed to persist between releases.
|
||||
@@ -1963,7 +1958,7 @@ func (o *Object) readMetaData(ctx context.Context, force bool) error {
|
||||
}
|
||||
newObj, ok := entry.(*Object)
|
||||
if !ok || dirSize >= 0 {
|
||||
return fs.ErrorIsDir
|
||||
return fs.ErrorNotAFile
|
||||
}
|
||||
if newObj.remote != o.remote {
|
||||
return fmt.Errorf("File %q path has changed to %q", o.remote, newObj.remote)
|
||||
|
||||
@@ -59,7 +59,7 @@ func init() {
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "user",
|
||||
Help: "User name.",
|
||||
Help: "User name",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "pass",
|
||||
@@ -303,7 +303,7 @@ func (f *Fs) findObject(rootNode *mega.Node, file string) (node *mega.Node, err
|
||||
if err == mega.ENOENT {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
} else if err == nil && node.GetType() != mega.FILE {
|
||||
return nil, fs.ErrorIsDir // all other node types are directories
|
||||
return nil, fs.ErrorNotAFile
|
||||
}
|
||||
return node, err
|
||||
}
|
||||
@@ -958,7 +958,7 @@ func (o *Object) Size() int64 {
|
||||
// setMetaData sets the metadata from info
|
||||
func (o *Object) setMetaData(info *mega.Node) (err error) {
|
||||
if info.GetType() != mega.FILE {
|
||||
return fs.ErrorIsDir // all other node types are directories
|
||||
return fs.ErrorNotAFile
|
||||
}
|
||||
o.info = info
|
||||
return nil
|
||||
|
||||
@@ -129,12 +129,12 @@ Note that the chunks will be buffered into memory.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "drive_id",
|
||||
Help: "The ID of the drive to use.",
|
||||
Help: "The ID of the drive to use",
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "drive_type",
|
||||
Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
|
||||
Help: "The type of the drive ( " + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + " )",
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
}, {
|
||||
@@ -165,7 +165,7 @@ fall back to normal copy (which will be slightly slower).`,
|
||||
}, {
|
||||
Name: "no_versions",
|
||||
Default: false,
|
||||
Help: `Remove all versions on modifying operations.
|
||||
Help: `Remove all versions on modifying operations
|
||||
|
||||
Onedrive for business creates versions when rclone uploads new files
|
||||
overwriting an existing one and when it sets the modification time.
|
||||
@@ -186,10 +186,10 @@ this flag there.
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "anonymous",
|
||||
Help: "Anyone with the link has access, without needing to sign in.\nThis may include people outside of your organization.\nAnonymous link support may be disabled by an administrator.",
|
||||
Help: "Anyone with the link has access, without needing to sign in. This may include people outside of your organization. Anonymous link support may be disabled by an administrator.",
|
||||
}, {
|
||||
Value: "organization",
|
||||
Help: "Anyone signed into your organization (tenant) can use the link to get access.\nOnly available in OneDrive for Business and SharePoint.",
|
||||
Help: "Anyone signed into your organization (tenant) can use the link to get access. Only available in OneDrive for Business and SharePoint.",
|
||||
}},
|
||||
}, {
|
||||
Name: "link_type",
|
||||
@@ -399,7 +399,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
|
||||
Help: "Root Sharepoint site",
|
||||
}, {
|
||||
Value: "url",
|
||||
Help: "Sharepoint site name or URL\nE.g. mysite or https://contoso.sharepoint.com/sites/mysite",
|
||||
Help: "Sharepoint site name or URL (e.g. mysite or https://contoso.sharepoint.com/sites/mysite)",
|
||||
}, {
|
||||
Value: "search",
|
||||
Help: "Search for a Sharepoint site",
|
||||
@@ -411,7 +411,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
|
||||
Help: "Type in SiteID (advanced)",
|
||||
}, {
|
||||
Value: "path",
|
||||
Help: "Sharepoint server-relative path (advanced)\nE.g. /teams/hr",
|
||||
Help: "Sharepoint server-relative path (advanced, e.g. /teams/hr)",
|
||||
}})
|
||||
case "choose_type_done":
|
||||
// Jump to next state according to config chosen
|
||||
@@ -1715,7 +1715,7 @@ func (o *Object) Size() int64 {
|
||||
// setMetaData sets the metadata from info
|
||||
func (o *Object) setMetaData(info *api.Item) (err error) {
|
||||
if info.GetFolder() != nil {
|
||||
return fs.ErrorIsDir
|
||||
return errors.Wrapf(fs.ErrorNotAFile, "%q", o.remote)
|
||||
}
|
||||
o.hasMetaData = true
|
||||
o.size = info.GetSize()
|
||||
|
||||
@@ -42,7 +42,7 @@ func init() {
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "username",
|
||||
Help: "Username.",
|
||||
Help: "Username",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
|
||||
@@ -166,7 +166,6 @@ type Object struct {
|
||||
id string // ID of the object
|
||||
md5 string // MD5 if known
|
||||
sha1 string // SHA1 if known
|
||||
sha256 string // SHA256 if known
|
||||
link *api.GetFileLinkResult
|
||||
}
|
||||
|
||||
@@ -889,7 +888,7 @@ func (f *Fs) Hashes() hash.Set {
|
||||
//
|
||||
// https://forum.rclone.org/t/pcloud-to-local-no-hashes-in-common/19440
|
||||
if f.opt.Hostname == "eapi.pcloud.com" {
|
||||
return hash.Set(hash.SHA1 | hash.SHA256)
|
||||
return hash.Set(hash.SHA1)
|
||||
}
|
||||
return hash.Set(hash.MD5 | hash.SHA1)
|
||||
}
|
||||
@@ -938,24 +937,19 @@ func (o *Object) getHashes(ctx context.Context) (err error) {
|
||||
|
||||
// Hash returns the SHA-1 of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
var pHash *string
|
||||
switch t {
|
||||
case hash.MD5:
|
||||
pHash = &o.md5
|
||||
case hash.SHA1:
|
||||
pHash = &o.sha1
|
||||
case hash.SHA256:
|
||||
pHash = &o.sha256
|
||||
default:
|
||||
if t != hash.MD5 && t != hash.SHA1 {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
if o.md5 == "" && o.sha1 == "" && o.sha256 == "" {
|
||||
if o.md5 == "" && o.sha1 == "" {
|
||||
err := o.getHashes(ctx)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to get hash")
|
||||
}
|
||||
}
|
||||
return *pHash, nil
|
||||
if t == hash.MD5 {
|
||||
return o.md5, nil
|
||||
}
|
||||
return o.sha1, nil
|
||||
}
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
@@ -984,7 +978,6 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
|
||||
func (o *Object) setHashes(hashes *api.Hashes) {
|
||||
o.sha1 = hashes.SHA1
|
||||
o.md5 = hashes.MD5
|
||||
o.sha256 = hashes.SHA256
|
||||
}
|
||||
|
||||
// readMetaData gets the metadata if it hasn't already been fetched
|
||||
@@ -1099,10 +1092,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
modTime := src.ModTime(ctx)
|
||||
remote := o.Remote()
|
||||
|
||||
if size < 0 {
|
||||
return errors.New("can't upload unknown sizes objects")
|
||||
}
|
||||
|
||||
// Create the directory for the object if it doesn't exist
|
||||
leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true)
|
||||
if err != nil {
|
||||
@@ -1165,14 +1154,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
})
|
||||
if err != nil {
|
||||
// sometimes pcloud leaves a half complete file on
|
||||
// error, so delete it if it exists, trying a few times
|
||||
for i := 0; i < 5; i++ {
|
||||
delObj, delErr := o.fs.NewObject(ctx, o.remote)
|
||||
if delErr == nil && delObj != nil {
|
||||
_ = delObj.Remove(ctx)
|
||||
break
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
// error, so delete it if it exists
|
||||
delObj, delErr := o.fs.NewObject(ctx, o.remote)
|
||||
if delErr == nil && delObj != nil {
|
||||
_ = delObj.Remove(ctx)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -58,7 +58,6 @@ type FolderListResponse struct {
|
||||
Content []Item `json:"content"`
|
||||
Name string `json:"name,omitempty"`
|
||||
ParentID string `json:"parent_id,omitempty"`
|
||||
FolderID string `json:"folder_id,omitempty"`
|
||||
}
|
||||
|
||||
// FolderCreateResponse is the response to folder/create
|
||||
|
||||
@@ -193,7 +193,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, directoriesOn
|
||||
}
|
||||
|
||||
lcLeaf := strings.ToLower(leaf)
|
||||
_, found, err := f.listAll(ctx, directoryID, directoriesOnly, filesOnly, func(item *api.Item) bool {
|
||||
found, err := f.listAll(ctx, directoryID, directoriesOnly, filesOnly, func(item *api.Item) bool {
|
||||
if strings.ToLower(item.Name) == lcLeaf {
|
||||
info = item
|
||||
return true
|
||||
@@ -345,18 +345,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
// FindLeaf finds a directory of name leaf in the folder with ID pathID
|
||||
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
|
||||
// Find the leaf in pathID
|
||||
var newDirID string
|
||||
newDirID, found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
|
||||
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
|
||||
if strings.EqualFold(item.Name, leaf) {
|
||||
pathIDOut = item.ID
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
// Update the Root directory ID to its actual value
|
||||
if pathID == rootID {
|
||||
f.dirCache.SetRootIDAlias(newDirID)
|
||||
}
|
||||
return pathIDOut, found, err
|
||||
}
|
||||
|
||||
@@ -400,17 +395,13 @@ type listAllFn func(*api.Item) bool
|
||||
// Lists the directory required calling the user function on each item found
|
||||
//
|
||||
// If the user fn ever returns true then it early exits with found = true
|
||||
//
|
||||
// It returns a newDirID which is what the system returned as the directory ID
|
||||
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (newDirID string, found bool, err error) {
|
||||
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/folder/list",
|
||||
Parameters: f.baseParams(),
|
||||
}
|
||||
if dirID != rootID {
|
||||
opts.Parameters.Set("id", dirID)
|
||||
}
|
||||
opts.Parameters.Set("id", dirID)
|
||||
opts.Parameters.Set("includebreadcrumbs", "false")
|
||||
|
||||
var result api.FolderListResponse
|
||||
@@ -420,12 +411,11 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return newDirID, found, errors.Wrap(err, "couldn't list files")
|
||||
return found, errors.Wrap(err, "couldn't list files")
|
||||
}
|
||||
if err = result.AsErr(); err != nil {
|
||||
return newDirID, found, errors.Wrap(err, "error while listing")
|
||||
return found, errors.Wrap(err, "error while listing")
|
||||
}
|
||||
newDirID = result.FolderID
|
||||
for i := range result.Content {
|
||||
item := &result.Content[i]
|
||||
if item.Type == api.ItemTypeFolder {
|
||||
@@ -446,6 +436,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -464,7 +455,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
return nil, err
|
||||
}
|
||||
var iErr error
|
||||
_, _, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
|
||||
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
|
||||
remote := path.Join(dir, info.Name)
|
||||
if info.Type == api.ItemTypeFolder {
|
||||
// cache the directory ID for later lookups
|
||||
@@ -568,7 +559,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||
|
||||
// need to check if empty as it will delete recursively by default
|
||||
if check {
|
||||
_, found, err := f.listAll(ctx, rootID, false, false, func(item *api.Item) bool {
|
||||
found, err := f.listAll(ctx, rootID, false, false, func(item *api.Item) bool {
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
@@ -660,11 +651,10 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
|
||||
"id": {newDirectoryID},
|
||||
},
|
||||
}
|
||||
opts.MultipartParams.Set("items[0][id]", id)
|
||||
if isFile {
|
||||
opts.MultipartParams.Set("items[0][type]", "file")
|
||||
opts.MultipartParams.Set("files[]", id)
|
||||
} else {
|
||||
opts.MultipartParams.Set("items[0][type]", "folder")
|
||||
opts.MultipartParams.Set("folders[]", id)
|
||||
}
|
||||
//replacedLeaf := enc.FromStandardName(leaf)
|
||||
var resp *http.Response
|
||||
|
||||
@@ -151,7 +151,7 @@ func (o *Object) readEntry(ctx context.Context) (f *putio.File, err error) {
|
||||
return nil, err
|
||||
}
|
||||
if resp.File.IsDir() {
|
||||
return nil, fs.ErrorIsDir
|
||||
return nil, fs.ErrorNotAFile
|
||||
}
|
||||
return &resp.File, err
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
// Package qingstor provides an interface to QingStor object storage
|
||||
// Home: https://www.qingcloud.com/
|
||||
|
||||
//go:build !plan9 && !js
|
||||
// +build !plan9,!js
|
||||
|
||||
package qingstor
|
||||
@@ -40,36 +39,36 @@ func init() {
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "env_auth",
|
||||
Help: "Get QingStor credentials from runtime.\n\nOnly applies if access_key_id and secret_access_key is blank.",
|
||||
Help: "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.",
|
||||
Default: false,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "false",
|
||||
Help: "Enter QingStor credentials in the next step.",
|
||||
Help: "Enter QingStor credentials in the next step",
|
||||
}, {
|
||||
Value: "true",
|
||||
Help: "Get QingStor credentials from the environment (env vars or IAM).",
|
||||
Help: "Get QingStor credentials from the environment (env vars or IAM)",
|
||||
}},
|
||||
}, {
|
||||
Name: "access_key_id",
|
||||
Help: "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
|
||||
Help: "QingStor Access Key ID\nLeave blank for anonymous access or runtime credentials.",
|
||||
}, {
|
||||
Name: "secret_access_key",
|
||||
Help: "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
|
||||
Help: "QingStor Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Enter an endpoint URL to connection QingStor API.\n\nLeave blank will use the default value \"https://qingstor.com:443\".",
|
||||
Help: "Enter an endpoint URL to connection QingStor API.\nLeave blank will use the default value \"https://qingstor.com:443\"",
|
||||
}, {
|
||||
Name: "zone",
|
||||
Help: "Zone to connect to.\n\nDefault is \"pek3a\".",
|
||||
Help: "Zone to connect to.\nDefault is \"pek3a\".",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "pek3a",
|
||||
Help: "The Beijing (China) Three Zone.\nNeeds location constraint pek3a.",
|
||||
Help: "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
|
||||
}, {
|
||||
Value: "sh1a",
|
||||
Help: "The Shanghai (China) First Zone.\nNeeds location constraint sh1a.",
|
||||
Help: "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
|
||||
}, {
|
||||
Value: "gd2a",
|
||||
Help: "The Guangdong (China) Second Zone.\nNeeds location constraint gd2a.",
|
||||
Help: "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
|
||||
}},
|
||||
}, {
|
||||
Name: "connection_retries",
|
||||
@@ -78,7 +77,7 @@ func init() {
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: `Cutoff for switching to chunked upload.
|
||||
Help: `Cutoff for switching to chunked upload
|
||||
|
||||
Any files larger than this will be uploaded in chunks of chunk_size.
|
||||
The minimum is 0 and the maximum is 5 GiB.`,
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
// Test QingStor filesystem interface
|
||||
|
||||
//go:build !plan9 && !js
|
||||
// +build !plan9,!js
|
||||
|
||||
package qingstor
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
// Build for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
//go:build plan9 || js
|
||||
// +build plan9 js
|
||||
|
||||
package qingstor
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
// Upload object to QingStor
|
||||
|
||||
//go:build !plan9 && !js
|
||||
// +build !plan9,!js
|
||||
|
||||
package qingstor
|
||||
|
||||
338
backend/s3/s3.go
@@ -109,21 +109,21 @@ func init() {
|
||||
}},
|
||||
}, {
|
||||
Name: "env_auth",
|
||||
Help: "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).\n\nOnly applies if access_key_id and secret_access_key is blank.",
|
||||
Help: "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).\nOnly applies if access_key_id and secret_access_key is blank.",
|
||||
Default: false,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "false",
|
||||
Help: "Enter AWS credentials in the next step.",
|
||||
Help: "Enter AWS credentials in the next step",
|
||||
}, {
|
||||
Value: "true",
|
||||
Help: "Get AWS credentials from the environment (env vars or IAM).",
|
||||
Help: "Get AWS credentials from the environment (env vars or IAM)",
|
||||
}},
|
||||
}, {
|
||||
Name: "access_key_id",
|
||||
Help: "AWS Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
|
||||
Help: "AWS Access Key ID.\nLeave blank for anonymous access or runtime credentials.",
|
||||
}, {
|
||||
Name: "secret_access_key",
|
||||
Help: "AWS Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
|
||||
Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
|
||||
}, {
|
||||
// References:
|
||||
// 1. https://docs.aws.amazon.com/general/latest/gr/rande.html
|
||||
@@ -136,76 +136,76 @@ func init() {
|
||||
Help: "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia, or Pacific Northwest.\nLeave location constraint empty.",
|
||||
}, {
|
||||
Value: "us-east-2",
|
||||
Help: "US East (Ohio) Region.\nNeeds location constraint us-east-2.",
|
||||
Help: "US East (Ohio) Region\nNeeds location constraint us-east-2.",
|
||||
}, {
|
||||
Value: "us-west-1",
|
||||
Help: "US West (Northern California) Region.\nNeeds location constraint us-west-1.",
|
||||
Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.",
|
||||
}, {
|
||||
Value: "us-west-2",
|
||||
Help: "US West (Oregon) Region.\nNeeds location constraint us-west-2.",
|
||||
Help: "US West (Oregon) Region\nNeeds location constraint us-west-2.",
|
||||
}, {
|
||||
Value: "ca-central-1",
|
||||
Help: "Canada (Central) Region.\nNeeds location constraint ca-central-1.",
|
||||
Help: "Canada (Central) Region\nNeeds location constraint ca-central-1.",
|
||||
}, {
|
||||
Value: "eu-west-1",
|
||||
Help: "EU (Ireland) Region.\nNeeds location constraint EU or eu-west-1.",
|
||||
Help: "EU (Ireland) Region\nNeeds location constraint EU or eu-west-1.",
|
||||
}, {
|
||||
Value: "eu-west-2",
|
||||
Help: "EU (London) Region.\nNeeds location constraint eu-west-2.",
|
||||
Help: "EU (London) Region\nNeeds location constraint eu-west-2.",
|
||||
}, {
|
||||
Value: "eu-west-3",
|
||||
Help: "EU (Paris) Region.\nNeeds location constraint eu-west-3.",
|
||||
Help: "EU (Paris) Region\nNeeds location constraint eu-west-3.",
|
||||
}, {
|
||||
Value: "eu-north-1",
|
||||
Help: "EU (Stockholm) Region.\nNeeds location constraint eu-north-1.",
|
||||
Help: "EU (Stockholm) Region\nNeeds location constraint eu-north-1.",
|
||||
}, {
|
||||
Value: "eu-south-1",
|
||||
Help: "EU (Milan) Region.\nNeeds location constraint eu-south-1.",
|
||||
Help: "EU (Milan) Region\nNeeds location constraint eu-south-1.",
|
||||
}, {
|
||||
Value: "eu-central-1",
|
||||
Help: "EU (Frankfurt) Region.\nNeeds location constraint eu-central-1.",
|
||||
Help: "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
|
||||
}, {
|
||||
Value: "ap-southeast-1",
|
||||
Help: "Asia Pacific (Singapore) Region.\nNeeds location constraint ap-southeast-1.",
|
||||
Help: "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.",
|
||||
}, {
|
||||
Value: "ap-southeast-2",
|
||||
Help: "Asia Pacific (Sydney) Region.\nNeeds location constraint ap-southeast-2.",
|
||||
Help: "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2.",
|
||||
}, {
|
||||
Value: "ap-northeast-1",
|
||||
Help: "Asia Pacific (Tokyo) Region.\nNeeds location constraint ap-northeast-1.",
|
||||
Help: "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.",
|
||||
}, {
|
||||
Value: "ap-northeast-2",
|
||||
Help: "Asia Pacific (Seoul).\nNeeds location constraint ap-northeast-2.",
|
||||
Help: "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2.",
|
||||
}, {
|
||||
Value: "ap-northeast-3",
|
||||
Help: "Asia Pacific (Osaka-Local).\nNeeds location constraint ap-northeast-3.",
|
||||
Help: "Asia Pacific (Osaka-Local)\nNeeds location constraint ap-northeast-3.",
|
||||
}, {
|
||||
Value: "ap-south-1",
|
||||
Help: "Asia Pacific (Mumbai).\nNeeds location constraint ap-south-1.",
|
||||
Help: "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.",
|
||||
}, {
|
||||
Value: "ap-east-1",
|
||||
Help: "Asia Pacific (Hong Kong) Region.\nNeeds location constraint ap-east-1.",
|
||||
Help: "Asia Pacific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
|
||||
}, {
|
||||
Value: "sa-east-1",
|
||||
Help: "South America (Sao Paulo) Region.\nNeeds location constraint sa-east-1.",
|
||||
Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
|
||||
}, {
|
||||
Value: "me-south-1",
|
||||
Help: "Middle East (Bahrain) Region.\nNeeds location constraint me-south-1.",
|
||||
Help: "Middle East (Bahrain) Region\nNeeds location constraint me-south-1.",
|
||||
}, {
|
||||
Value: "af-south-1",
|
||||
Help: "Africa (Cape Town) Region.\nNeeds location constraint af-south-1.",
|
||||
Help: "Africa (Cape Town) Region\nNeeds location constraint af-south-1.",
|
||||
}, {
|
||||
Value: "cn-north-1",
|
||||
Help: "China (Beijing) Region.\nNeeds location constraint cn-north-1.",
|
||||
Help: "China (Beijing) Region\nNeeds location constraint cn-north-1.",
|
||||
}, {
|
||||
Value: "cn-northwest-1",
|
||||
Help: "China (Ningxia) Region.\nNeeds location constraint cn-northwest-1.",
|
||||
Help: "China (Ningxia) Region\nNeeds location constraint cn-northwest-1.",
|
||||
}, {
|
||||
Value: "us-gov-east-1",
|
||||
Help: "AWS GovCloud (US-East) Region.\nNeeds location constraint us-gov-east-1.",
|
||||
Help: "AWS GovCloud (US-East) Region\nNeeds location constraint us-gov-east-1.",
|
||||
}, {
|
||||
Value: "us-gov-west-1",
|
||||
Help: "AWS GovCloud (US) Region.\nNeeds location constraint us-gov-west-1.",
|
||||
Help: "AWS GovCloud (US) Region\nNeeds location constraint us-gov-west-1.",
|
||||
}},
|
||||
}, {
|
||||
Name: "region",
|
||||
@@ -220,22 +220,22 @@ func init() {
|
||||
}},
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
|
||||
Help: "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
|
||||
Provider: "!AWS,Alibaba,Scaleway,TencentCOS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
|
||||
Help: "Use this if unsure. Will use v4 signatures and an empty region.",
|
||||
}, {
|
||||
Value: "other-v2-signature",
|
||||
Help: "Use this only if v4 signatures don't work.\nE.g. pre Jewel/v10 CEPH.",
|
||||
Help: "Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH.",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.\n\nLeave blank if using AWS to use the default endpoint for the region.",
|
||||
Help: "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.",
|
||||
Provider: "AWS",
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for IBM COS S3 API.\n\nSpecify if using an IBM COS On Premise.",
|
||||
Help: "Endpoint for IBM COS S3 API.\nSpecify if using an IBM COS On Premise.",
|
||||
Provider: "IBMCOS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "s3.us.cloud-object-storage.appdomain.cloud",
|
||||
@@ -537,65 +537,65 @@ func init() {
|
||||
Provider: "TencentCOS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "cos.ap-beijing.myqcloud.com",
|
||||
Help: "Beijing Region",
|
||||
Help: "Beijing Region.",
|
||||
}, {
|
||||
Value: "cos.ap-nanjing.myqcloud.com",
|
||||
Help: "Nanjing Region",
|
||||
Help: "Nanjing Region.",
|
||||
}, {
|
||||
Value: "cos.ap-shanghai.myqcloud.com",
|
||||
Help: "Shanghai Region",
|
||||
Help: "Shanghai Region.",
|
||||
}, {
|
||||
Value: "cos.ap-guangzhou.myqcloud.com",
|
||||
Help: "Guangzhou Region",
|
||||
Help: "Guangzhou Region.",
|
||||
}, {
|
||||
Value: "cos.ap-nanjing.myqcloud.com",
|
||||
Help: "Nanjing Region",
|
||||
Help: "Nanjing Region.",
|
||||
}, {
|
||||
Value: "cos.ap-chengdu.myqcloud.com",
|
||||
Help: "Chengdu Region",
|
||||
Help: "Chengdu Region.",
|
||||
}, {
|
||||
Value: "cos.ap-chongqing.myqcloud.com",
|
||||
Help: "Chongqing Region",
|
||||
Help: "Chongqing Region.",
|
||||
}, {
|
||||
Value: "cos.ap-hongkong.myqcloud.com",
|
||||
Help: "Hong Kong (China) Region",
|
||||
Help: "Hong Kong (China) Region.",
|
||||
}, {
|
||||
Value: "cos.ap-singapore.myqcloud.com",
|
||||
Help: "Singapore Region",
|
||||
Help: "Singapore Region.",
|
||||
}, {
|
||||
Value: "cos.ap-mumbai.myqcloud.com",
|
||||
Help: "Mumbai Region",
|
||||
Help: "Mumbai Region.",
|
||||
}, {
|
||||
Value: "cos.ap-seoul.myqcloud.com",
|
||||
Help: "Seoul Region",
|
||||
Help: "Seoul Region.",
|
||||
}, {
|
||||
Value: "cos.ap-bangkok.myqcloud.com",
|
||||
Help: "Bangkok Region",
|
||||
Help: "Bangkok Region.",
|
||||
}, {
|
||||
Value: "cos.ap-tokyo.myqcloud.com",
|
||||
Help: "Tokyo Region",
|
||||
Help: "Tokyo Region.",
|
||||
}, {
|
||||
Value: "cos.na-siliconvalley.myqcloud.com",
|
||||
Help: "Silicon Valley Region",
|
||||
Help: "Silicon Valley Region.",
|
||||
}, {
|
||||
Value: "cos.na-ashburn.myqcloud.com",
|
||||
Help: "Virginia Region",
|
||||
Help: "Virginia Region.",
|
||||
}, {
|
||||
Value: "cos.na-toronto.myqcloud.com",
|
||||
Help: "Toronto Region",
|
||||
Help: "Toronto Region.",
|
||||
}, {
|
||||
Value: "cos.eu-frankfurt.myqcloud.com",
|
||||
Help: "Frankfurt Region",
|
||||
Help: "Frankfurt Region.",
|
||||
}, {
|
||||
Value: "cos.eu-moscow.myqcloud.com",
|
||||
Help: "Moscow Region",
|
||||
Help: "Moscow Region.",
|
||||
}, {
|
||||
Value: "cos.accelerate.myqcloud.com",
|
||||
Help: "Use Tencent COS Accelerate Endpoint",
|
||||
Help: "Use Tencent COS Accelerate Endpoint.",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
|
||||
Help: "Endpoint for S3 API.\nRequired when using an S3 clone.",
|
||||
Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "objects-us-east-1.dream.io",
|
||||
@@ -629,94 +629,90 @@ func init() {
|
||||
Value: "s3.eu-central-1.wasabisys.com",
|
||||
Help: "Wasabi EU Central endpoint",
|
||||
Provider: "Wasabi",
|
||||
}, {
|
||||
Value: "s3.ap-northeast-1.wasabisys.com",
|
||||
Help: "Wasabi AP Northeast endpoint",
|
||||
Provider: "Wasabi",
|
||||
}},
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must be set to match the Region.\n\nUsed when creating buckets only.",
|
||||
Help: "Location constraint - must be set to match the Region.\nUsed when creating buckets only.",
|
||||
Provider: "AWS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Empty for US Region, Northern Virginia, or Pacific Northwest",
|
||||
Help: "Empty for US Region, Northern Virginia, or Pacific Northwest.",
|
||||
}, {
|
||||
Value: "us-east-2",
|
||||
Help: "US East (Ohio) Region",
|
||||
Help: "US East (Ohio) Region.",
|
||||
}, {
|
||||
Value: "us-west-1",
|
||||
Help: "US West (Northern California) Region",
|
||||
Help: "US West (Northern California) Region.",
|
||||
}, {
|
||||
Value: "us-west-2",
|
||||
Help: "US West (Oregon) Region",
|
||||
Help: "US West (Oregon) Region.",
|
||||
}, {
|
||||
Value: "ca-central-1",
|
||||
Help: "Canada (Central) Region",
|
||||
Help: "Canada (Central) Region.",
|
||||
}, {
|
||||
Value: "eu-west-1",
|
||||
Help: "EU (Ireland) Region",
|
||||
Help: "EU (Ireland) Region.",
|
||||
}, {
|
||||
Value: "eu-west-2",
|
||||
Help: "EU (London) Region",
|
||||
Help: "EU (London) Region.",
|
||||
}, {
|
||||
Value: "eu-west-3",
|
||||
Help: "EU (Paris) Region",
|
||||
Help: "EU (Paris) Region.",
|
||||
}, {
|
||||
Value: "eu-north-1",
|
||||
Help: "EU (Stockholm) Region",
|
||||
Help: "EU (Stockholm) Region.",
|
||||
}, {
|
||||
Value: "eu-south-1",
|
||||
Help: "EU (Milan) Region",
|
||||
Help: "EU (Milan) Region.",
|
||||
}, {
|
||||
Value: "EU",
|
||||
Help: "EU Region",
|
||||
Help: "EU Region.",
|
||||
}, {
|
||||
Value: "ap-southeast-1",
|
||||
Help: "Asia Pacific (Singapore) Region",
|
||||
Help: "Asia Pacific (Singapore) Region.",
|
||||
}, {
|
||||
Value: "ap-southeast-2",
|
||||
Help: "Asia Pacific (Sydney) Region",
|
||||
Help: "Asia Pacific (Sydney) Region.",
|
||||
}, {
|
||||
Value: "ap-northeast-1",
|
||||
Help: "Asia Pacific (Tokyo) Region",
|
||||
Help: "Asia Pacific (Tokyo) Region.",
|
||||
}, {
|
||||
Value: "ap-northeast-2",
|
||||
Help: "Asia Pacific (Seoul) Region",
|
||||
Help: "Asia Pacific (Seoul) Region.",
|
||||
}, {
|
||||
Value: "ap-northeast-3",
|
||||
Help: "Asia Pacific (Osaka-Local) Region",
|
||||
Help: "Asia Pacific (Osaka-Local) Region.",
|
||||
}, {
|
||||
Value: "ap-south-1",
|
||||
Help: "Asia Pacific (Mumbai) Region",
|
||||
Help: "Asia Pacific (Mumbai) Region.",
|
||||
}, {
|
||||
Value: "ap-east-1",
|
||||
Help: "Asia Pacific (Hong Kong) Region",
|
||||
Help: "Asia Pacific (Hong Kong) Region.",
|
||||
}, {
|
||||
Value: "sa-east-1",
|
||||
Help: "South America (Sao Paulo) Region",
|
||||
Help: "South America (Sao Paulo) Region.",
|
||||
}, {
|
||||
Value: "me-south-1",
|
||||
Help: "Middle East (Bahrain) Region",
|
||||
Help: "Middle East (Bahrain) Region.",
|
||||
}, {
|
||||
Value: "af-south-1",
|
||||
Help: "Africa (Cape Town) Region",
|
||||
Help: "Africa (Cape Town) Region.",
|
||||
}, {
|
||||
Value: "cn-north-1",
|
||||
Help: "China (Beijing) Region",
|
||||
}, {
|
||||
Value: "cn-northwest-1",
|
||||
Help: "China (Ningxia) Region",
|
||||
Help: "China (Ningxia) Region.",
|
||||
}, {
|
||||
Value: "us-gov-east-1",
|
||||
Help: "AWS GovCloud (US-East) Region",
|
||||
Help: "AWS GovCloud (US-East) Region.",
|
||||
}, {
|
||||
Value: "us-gov-west-1",
|
||||
Help: "AWS GovCloud (US) Region",
|
||||
Help: "AWS GovCloud (US) Region.",
|
||||
}},
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must match endpoint when using IBM Cloud Public.\n\nFor on-prem COS, do not make a selection from this list, hit enter.",
|
||||
Help: "Location constraint - must match endpoint when using IBM Cloud Public.\nFor on-prem COS, do not make a selection from this list, hit enter",
|
||||
Provider: "IBMCOS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "us-standard",
|
||||
@@ -817,7 +813,7 @@ func init() {
|
||||
}},
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
|
||||
Help: "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
|
||||
Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath,TencentCOS",
|
||||
}, {
|
||||
Name: "acl",
|
||||
@@ -831,27 +827,27 @@ Note that this ACL is applied when server-side copying objects as S3
|
||||
doesn't copy the ACL from the source but rather writes a fresh one.`,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "default",
|
||||
Help: "Owner gets Full_CONTROL.\nNo one else has access rights (default).",
|
||||
Help: "Owner gets Full_CONTROL. No one else has access rights (default).",
|
||||
Provider: "TencentCOS",
|
||||
}, {
|
||||
Value: "private",
|
||||
Help: "Owner gets FULL_CONTROL.\nNo one else has access rights (default).",
|
||||
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
|
||||
Provider: "!IBMCOS,TencentCOS",
|
||||
}, {
|
||||
Value: "public-read",
|
||||
Help: "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ access.",
|
||||
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
|
||||
Provider: "!IBMCOS",
|
||||
}, {
|
||||
Value: "public-read-write",
|
||||
Help: "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
|
||||
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
|
||||
Provider: "!IBMCOS",
|
||||
}, {
|
||||
Value: "authenticated-read",
|
||||
Help: "Owner gets FULL_CONTROL.\nThe AuthenticatedUsers group gets READ access.",
|
||||
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
|
||||
Provider: "!IBMCOS",
|
||||
}, {
|
||||
Value: "bucket-owner-read",
|
||||
Help: "Object owner gets FULL_CONTROL.\nBucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
|
||||
Help: "Object owner gets FULL_CONTROL. Bucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
|
||||
Provider: "!IBMCOS",
|
||||
}, {
|
||||
Value: "bucket-owner-full-control",
|
||||
@@ -859,19 +855,19 @@ doesn't copy the ACL from the source but rather writes a fresh one.`,
|
||||
Provider: "!IBMCOS",
|
||||
}, {
|
||||
Value: "private",
|
||||
Help: "Owner gets FULL_CONTROL.\nNo one else has access rights (default).\nThis acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS.",
|
||||
Help: "Owner gets FULL_CONTROL. No one else has access rights (default). This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS",
|
||||
Provider: "IBMCOS",
|
||||
}, {
|
||||
Value: "public-read",
|
||||
Help: "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ access.\nThis acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS.",
|
||||
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access. This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS",
|
||||
Provider: "IBMCOS",
|
||||
}, {
|
||||
Value: "public-read-write",
|
||||
Help: "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ and WRITE access.\nThis acl is available on IBM Cloud (Infra), On-Premise IBM COS.",
|
||||
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access. This acl is available on IBM Cloud (Infra), On-Premise IBM COS",
|
||||
Provider: "IBMCOS",
|
||||
}, {
|
||||
Value: "authenticated-read",
|
||||
Help: "Owner gets FULL_CONTROL.\nThe AuthenticatedUsers group gets READ access.\nNot supported on Buckets.\nThis acl is available on IBM Cloud (Infra) and On-Premise IBM COS.",
|
||||
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS",
|
||||
Provider: "IBMCOS",
|
||||
}},
|
||||
}, {
|
||||
@@ -885,16 +881,16 @@ isn't set then "acl" is used instead.`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "private",
|
||||
Help: "Owner gets FULL_CONTROL.\nNo one else has access rights (default).",
|
||||
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
|
||||
}, {
|
||||
Value: "public-read",
|
||||
Help: "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ access.",
|
||||
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
|
||||
}, {
|
||||
Value: "public-read-write",
|
||||
Help: "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
|
||||
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
|
||||
}, {
|
||||
Value: "authenticated-read",
|
||||
Help: "Owner gets FULL_CONTROL.\nThe AuthenticatedUsers group gets READ access.",
|
||||
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
|
||||
}},
|
||||
}, {
|
||||
Name: "requester_pays",
|
||||
@@ -1002,10 +998,10 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
|
||||
Help: "Standard storage class",
|
||||
}, {
|
||||
Value: "GLACIER",
|
||||
Help: "Archive storage mode",
|
||||
Help: "Archive storage mode.",
|
||||
}, {
|
||||
Value: "STANDARD_IA",
|
||||
Help: "Infrequent access storage mode",
|
||||
Help: "Infrequent access storage mode.",
|
||||
}},
|
||||
}, {
|
||||
// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
|
||||
@@ -1020,10 +1016,10 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
|
||||
Help: "Standard storage class",
|
||||
}, {
|
||||
Value: "ARCHIVE",
|
||||
Help: "Archive storage mode",
|
||||
Help: "Archive storage mode.",
|
||||
}, {
|
||||
Value: "STANDARD_IA",
|
||||
Help: "Infrequent access storage mode",
|
||||
Help: "Infrequent access storage mode.",
|
||||
}},
|
||||
}, {
|
||||
// Mapping from here: https://www.scaleway.com/en/docs/object-storage-glacier/#-Scaleway-Storage-Classes
|
||||
@@ -1032,17 +1028,17 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
|
||||
Provider: "Scaleway",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Default.",
|
||||
Help: "Default",
|
||||
}, {
|
||||
Value: "STANDARD",
|
||||
Help: "The Standard class for any upload.\nSuitable for on-demand content like streaming or CDN.",
|
||||
Help: "The Standard class for any upload; suitable for on-demand content like streaming or CDN.",
|
||||
}, {
|
||||
Value: "GLACIER",
|
||||
Help: "Archived storage.\nPrices are lower, but it needs to be restored first to be accessed.",
|
||||
Help: "Archived storage; prices are lower, but it needs to be restored first to be accessed.",
|
||||
}},
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: `Cutoff for switching to chunked upload.
|
||||
Help: `Cutoff for switching to chunked upload
|
||||
|
||||
Any files larger than this will be uploaded in chunks of chunk_size.
|
||||
The minimum is 0 and the maximum is 5 GiB.`,
|
||||
@@ -1090,7 +1086,7 @@ large file of a known size to stay below this number of chunks limit.
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "copy_cutoff",
|
||||
Help: `Cutoff for switching to multipart copy.
|
||||
Help: `Cutoff for switching to multipart copy
|
||||
|
||||
Any files larger than this that need to be server-side copied will be
|
||||
copied in chunks of this size.
|
||||
@@ -1100,7 +1096,7 @@ The minimum is 0 and the maximum is 5 GiB.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "disable_checksum",
|
||||
Help: `Don't store MD5 checksum with object metadata.
|
||||
Help: `Don't store MD5 checksum with object metadata
|
||||
|
||||
Normally rclone will calculate the MD5 checksum of the input before
|
||||
uploading it so it can add it to metadata on the object. This is great
|
||||
@@ -1110,7 +1106,7 @@ to start uploading.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "shared_credentials_file",
|
||||
Help: `Path to the shared credentials file.
|
||||
Help: `Path to the shared credentials file
|
||||
|
||||
If env_auth = true then rclone can use a shared credentials file.
|
||||
|
||||
@@ -1124,7 +1120,7 @@ it will default to the current user's home directory.
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "profile",
|
||||
Help: `Profile to use in the shared credentials file.
|
||||
Help: `Profile to use in the shared credentials file
|
||||
|
||||
If env_auth = true then rclone can use a shared credentials file. This
|
||||
variable controls which profile is used in that file.
|
||||
@@ -1135,7 +1131,7 @@ If empty it will default to the environment variable "AWS_PROFILE" or
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "session_token",
|
||||
Help: "An AWS session token.",
|
||||
Help: "An AWS session token",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_concurrency",
|
||||
@@ -1205,7 +1201,7 @@ In Ceph, this can be increased with the "rgw list buckets max chunk" option.
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_check_bucket",
|
||||
Help: `If set, don't attempt to check the bucket exists or create it.
|
||||
Help: `If set, don't attempt to check the bucket exists or create it
|
||||
|
||||
This can be useful when trying to minimise the number of transactions
|
||||
rclone does if you know the bucket exists already.
|
||||
@@ -1218,7 +1214,7 @@ due to a bug.
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_head",
|
||||
Help: `If set, don't HEAD uploaded objects to check integrity.
|
||||
Help: `If set, don't HEAD uploaded objects to check integrity
|
||||
|
||||
This can be useful when trying to minimise the number of transactions
|
||||
rclone does.
|
||||
@@ -1251,7 +1247,7 @@ very small even with this flag.
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_head_object",
|
||||
Help: `If set, do not do HEAD before GET when getting objects.`,
|
||||
Help: `If set, don't HEAD objects`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
@@ -1276,7 +1272,6 @@ very small even with this flag.
|
||||
Default: memoryPoolFlushTime,
|
||||
Advanced: true,
|
||||
Help: `How often internal memory buffer pools will be flushed.
|
||||
|
||||
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
|
||||
This option controls how often unused buffers will be removed from the pool.`,
|
||||
}, {
|
||||
@@ -1288,7 +1283,7 @@ This option controls how often unused buffers will be removed from the pool.`,
|
||||
Name: "disable_http2",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
Help: `Disable usage of http2 for S3 backends.
|
||||
Help: `Disable usage of http2 for S3 backends
|
||||
|
||||
There is currently an unsolved issue with the s3 (specifically minio) backend
|
||||
and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be
|
||||
@@ -1297,12 +1292,6 @@ disabled here. When the issue is solved this flag will be removed.
|
||||
See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631
|
||||
|
||||
`,
|
||||
}, {
|
||||
Name: "download_url",
|
||||
Help: `Custom endpoint for downloads.
|
||||
This is usually set to a CloudFront CDN URL as AWS S3 offers
|
||||
cheaper egress for data downloaded through the CloudFront network.`,
|
||||
Advanced: true,
|
||||
},
|
||||
}})
|
||||
}
|
||||
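A few lines above, the disable_http2 help describes working around the unresolved minio/HTTP-2 issue by turning HTTP/2 off for the S3 backend. In the standard library this is conventionally done by giving the transport a non-nil, empty TLSNextProto map; a minimal sketch of that mechanism (not rclone's actual fshttp wiring, just the net/http idiom it relies on):

package main

import (
	"crypto/tls"
	"net/http"
)

// newHTTP1Client returns a client whose transport never negotiates HTTP/2.
// Setting TLSNextProto to a non-nil empty map is the documented way to
// disable HTTP/2 on a custom net/http transport.
func newHTTP1Client() *http.Client {
	tr := &http.Transport{
		TLSNextProto: map[string]func(authority string, c *tls.Conn) http.RoundTripper{},
	}
	return &http.Client{Transport: tr}
}

func main() {
	client := newHTTP1Client()
	_ = client // use the HTTP/1.1-only client for S3-style requests
}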
@@ -1364,7 +1353,6 @@ type Options struct {
|
||||
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
|
||||
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
|
||||
DisableHTTP2 bool `config:"disable_http2"`
|
||||
DownloadURL string `config:"download_url"`
|
||||
}
|
||||
|
||||
// Fs represents a remote s3 server
|
||||
@@ -1382,7 +1370,6 @@ type Fs struct {
|
||||
cache *bucket.Cache // cache for bucket creation status
|
||||
pacer *fs.Pacer // To pace the API calls
|
||||
srv *http.Client // a plain http client
|
||||
srvRest *rest.Client // the rest connection to the server
|
||||
pool *pool.Pool // memory pool
|
||||
etagIsNotMD5 bool // if set ETags are not MD5s
|
||||
}
|
||||
@@ -1509,7 +1496,6 @@ func getClient(ctx context.Context, opt *Options) *http.Client {
|
||||
|
||||
// s3Connection makes a connection to s3
|
||||
func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S3, *session.Session, error) {
|
||||
ci := fs.GetConfig(ctx)
|
||||
// Make the auth
|
||||
v := credentials.Value{
|
||||
AccessKeyID: opt.AccessKeyID,
|
||||
@@ -1580,7 +1566,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
|
||||
opt.MaxUploadParts = 1000
|
||||
}
|
||||
awsConfig := aws.NewConfig().
|
||||
WithMaxRetries(ci.LowLevelRetries).
|
||||
WithMaxRetries(0). // Rely on rclone's retry logic
|
||||
WithCredentials(cred).
|
||||
WithHTTPClient(client).
|
||||
WithS3ForcePathStyle(opt.ForcePathStyle).
|
||||
@@ -1603,13 +1589,6 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
|
||||
awsSessionOpts.SharedConfigState = session.SharedConfigEnable
|
||||
// Set the name of the profile if supplied
|
||||
awsSessionOpts.Profile = opt.Profile
|
||||
// Set the shared config file if supplied
|
||||
if opt.SharedCredentialsFile != "" {
|
||||
awsSessionOpts.SharedConfigFiles = []string{opt.SharedCredentialsFile}
|
||||
}
|
||||
// The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source
|
||||
// (from the shared config file) if the passed-in Options.Config.Credentials is nil.
|
||||
awsSessionOpts.Config.Credentials = nil
|
||||
}
|
||||
ses, err := session.NewSessionWithOptions(awsSessionOpts)
|
||||
if err != nil {
|
||||
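The comment above about aws/session/mergeConfigSrcs is why the diff nils out Config.Credentials: the SDK only honours the credential source named in the shared config file when no explicit credentials are passed in. A rough sketch of that setup, assuming aws-sdk-go v1's session package; the profile name and file path are illustrative, not values from the diff:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	opts := session.Options{
		// Enable loading of ~/.aws/config style settings.
		SharedConfigState: session.SharedConfigEnable,
		// Illustrative profile and credentials file path.
		Profile:           "example-profile",
		SharedConfigFiles: []string{"/home/user/.aws/credentials"},
	}
	// Leaving opts.Config.Credentials nil lets the credential source named in
	// the shared config file (e.g. source_profile) win, per the
	// mergeConfigSrcs behaviour referenced above.
	ses, err := session.NewSessionWithOptions(opts)
	if err != nil {
		log.Fatal(err)
	}
	_ = ses
	fmt.Println("session created with shared config")
}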
@@ -1702,23 +1681,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
|
||||
ci := fs.GetConfig(ctx)
|
||||
pc := fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep)))
|
||||
// Set pacer retries to 2 (1 try and 1 retry) because we are
|
||||
// relying on SDK retry mechanism, but we allow 2 attempts to
|
||||
// retry directory listings after XMLSyntaxError
|
||||
pc.SetRetries(2)
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
ci: ci,
|
||||
ctx: ctx,
|
||||
c: c,
|
||||
ses: ses,
|
||||
pacer: pc,
|
||||
cache: bucket.NewCache(),
|
||||
srv: srv,
|
||||
srvRest: rest.NewClient(fshttp.NewClient(ctx)),
|
||||
name: name,
|
||||
opt: *opt,
|
||||
ci: ci,
|
||||
ctx: ctx,
|
||||
c: c,
|
||||
ses: ses,
|
||||
pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
|
||||
cache: bucket.NewCache(),
|
||||
srv: srv,
|
||||
pool: pool.New(
|
||||
time.Duration(opt.MemoryPoolFlushTime),
|
||||
int(opt.ChunkSize),
|
||||
@@ -2892,12 +2864,14 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||
}
|
||||
|
||||
func (o *Object) setMetaData(etag *string, contentLength *int64, lastModified *time.Time, meta map[string]*string, mimeType *string, storageClass *string) {
|
||||
var size int64
|
||||
// Ignore missing Content-Length assuming it is 0
|
||||
// Some versions of ceph do this due their apache proxies
|
||||
if contentLength != nil {
|
||||
o.bytes = *contentLength
|
||||
size = *contentLength
|
||||
}
|
||||
o.setMD5FromEtag(aws.StringValue(etag))
|
||||
o.bytes = size
|
||||
o.meta = meta
|
||||
if o.meta == nil {
|
||||
o.meta = map[string]*string{}
|
||||
@@ -2980,71 +2954,9 @@ func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
url := o.fs.opt.DownloadURL + bucketPath
|
||||
var resp *http.Response
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: url,
|
||||
Options: options,
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srvRest.Call(ctx, &opts)
|
||||
return o.fs.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Failed to parse content length from string %s, %v", resp.Header.Get("Content-Length"), err)
|
||||
}
|
||||
contentLength := &size
|
||||
if resp.Header.Get("Content-Range") != "" {
|
||||
var contentRange = resp.Header.Get("Content-Range")
|
||||
slash := strings.IndexRune(contentRange, '/')
|
||||
if slash >= 0 {
|
||||
i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
|
||||
if err == nil {
|
||||
contentLength = &i
|
||||
} else {
|
||||
fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err)
|
||||
}
|
||||
} else {
|
||||
fs.Debugf(o, "Failed to find length in %q", contentRange)
|
||||
}
|
||||
}
|
||||
|
||||
lastModified, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Failed to parse last modified from string %s, %v", resp.Header.Get("Last-Modified"), err)
|
||||
}
|
||||
|
||||
metaData := make(map[string]*string)
|
||||
for key, value := range resp.Header {
|
||||
if strings.HasPrefix(key, "x-amz-meta") {
|
||||
metaKey := strings.TrimPrefix(key, "x-amz-meta-")
|
||||
metaData[strings.Title(metaKey)] = &value[0]
|
||||
}
|
||||
}
|
||||
|
||||
storageClass := resp.Header.Get("X-Amz-Storage-Class")
|
||||
contentType := resp.Header.Get("Content-Type")
|
||||
etag := resp.Header.Get("Etag")
|
||||
|
||||
o.setMetaData(&etag, contentLength, &lastModified, metaData, &contentType, &storageClass)
|
||||
return resp.Body, err
|
||||
}
|
||||
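downloadFromURL above prefers the total length from a Content-Range header over Content-Length, because a ranged response only reports the size of the partial body. A small self-contained sketch of that parsing, independent of the rclone types:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// totalFromContentRange extracts the full object size from a header such as
// "bytes 0-1023/4096". It returns ok=false when the total is absent or malformed.
func totalFromContentRange(contentRange string) (int64, bool) {
	slash := strings.IndexRune(contentRange, '/')
	if slash < 0 {
		return 0, false
	}
	total, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
	if err != nil {
		return 0, false
	}
	return total, true
}

func main() {
	if size, ok := totalFromContentRange("bytes 0-1023/4096"); ok {
		fmt.Println("object size:", size) // 4096
	}
}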
|
||||
// Open an object for read
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
bucket, bucketPath := o.split()
|
||||
|
||||
if o.fs.opt.DownloadURL != "" {
|
||||
return o.downloadFromURL(ctx, bucketPath, options...)
|
||||
}
|
||||
|
||||
req := s3.GetObjectInput{
|
||||
Bucket: &bucket,
|
||||
Key: &bucketPath,
|
||||
|
||||
@@ -60,41 +60,41 @@ func init() {
|
||||
Config: Config,
|
||||
Options: []fs.Option{{
|
||||
Name: configURL,
|
||||
Help: "URL of seafile host to connect to.",
|
||||
Help: "URL of seafile host to connect to",
|
||||
Required: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "https://cloud.seafile.com/",
|
||||
Help: "Connect to cloud.seafile.com.",
|
||||
Help: "Connect to cloud.seafile.com",
|
||||
}},
|
||||
}, {
|
||||
Name: configUser,
|
||||
Help: "User name (usually email address).",
|
||||
Help: "User name (usually email address)",
|
||||
Required: true,
|
||||
}, {
|
||||
// Password is not required, it will be left blank for 2FA
|
||||
Name: configPassword,
|
||||
Help: "Password.",
|
||||
Help: "Password",
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: config2FA,
|
||||
Help: "Two-factor authentication ('true' if the account has 2FA enabled).",
|
||||
Help: "Two-factor authentication ('true' if the account has 2FA enabled)",
|
||||
Default: false,
|
||||
}, {
|
||||
Name: configLibrary,
|
||||
Help: "Name of the library.\n\nLeave blank to access all non-encrypted libraries.",
|
||||
Help: "Name of the library. Leave blank to access all non-encrypted libraries.",
|
||||
}, {
|
||||
Name: configLibraryKey,
|
||||
Help: "Library password (for encrypted libraries only).\n\nLeave blank if you pass it through the command line.",
|
||||
Help: "Library password (for encrypted libraries only). Leave blank if you pass it through the command line.",
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: configCreateLibrary,
|
||||
Help: "Should rclone create a library if it doesn't exist.",
|
||||
Help: "Should rclone create a library if it doesn't exist",
|
||||
Advanced: true,
|
||||
Default: false,
|
||||
}, {
|
||||
// Keep the authentication token after entering the 2FA code
|
||||
Name: configAuthToken,
|
||||
Help: "Authentication token.",
|
||||
Help: "Authentication token",
|
||||
Hide: fs.OptionHideBoth,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
@@ -310,8 +310,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf

is2faEnabled, _ := m.Get(config2FA)
if is2faEnabled != "true" {
// no need to do anything here
return nil, nil
return nil, errors.New("two-factor authentication is not enabled on this account")
}

username, _ := m.Get(configUser)
@@ -326,20 +325,17 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf

switch config.State {
case "":
// Empty state means it's the first call to the Config function
// Just make sure we do have a password
if password == "" {
return fs.ConfigPassword("password", "config_password", "Two-factor authentication: please enter your password (it won't be saved in the configuration)")
return fs.ConfigPassword("", "config_password", "Two-factor authentication: please enter your password (it won't be saved in the configuration)")
}
// password was successfully loaded from the config
return fs.ConfigGoto("2fa")
return fs.ConfigGoto("password")
case "password":
// password should be coming from the previous state (entered by the user)
password = config.Result
if password == "" {
return fs.ConfigError("", "Password can't be blank")
return fs.ConfigError("password", "Password can't be blank")
}
// save it into the configuration file and keep going
m.Set(configPassword, obscure.MustObscure(password))
m.Set(configPassword, obscure.MustObscure(config.Result))
return fs.ConfigGoto("2fa")
case "2fa":
return fs.ConfigInput("2fa_do", "config_2fa", "Two-factor authentication: please enter your 2FA code")
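The seafile changes above rework the 2FA flow in Config as a small state machine: the empty state checks the stored password, then hands off to the "password", "2fa", "2fa_do" and "2fa_error" states, each returning the next prompt. A stripped-down sketch of the same pattern, with rclone's fs.Config* helpers replaced by plain strings so it stands alone:

package main

import (
	"errors"
	"fmt"
)

// step takes the current state plus the user's last answer and returns the
// next state (or "" when the flow is finished).
func step(state, result, storedPassword string) (next string, err error) {
	switch state {
	case "":
		// First call: skip straight to the 2FA prompt if a password is already stored.
		if storedPassword == "" {
			return "password", nil
		}
		return "2fa", nil
	case "password":
		if result == "" {
			return "password", errors.New("Password can't be blank")
		}
		return "2fa", nil
	case "2fa":
		return "2fa_do", nil
	case "2fa_do":
		if result == "" {
			return "2fa", errors.New("2FA codes can't be blank")
		}
		return "", nil // token obtained, flow complete
	default:
		return "", fmt.Errorf("unknown state %q", state)
	}
}

func main() {
	next, _ := step("", "", "")
	fmt.Println(next) // password
}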
@@ -1,15 +1,10 @@
|
||||
package seafile
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type pathData struct {
|
||||
@@ -24,77 +19,77 @@ type pathData struct {
|
||||
// from a mix of configuration data and path command line argument
|
||||
func TestSplitPath(t *testing.T) {
|
||||
testData := []pathData{
|
||||
{
|
||||
pathData{
|
||||
configLibrary: "",
|
||||
configRoot: "",
|
||||
argumentPath: "",
|
||||
expectedLibrary: "",
|
||||
expectedPath: "",
|
||||
},
|
||||
{
|
||||
pathData{
|
||||
configLibrary: "",
|
||||
configRoot: "",
|
||||
argumentPath: "Library",
|
||||
expectedLibrary: "Library",
|
||||
expectedPath: "",
|
||||
},
|
||||
{
|
||||
pathData{
|
||||
configLibrary: "",
|
||||
configRoot: "",
|
||||
argumentPath: path.Join("Library", "path", "to", "file"),
|
||||
expectedLibrary: "Library",
|
||||
expectedPath: path.Join("path", "to", "file"),
|
||||
},
|
||||
{
|
||||
pathData{
|
||||
configLibrary: "Library",
|
||||
configRoot: "",
|
||||
argumentPath: "",
|
||||
expectedLibrary: "Library",
|
||||
expectedPath: "",
|
||||
},
|
||||
{
|
||||
pathData{
|
||||
configLibrary: "Library",
|
||||
configRoot: "",
|
||||
argumentPath: "path",
|
||||
expectedLibrary: "Library",
|
||||
expectedPath: "path",
|
||||
},
|
||||
{
|
||||
pathData{
|
||||
configLibrary: "Library",
|
||||
configRoot: "",
|
||||
argumentPath: path.Join("path", "to", "file"),
|
||||
expectedLibrary: "Library",
|
||||
expectedPath: path.Join("path", "to", "file"),
|
||||
},
|
||||
{
|
||||
pathData{
|
||||
configLibrary: "Library",
|
||||
configRoot: "root",
|
||||
argumentPath: "",
|
||||
expectedLibrary: "Library",
|
||||
expectedPath: "root",
|
||||
},
|
||||
{
|
||||
pathData{
|
||||
configLibrary: "Library",
|
||||
configRoot: path.Join("root", "path"),
|
||||
argumentPath: "",
|
||||
expectedLibrary: "Library",
|
||||
expectedPath: path.Join("root", "path"),
|
||||
},
|
||||
{
|
||||
pathData{
|
||||
configLibrary: "Library",
|
||||
configRoot: "root",
|
||||
argumentPath: "path",
|
||||
expectedLibrary: "Library",
|
||||
expectedPath: path.Join("root", "path"),
|
||||
},
|
||||
{
|
||||
pathData{
|
||||
configLibrary: "Library",
|
||||
configRoot: "root",
|
||||
argumentPath: path.Join("path", "to", "file"),
|
||||
expectedLibrary: "Library",
|
||||
expectedPath: path.Join("root", "path", "to", "file"),
|
||||
},
|
||||
{
|
||||
pathData{
|
||||
configLibrary: "Library",
|
||||
configRoot: path.Join("root", "path"),
|
||||
argumentPath: path.Join("subpath", "to", "file"),
|
||||
@@ -126,103 +121,3 @@ func TestSplitPathIntoSlice(t *testing.T) {
|
||||
assert.Equal(t, expected, output)
|
||||
}
|
||||
}
|
||||
|
||||
func Test2FAStateMachine(t *testing.T) {
|
||||
fixtures := []struct {
|
||||
name string
|
||||
mapper configmap.Mapper
|
||||
input fs.ConfigIn
|
||||
expectState string
|
||||
expectErrorMessage string
|
||||
expectResult string
|
||||
expectFail bool
|
||||
expectNil bool
|
||||
}{
|
||||
{
|
||||
name: "no url",
|
||||
mapper: configmap.Simple{},
|
||||
input: fs.ConfigIn{State: ""},
|
||||
expectFail: true,
|
||||
},
|
||||
{
|
||||
name: "unknown state",
|
||||
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
|
||||
input: fs.ConfigIn{State: "unknown"},
|
||||
expectFail: true,
|
||||
},
|
||||
{
|
||||
name: "2fa not set",
|
||||
mapper: configmap.Simple{"url": "http://localhost/"},
|
||||
input: fs.ConfigIn{State: ""},
|
||||
expectNil: true,
|
||||
},
|
||||
{
|
||||
name: "no password in config",
|
||||
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
|
||||
input: fs.ConfigIn{State: ""},
|
||||
expectState: "password",
|
||||
},
|
||||
{
|
||||
name: "config ready for 2fa token",
|
||||
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username", "pass": obscure.MustObscure("password")},
|
||||
input: fs.ConfigIn{State: ""},
|
||||
expectState: "2fa",
|
||||
},
|
||||
{
|
||||
name: "password not entered",
|
||||
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
|
||||
input: fs.ConfigIn{State: "password"},
|
||||
expectState: "",
|
||||
expectErrorMessage: "Password can't be blank",
|
||||
},
|
||||
{
|
||||
name: "password entered",
|
||||
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
|
||||
input: fs.ConfigIn{State: "password", Result: "password"},
|
||||
expectState: "2fa",
|
||||
},
|
||||
{
|
||||
name: "ask for a 2fa code",
|
||||
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
|
||||
input: fs.ConfigIn{State: "2fa"},
|
||||
expectState: "2fa_do",
|
||||
},
|
||||
{
|
||||
name: "no 2fa code entered",
|
||||
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
|
||||
input: fs.ConfigIn{State: "2fa_do"},
|
||||
expectState: "2fa", // ask for a code again
|
||||
expectErrorMessage: "2FA codes can't be blank",
|
||||
},
|
||||
{
|
||||
name: "2fa error and retry",
|
||||
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
|
||||
input: fs.ConfigIn{State: "2fa_error", Result: "true"},
|
||||
expectState: "2fa", // ask for a code again
|
||||
},
|
||||
{
|
||||
name: "2fa error and fail",
|
||||
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
|
||||
input: fs.ConfigIn{State: "2fa_error"},
|
||||
expectFail: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, fixture := range fixtures {
|
||||
t.Run(fixture.name, func(t *testing.T) {
|
||||
output, err := Config(context.Background(), "test", fixture.mapper, fixture.input)
|
||||
if fixture.expectFail {
|
||||
require.Error(t, err)
|
||||
t.Log(err)
|
||||
return
|
||||
}
|
||||
if fixture.expectNil {
|
||||
require.Nil(t, output)
|
||||
return
|
||||
}
|
||||
assert.Equal(t, fixture.expectState, output.State)
|
||||
assert.Equal(t, fixture.expectErrorMessage, output.Error)
|
||||
assert.Equal(t, fixture.expectResult, output.Result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
// Package sftp provides a filesystem interface using github.com/pkg/sftp
|
||||
|
||||
//go:build !plan9
|
||||
// +build !plan9
|
||||
|
||||
package sftp
|
||||
@@ -56,24 +55,28 @@ func init() {
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "host",
|
||||
Help: "SSH host to connect to.\n\nE.g. \"example.com\".",
|
||||
Help: "SSH host to connect to",
|
||||
Required: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "example.com",
|
||||
Help: "Connect to example.com",
|
||||
}},
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "SSH username, leave blank for current username, " + currentUser + ".",
|
||||
Help: "SSH username, leave blank for current username, " + currentUser,
|
||||
}, {
|
||||
Name: "port",
|
||||
Help: "SSH port, leave blank to use default (22).",
|
||||
Help: "SSH port, leave blank to use default (22)",
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "SSH password, leave blank to use ssh-agent.",
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "key_pem",
|
||||
Help: "Raw PEM-encoded private key.\n\nIf specified, will override key_file parameter.",
|
||||
Help: "Raw PEM-encoded private key, If specified, will override key_file parameter.",
|
||||
}, {
|
||||
Name: "key_file",
|
||||
Help: "Path to PEM-encoded private key file.\n\nLeave blank or set key-use-agent to use ssh-agent." + env.ShellExpandHelp,
|
||||
Help: "Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent." + env.ShellExpandHelp,
|
||||
}, {
|
||||
Name: "key_file_pass",
|
||||
Help: `The passphrase to decrypt the PEM-encoded private key file.
|
||||
@@ -94,7 +97,7 @@ Set this value to enable server host key validation.` + env.ShellExpandHelp,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "~/.ssh/known_hosts",
|
||||
Help: "Use OpenSSH's known_hosts file.",
|
||||
Help: "Use OpenSSH's known_hosts file",
|
||||
}},
|
||||
}, {
|
||||
Name: "key_use_agent",
|
||||
@@ -131,7 +134,7 @@ Those algorithms are insecure and may allow plaintext data to be recovered by an
|
||||
}, {
|
||||
Name: "disable_hashcheck",
|
||||
Default: false,
|
||||
Help: "Disable the execution of SSH commands to determine if remote file hashing is available.\n\nLeave blank or set to false to enable hashing (recommended), set to true to disable hashing.",
|
||||
Help: "Disable the execution of SSH commands to determine if remote file hashing is available.\nLeave blank or set to false to enable hashing (recommended), set to true to disable hashing.",
|
||||
}, {
|
||||
Name: "ask_password",
|
||||
Default: false,
|
||||
@@ -166,12 +169,12 @@ Home directory can be found in a shared folder called "home"
|
||||
}, {
|
||||
Name: "md5sum_command",
|
||||
Default: "",
|
||||
Help: "The command used to read md5 hashes.\n\nLeave blank for autodetect.",
|
||||
Help: "The command used to read md5 hashes. Leave blank for autodetect.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "sha1sum_command",
|
||||
Default: "",
|
||||
Help: "The command used to read sha1 hashes.\n\nLeave blank for autodetect.",
|
||||
Help: "The command used to read sha1 hashes. Leave blank for autodetect.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "skip_links",
|
||||
@@ -193,7 +196,7 @@ The subsystem option is ignored when server_command is defined.`,
|
||||
}, {
|
||||
Name: "use_fstat",
|
||||
Default: false,
|
||||
Help: `If set use fstat instead of stat.
|
||||
Help: `If set use fstat instead of stat
|
||||
|
||||
Some servers limit the amount of open files and calling Stat after opening
|
||||
the file will throw an error from the server. Setting this flag will call
|
||||
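The use_fstat help above hinges on the difference between stat-by-path and fstat-on-an-open-handle: a server that caps open files can reject a fresh Stat on the path while the handle that is already open can still describe itself. A sketch of the two calls with github.com/pkg/sftp; connection setup is omitted and left to the real backend:

package main

import (
	"os"

	"github.com/pkg/sftp"
)

// statTwoWays shows the difference the use_fstat option selects between:
// Client.Stat asks the server about a path, File.Stat (an fstat) asks about
// the handle that is already open.
func statTwoWays(client *sftp.Client, path string) (os.FileInfo, os.FileInfo, error) {
	f, err := client.Open(path)
	if err != nil {
		return nil, nil, err
	}
	defer f.Close()

	byPath, err := client.Stat(path) // extra round trip, may hit open-file limits
	if err != nil {
		return nil, nil, err
	}
	byHandle, err := f.Stat() // fstat on the existing handle
	if err != nil {
		return nil, nil, err
	}
	return byPath, byHandle, nil
}

func main() {} // connection setup omitted; see the sftp backend for real wiring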
@@ -207,7 +210,7 @@ any given time.
|
||||
}, {
|
||||
Name: "disable_concurrent_reads",
|
||||
Default: false,
|
||||
Help: `If set don't use concurrent reads.
|
||||
Help: `If set don't use concurrent reads
|
||||
|
||||
Normally concurrent reads are safe to use and not using them will
|
||||
degrade performance, so this option is disabled by default.
|
||||
@@ -226,7 +229,7 @@ If concurrent reads are disabled, the use_fstat option is ignored.
|
||||
}, {
|
||||
Name: "disable_concurrent_writes",
|
||||
Default: false,
|
||||
Help: `If set don't use concurrent writes.
|
||||
Help: `If set don't use concurrent writes
|
||||
|
||||
Normally rclone uses concurrent writes to upload files. This improves
|
||||
the performance greatly, especially for distant servers.
|
||||
@@ -237,7 +240,7 @@ This option disables concurrent writes should that be necessary.
|
||||
}, {
|
||||
Name: "idle_timeout",
|
||||
Default: fs.Duration(60 * time.Second),
|
||||
Help: `Max time before closing idle connections.
|
||||
Help: `Max time before closing idle connections
|
||||
|
||||
If no connections have been returned to the connection pool in the time
|
||||
given, rclone will empty the connection pool.
|
||||
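idle_timeout above works by arming a timer whenever the pool goes quiet and emptying the pool when it fires; returning a connection (or, after the drainPool change below, an in-flight transfer) pushes the deadline back. A toy sketch of that reset-on-activity pattern, not the backend's actual pool type:

package main

import (
	"fmt"
	"sync"
	"time"
)

type pool struct {
	mu    sync.Mutex
	conns []int       // stand-ins for pooled connections
	drain *time.Timer // fires when the pool has been idle too long
	idle  time.Duration
}

func (p *pool) put(c int) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.conns = append(p.conns, c)
	p.drain.Reset(p.idle) // nudge the emptying timer, as drainPool does
}

func (p *pool) drainAll() {
	p.mu.Lock()
	defer p.mu.Unlock()
	fmt.Printf("closing %d idle connections\n", len(p.conns))
	p.conns = nil
}

func main() {
	p := &pool{idle: 100 * time.Millisecond}
	p.drain = time.AfterFunc(p.idle, p.drainAll)
	p.put(1)
	time.Sleep(300 * time.Millisecond) // let the idle timer fire
}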
@@ -296,7 +299,7 @@ type Fs struct {
|
||||
drain *time.Timer // used to drain the pool when we stop using the connections
|
||||
pacer *fs.Pacer // pacer for operations
|
||||
savedpswd string
|
||||
sessions int32 // count in use sessions
|
||||
transfers int32 // count in use references
|
||||
}
|
||||
|
||||
// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
|
||||
@@ -310,6 +313,13 @@ type Object struct {
|
||||
sha1sum *string // Cached SHA1 checksum
|
||||
}
|
||||
// debugf calls fs.Debugf if --dump bodies or --dump headers is set
func (f *Fs) debugf(o interface{}, text string, args ...interface{}) {
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
fs.Debugf(o, text, args...)
}
}
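The new debugf helper above gates its output on the --dump bit flags, so the per-call traces added throughout this diff (Stat, ReadDir, Mkdir, Rename, Open) only appear when dump output was requested. A standalone sketch of the same bitmask gating, with hypothetical flag names standing in for rclone's fs.Dump* constants:

package main

import "log"

// Dump flags as bits, in the spirit of fs.DumpHeaders / fs.DumpBodies.
const (
	dumpHeaders = 1 << iota
	dumpBodies
	dumpRequests
	dumpResponses
)

type tracer struct{ dump int }

// debugf only logs when at least one of the relevant dump bits is set.
func (t tracer) debugf(format string, args ...interface{}) {
	if t.dump&(dumpHeaders|dumpBodies|dumpRequests|dumpResponses) != 0 {
		log.Printf(format, args...)
	}
}

func main() {
	quiet := tracer{}                   // no bits set: silent
	verbose := tracer{dump: dumpBodies} // any matching bit enables tracing
	quiet.debugf("> Stat %q", "/tmp/x")
	verbose.debugf("> Stat %q", "/tmp/x")
}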
// dial starts a client connection to the given SSH server. It is a
|
||||
// convenience function that connects to the given network address,
|
||||
// initiates the SSH handshake, and then sets up a Client.
|
||||
@@ -359,21 +369,21 @@ func (c *conn) closed() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Show that we are using an ssh session
|
||||
// Show that we are doing an upload or download
|
||||
//
|
||||
// Call removeSession() when done
|
||||
func (f *Fs) addSession() {
|
||||
atomic.AddInt32(&f.sessions, 1)
|
||||
// Call removeTransfer() when done
|
||||
func (f *Fs) addTransfer() {
|
||||
atomic.AddInt32(&f.transfers, 1)
|
||||
}
|
||||
|
||||
// Show the ssh session is no longer in use
|
||||
func (f *Fs) removeSession() {
|
||||
atomic.AddInt32(&f.sessions, -1)
|
||||
// Show the upload or download done
|
||||
func (f *Fs) removeTransfer() {
|
||||
atomic.AddInt32(&f.transfers, -1)
|
||||
}
|
||||
|
||||
// getSessions shows whether there are any sessions in use
|
||||
func (f *Fs) getSessions() int32 {
|
||||
return atomic.LoadInt32(&f.sessions)
|
||||
// getTransfers shows whether there are any transfers in progress
|
||||
func (f *Fs) getTransfers() int32 {
|
||||
return atomic.LoadInt32(&f.transfers)
|
||||
}
|
||||
|
||||
// Open a new connection to the SFTP server.
|
||||
@@ -502,8 +512,8 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
|
||||
func (f *Fs) drainPool(ctx context.Context) (err error) {
|
||||
f.poolMu.Lock()
|
||||
defer f.poolMu.Unlock()
|
||||
if sessions := f.getSessions(); sessions != 0 {
|
||||
fs.Debugf(f, "Not closing %d unused connections as %d sessions active", len(f.pool), sessions)
|
||||
if transfers := f.getTransfers(); transfers != 0 {
|
||||
fs.Debugf(f, "Not closing %d unused connections as %d transfers in progress", len(f.pool), transfers)
|
||||
if f.opt.IdleTimeout > 0 {
|
||||
f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
|
||||
}
|
||||
@@ -761,7 +771,9 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "NewFs")
|
||||
}
|
||||
f.debugf(f, "> Getwd")
|
||||
cwd, err := c.sftpClient.Getwd()
|
||||
f.debugf(f, "< Getwd: %q, err=%#v", cwd, err)
|
||||
f.putSftpConnection(&c, nil)
|
||||
if err != nil {
|
||||
fs.Debugf(f, "Failed to read current directory - using relative paths: %v", err)
|
||||
@@ -780,7 +792,7 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
|
||||
}
|
||||
_, err := f.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound || err == fs.ErrorIsDir {
|
||||
if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
|
||||
// File doesn't exist so return old f
|
||||
f.root = root
|
||||
f.absRoot = oldAbsRoot
|
||||
@@ -842,7 +854,9 @@ func (f *Fs) dirExists(ctx context.Context, dir string) (bool, error) {
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "dirExists")
|
||||
}
|
||||
f.debugf(f, "> Stat dirExists: %q", dir)
|
||||
info, err := c.sftpClient.Stat(dir)
|
||||
f.debugf(f, "< Stat dirExists: %#v, err=%#v", info, err)
|
||||
f.putSftpConnection(&c, err)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
@@ -882,7 +896,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "List")
|
||||
}
|
||||
f.debugf(f, "> ReadDir: %q", sftpDir)
|
||||
infos, err := c.sftpClient.ReadDir(sftpDir)
|
||||
f.debugf(f, "< ReadDir: %#v, err=%#v", infos, err)
|
||||
f.putSftpConnection(&c, err)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error listing %q", dir)
|
||||
@@ -973,7 +989,9 @@ func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "mkdir")
|
||||
}
|
||||
f.debugf(f, "> Mkdir: %q", dirPath)
|
||||
err = c.sftpClient.Mkdir(dirPath)
|
||||
f.debugf(f, "< Mkdir: err=%#v", err)
|
||||
f.putSftpConnection(&c, err)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "mkdir %q failed", dirPath)
|
||||
@@ -1004,7 +1022,9 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Rmdir")
|
||||
}
|
||||
f.debugf(f, "> Rmdir: %q", root)
|
||||
err = c.sftpClient.RemoveDirectory(root)
|
||||
f.debugf(f, "< Rmdir: err=%#v", err)
|
||||
f.putSftpConnection(&c, err)
|
||||
return err
|
||||
}
|
||||
@@ -1024,10 +1044,10 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Move")
|
||||
}
|
||||
err = c.sftpClient.Rename(
|
||||
srcObj.path(),
|
||||
path.Join(f.absRoot, remote),
|
||||
)
|
||||
srcPath, dstPath := srcObj.path(), path.Join(f.absRoot, remote)
|
||||
f.debugf(f, "> Rename file: src=%q, dst=%q", srcPath, dstPath)
|
||||
err = c.sftpClient.Rename(srcPath, dstPath)
|
||||
f.debugf(f, "< Rename file: err=%#v", err)
|
||||
f.putSftpConnection(&c, err)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Move Rename failed")
|
||||
@@ -1076,10 +1096,12 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "DirMove")
|
||||
}
|
||||
f.debugf(f, "> Rename dir: src=%q, dst=%q", srcPath, dstPath)
|
||||
err = c.sftpClient.Rename(
|
||||
srcPath,
|
||||
dstPath,
|
||||
)
|
||||
f.debugf(f, "< Rename dir: err=%#v", err)
|
||||
f.putSftpConnection(&c, err)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "DirMove Rename(%q,%q) failed", srcPath, dstPath)
|
||||
@@ -1089,16 +1111,15 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
|
||||
// run runds cmd on the remote end returning standard output
|
||||
func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
	f.addSession() // Show session in use
	defer f.removeSession()

	c, err := f.getSftpConnection(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "run: get SFTP connection")
	}
	defer f.putSftpConnection(&c, err)

	f.debugf(f, "> NewSession run")
	session, err := c.sshClient.NewSession()
	f.debugf(f, "< NewSession run: %#v, err=%#v", session, err)
	if err != nil {
		return nil, errors.Wrap(err, "run: get SFTP session")
	}
@@ -1110,7 +1131,9 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
	session.Stdout = &stdout
	session.Stderr = &stderr

	f.debugf(f, "> Run cmd: %q", cmd)
	err = session.Run(cmd)
	f.debugf(f, "< Run cmd: err=%#v", err)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to run %q: %s", cmd, stderr.Bytes())
	}
@@ -1230,8 +1253,6 @@ func (o *Object) Remote() string {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
	o.fs.addSession() // Show session in use
	defer o.fs.removeSession()
	if o.fs.opt.DisableHashCheck {
		return "", nil
	}
@@ -1259,7 +1280,9 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
	if err != nil {
		return "", errors.Wrap(err, "Hash get SFTP connection")
	}
	o.fs.debugf(o, "> NewSession hash")
	session, err := c.sshClient.NewSession()
	o.fs.debugf(o, "< NewSession hash: %#v, err=%#v", session, err)
	o.fs.putSftpConnection(&c, err)
	if err != nil {
		return "", errors.Wrap(err, "Hash put SFTP connection")
@@ -1369,7 +1392,9 @@ func (f *Fs) stat(ctx context.Context, remote string) (info os.FileInfo, err err
		return nil, errors.Wrap(err, "stat")
	}
	absPath := path.Join(f.absRoot, remote)
	f.debugf(f, "> Stat file: %q", absPath)
	info, err = c.sftpClient.Stat(absPath)
	f.debugf(f, "< Stat file: %#v, err=%#v", info, err)
	f.putSftpConnection(&c, err)
	return info, err
}
@@ -1384,7 +1409,7 @@ func (o *Object) stat(ctx context.Context) error {
		return errors.Wrap(err, "stat failed")
	}
	if info.IsDir() {
		return fs.ErrorIsDir
		return errors.Wrapf(fs.ErrorNotAFile, "%q", o.remote)
	}
	o.setMetadata(info)
	return nil
@@ -1401,7 +1426,9 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	if err != nil {
		return errors.Wrap(err, "SetModTime")
	}
	o.fs.debugf(o, "> Chtimes: %q, %v", o.path(), modTime)
	err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
	o.fs.debugf(o, "< Chtimes: err=%#v", err)
	o.fs.putSftpConnection(&c, err)
	if err != nil {
		return errors.Wrap(err, "SetModTime failed")
@@ -1435,7 +1462,7 @@ func (f *Fs) newObjectReader(sftpFile *sftp.File) *objectReader {
		done: make(chan struct{}),
	}
	// Show connection in use
	f.addSession()
	f.addTransfer()

	go func() {
		// Use sftpFile.WriteTo to pump data so that it gets a
@@ -1466,7 +1493,7 @@ func (file *objectReader) Close() (err error) {
	// Wait for the background process to finish
	<-file.done
	// Show connection no longer in use
	file.f.removeSession()
	file.f.removeTransfer()
	return err
}

@@ -1489,7 +1516,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	if err != nil {
		return nil, errors.Wrap(err, "Open")
	}
	o.fs.debugf(o, "> Open read: %q", o.path())
	sftpFile, err := c.sftpClient.Open(o.path())
	o.fs.debugf(o, "< Open read: %#v, err=%#v", sftpFile, err)
	o.fs.putSftpConnection(&c, err)
	if err != nil {
		return nil, errors.Wrap(err, "Open failed")
@@ -1519,8 +1548,8 @@ func (sr *sizeReader) Size() int64 {

// Update a remote sftp file using the data <in> and ModTime from <src>
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	o.fs.addSession() // Show session in use
	defer o.fs.removeSession()
	o.fs.addTransfer() // Show transfer in progress
	defer o.fs.removeTransfer()
	// Clear the hash cache since we are about to update the object
	o.md5sum = nil
	o.sha1sum = nil
@@ -1528,7 +1557,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	if err != nil {
		return errors.Wrap(err, "Update")
	}
	o.fs.debugf(o, "> OpenFile write: %q", o.path())
	file, err := c.sftpClient.OpenFile(o.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
	o.fs.debugf(o, "< OpenFile write: %#v, err=%#v", file, err)
	o.fs.putSftpConnection(&c, err)
	if err != nil {
		return errors.Wrap(err, "Update Create failed")
@@ -1540,7 +1571,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
			fs.Debugf(src, "Failed to open new SSH connection for delete: %v", removeErr)
			return
		}
		o.fs.debugf(o, "> Remove file: %q", o.path())
		removeErr = c.sftpClient.Remove(o.path())
		o.fs.debugf(o, "< Remove file: err=%#v", removeErr)
		o.fs.putSftpConnection(&c, removeErr)
		if removeErr != nil {
			fs.Debugf(src, "Failed to remove: %v", removeErr)
@@ -1589,7 +1622,9 @@ func (o *Object) Remove(ctx context.Context) error {
	if err != nil {
		return errors.Wrap(err, "Remove")
	}
	o.fs.debugf(o, "> Remove: %q", o.path())
	err = c.sftpClient.Remove(o.path())
	o.fs.debugf(o, "< Remove: err=%#v", err)
	o.fs.putSftpConnection(&c, err)
	return err
}

@@ -1,4 +1,3 @@
//go:build !plan9
// +build !plan9

package sftp

Some files were not shown because too many files have changed in this diff
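Note on the trace calls: the `f.debugf(...)` / `o.fs.debugf(...)` lines added throughout this diff bracket each SFTP round trip with a `> ...` line before the call and a `< ...` line after it, so a debug log shows which remote operation never returned. The helper's definition is not visible in the hunks shown above; the following is only a minimal sketch of what such a helper could look like, assuming it simply forwards to rclone's standard `fs.Debugf` logger (that forwarding is an assumption, and the commit's actual helper may gate or format the output differently).

```go
// debugf is a hypothetical sketch of the helper assumed by the new trace
// calls in this diff. It forwards to rclone's leveled logger, so the
// "> ..." / "< ..." pairs only appear at DEBUG level (e.g. rclone -vv).
func (f *Fs) debugf(o interface{}, format string, args ...interface{}) {
	fs.Debugf(o, format, args...)
}
```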