
Compare commits

1 commit: 670 changed files with 49,166 additions and 99,239 deletions


@@ -5,31 +5,19 @@ about: Report a problem with rclone
<!--
We understand you are having a problem with rclone; we want to help you with that!
Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
**STOP and READ**
**YOUR POST WILL BE REMOVED IF IT IS LOW QUALITY**:
Please show the effort you've put in to solving the problem and please be specific.
People are volunteering their time to help! Low effort posts are not likely to get good answers!
If you think you might have found a bug, try to replicate it with the latest beta (or stable).
The update instructions are available at https://rclone.org/commands/rclone_selfupdate/
If you can still replicate it or just got a question then please use the rclone forum:
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
https://forum.rclone.org/
for a quick response instead of filing an issue on this repo.
instead of filing an issue for a quick response.
If nothing else helps, then please fill in the info below which helps us help you.
If you think you might have found a bug, please can you try to replicate it with the latest beta?
**DO NOT REDACT** any information except passwords/keys/personal info.
You should use 3 backticks to begin and end your paste to make it readable.
Make sure to include a log obtained with '-vv'.
You can also use '-vv --log-file bug.log' and a service such as https://pastebin.com or https://gist.github.com/
https://beta.rclone.org/
If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
Thank you
@@ -37,10 +25,6 @@ The Rclone Developers
-->
#### The associated forum post URL from `https://forum.rclone.org`
#### What is the problem you are having with rclone?
@@ -53,7 +37,7 @@ The Rclone Developers
#### Which cloud storage system are you using? (e.g. Google Drive)
#### Which cloud storage system are you using? (e.g. Google Drive)
@@ -64,11 +48,3 @@ The Rclone Developers
#### A log from the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
<!--- Please keep the note below for others who read your bug report. -->
#### How to use GitHub
* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
* Subscribe to receive notifications on status change and new comments.


@@ -7,16 +7,12 @@ about: Suggest a new feature or enhancement for rclone
Welcome :-)
So you've got an idea to improve rclone? We love that!
You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
Probably the latest beta (or stable) release has your feature, so try to update your rclone.
The update instructions are available at https://rclone.org/commands/rclone_selfupdate/
Here is a checklist of things to do:
If it still isn't there, here is a checklist of things to do:
1. Search the old issues for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum: https://forum.rclone.org/
1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum first: https://forum.rclone.org/
3. Make a feature request issue (this is the right place!).
4. Be prepared to get involved making the feature :-)
@@ -26,9 +22,6 @@ The Rclone Developers
-->
#### The associated forum post URL from `https://forum.rclone.org`
#### What is your current rclone version (output from `rclone version`)?
@@ -41,11 +34,3 @@ The Rclone Developers
#### How do you think rclone should be changed to solve that?
<!--- Please keep the note below for others who read your feature request. -->
#### How to use GitHub
* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
* Subscribe to receive notifications on status change and new comments.


@@ -22,7 +22,7 @@ Link issues and relevant forum posts here.
#### Checklist
- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-new-feature-or-bug-fix).
- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
- [ ] I have added tests for all changes in this PR if appropriate.
- [ ] I have added documentation for the changes if appropriate.
- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).


@@ -12,36 +12,29 @@ on:
tags:
- '*'
pull_request:
workflow_dispatch:
inputs:
manual:
required: true
default: true
jobs:
build:
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.14', 'go1.15', 'go1.16']
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.13', 'go1.14', 'go1.15']
include:
- job_name: linux
os: ubuntu-latest
go: '1.17.x'
go: '1.16.x'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
quicktest: true
racequicktest: true
librclonetest: true
deploy: true
- job_name: mac_amd64
os: macOS-latest
go: '1.17.x'
go: '1.16.x'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -50,14 +43,14 @@ jobs:
- job_name: mac_arm64
os: macOS-latest
go: '1.17.x'
go: '1.16.x'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows_amd64
os: windows-latest
go: '1.17.x'
go: '1.16.x'
gotags: cmount
build_flags: '-include "^windows/amd64" -cgo'
build_args: '-buildmode exe'
@@ -67,7 +60,7 @@ jobs:
- job_name: windows_386
os: windows-latest
go: '1.17.x'
go: '1.16.x'
gotags: cmount
goarch: '386'
cgo: '1'
@@ -78,11 +71,16 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '1.17.x'
go: '1.16.x'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.13
os: ubuntu-latest
go: '1.13.x'
quicktest: true
- job_name: go1.14
os: ubuntu-latest
go: '1.14.x'
@@ -95,12 +93,6 @@ jobs:
quicktest: true
racequicktest: true
- job_name: go1.16
os: ubuntu-latest
go: '1.16.x'
quicktest: true
racequicktest: true
name: ${{ matrix.job_name }}
runs-on: ${{ matrix.os }}
@@ -195,13 +187,12 @@ jobs:
make racequicktest
if: matrix.racequicktest
- name: Run librclone tests
- name: Code quality test
shell: bash
run: |
make -C librclone/ctest test
make -C librclone/ctest clean
librclone/python/test_rclone.py
if: matrix.librclonetest
make build_dep
make check
if: matrix.check
- name: Compile all architectures test
shell: bash
@@ -222,122 +213,46 @@ jobs:
# Deploy binaries if enabled in config && not a PR && not a fork
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
lint:
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 30
name: "lint"
xgo:
timeout-minutes: 60
name: "xgo cross compile"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Code quality test
uses: golangci/golangci-lint-action@v2
uses: actions/checkout@v1
with:
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
version: latest
# Checkout into a fixed path to avoid import path problems on go < 1.11
path: ./src/github.com/rclone/rclone
android:
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 30
name: "android-all"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
# Upgrade together with NDK version
- name: Set up Go 1.16
uses: actions/setup-go@v1
with:
go-version: 1.16
# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
- name: Force NDK version
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;22.1.7171670" | grep -v = || true
- name: Go module cache
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Set global environment variables
- name: Set environment variables
shell: bash
run: |
echo "VERSION=$(make version)" >> $GITHUB_ENV
echo 'GOPATH=${{ runner.workspace }}' >> $GITHUB_ENV
echo '${{ runner.workspace }}/bin' >> $GITHUB_PATH
- name: build native rclone
- name: Cross-compile rclone
run: |
docker pull billziss/xgo-cgofuse
GO111MODULE=off go get -v github.com/karalabe/xgo # don't add to go.mod
# xgo \
# -image=billziss/xgo-cgofuse \
# -targets=darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
# -tags cmount \
# -dest build \
# .
xgo \
-image=billziss/xgo-cgofuse \
-targets=android/*,ios/* \
-dest build \
.
- name: Build rclone
shell: bash
run: |
make
- name: install gomobile
run: |
go get golang.org/x/mobile/cmd/gobind
go get golang.org/x/mobile/cmd/gomobile
env PATH=$PATH:~/go/bin gomobile init
- name: arm-v7a gomobile build
run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
- name: arm-v7a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
echo 'GOARM=7' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm-v7a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
- name: arm64-v8a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm64-v8a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
- name: x86 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x86 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
- name: x64 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x64 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
- name: Upload artifacts
run: |
make ci_upload


@@ -7,7 +7,6 @@ on:
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:


@@ -6,7 +6,6 @@ on:
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
@@ -32,28 +31,3 @@ jobs:
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
build_docker_volume_plugin:
if: github.repository == 'rclone/rclone'
needs: build
runs-on: ubuntu-latest
name: Build docker plugin job
steps:
- name: Checkout master
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Build and publish docker plugin
shell: bash
run: |
VER=${GITHUB_REF#refs/tags/}
PLUGIN_USER=rclone
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
for PLUGIN_ARCH in amd64 arm64 arm/v7 ;do
export PLUGIN_USER PLUGIN_ARCH
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
done
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}

.gitignore vendored

@@ -1,7 +1,6 @@
*~
_junk/
rclone
rclone.exe
build
docs/public
rclone.iml
@@ -11,7 +10,3 @@ rclone.iml
*.log
*.iml
fuzz-build.zip
*.orig
*.rej
Thumbs.db
__pycache__


@@ -5,7 +5,7 @@ linters:
- deadcode
- errcheck
- goimports
- revive
- golint
- ineffassign
- structcheck
- varcheck
@@ -24,7 +24,3 @@ issues:
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0
run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m


@@ -12,164 +12,94 @@ When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](https://beta.rclone.org/):
* Rclone version (e.g. output from `rclone version`)
* Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
* Rclone version (e.g. output from `rclone -V`)
* Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
* if the log contains secrets then edit the file with a text editor first to obscure them
## Submitting a new feature or bug fix ##
## Submitting a pull request ##
If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub.
If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.
If it is a big feature then make an issue first so it can be discussed.
To prepare your pull request first press the fork button on [rclone's GitHub
You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info.
First in your web browser press the fork button on [rclone's GitHub
page](https://github.com/rclone/rclone).
Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
Now in your terminal
Next open your terminal, change directory to your preferred folder and initialise your local rclone project:
git clone https://github.com/rclone/rclone.git
cd rclone
go get -u github.com/rclone/rclone
cd $GOPATH/src/github.com/rclone/rclone
git remote rename origin upstream
# if you have SSH keys setup in your GitHub account:
git remote add origin git@github.com:YOURUSER/rclone.git
# otherwise:
git remote add origin https://github.com/YOURUSER/rclone.git
Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.
Now [install Go](https://golang.org/doc/install) and verify your installation:
go version
Great, you can now compile and execute your own version of rclone:
go build
./rclone version
(Note that you can also replace `go build` with `make`, which will include a
more accurate version number in the executable as well as enable you to specify
more build options.) Finally make a branch to add your new feature
Make a branch to add your new feature
git checkout -b my-new-feature
And get hacking.
You may like one of the [popular editors/IDEs for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).
When ready - run the unit tests for the code you changed
When ready - test the affected functionality and run the unit tests for the code you changed
cd folder/with/changed/files
go test -v
Note that you may need to make a test remote, e.g. `TestSwift` for some
of the unit tests.
This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.
Note the top level Makefile targets
* make check
* make test
Both of these will be run by Travis when you make a pull request but
you can do this yourself locally too. These require some extra go
packages which you can install with
* make build_dep
Make sure you
* Add [unit tests](#testing) for a new feature.
* Add [documentation](#writing-documentation) for a new feature.
* [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
* Follow the [commit message guidelines](#commit-messages).
* Add [unit tests](#testing) for a new feature
* squash commits down to one per feature
* rebase to master with `git rebase master`
When you are done with that, push your changes to GitHub:
When you are done with that
git push -u origin my-new-feature
and open the GitHub website to [create your pull
Go to the GitHub website and click [Create pull
request](https://help.github.com/articles/creating-a-pull-request/).
Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.
Your patch will get reviewed and you might get asked to fix some stuff.
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
If so, then make the changes in the same branch, squash the commits (make multiple commits one commit) by running:
```
git log # See how many commits you want to squash
git reset --soft HEAD~2 # This squashes the 2 latest commits together.
git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
git commit # Add a new commit message.
git push --force # Push the squashed commit to your GitHub repo.
# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
```
## Using Git and Github ##
### Committing your changes ###
Follow the guideline for [commit messages](#commit-messages) and then:
git checkout my-new-feature # To switch to your branch
git status # To see the new and changed files
git add FILENAME # To select FILENAME for the commit
git status # To verify the changes to be committed
git commit # To do the commit
git log # To verify the commit. Use q to quit the log
You can modify the message or changes in the latest commit using:
git commit --amend
If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Replacing your previously pushed commits ###
Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
Your previously pushed commits are replaced by:
git push --force origin my-new-feature
### Basing your changes on the latest master ###
To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
git checkout master
git fetch upstream
git merge --ff-only
git push origin --follow-tags # optional update of your fork in GitHub
git checkout my-new-feature
git rebase master
If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Squashing your commits ###
To combine your commits into one commit:
git log # To count the commits to squash, e.g. the last 2
git reset --soft HEAD~2 # To undo the 2 latest commits
git status # To check everything is as expected
If everything is fine, then make the new combined commit:
git commit # To commit the undone commits as one
otherwise, you may roll back using:
git reflog # To check that HEAD@{1} is your previous state
git reset --soft 'HEAD@{1}' # To roll back to your previous state
If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
### GitHub Continuous Integration ###
## CI for your fork ##
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
## Testing ##
### Quick testing ###
rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.
go test -v ./...
You can also use `make`, if supported by your platform
make quicktest
The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
### Backend testing ###
rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud
storage systems by mocking all their interfaces, rclone unit tests can
@@ -203,19 +133,12 @@ project root:
go install github.com/rclone/rclone/fstest/test_all
test_all -backend drive
### Full integration testing ###
If you want to run all the integration tests against all the remotes,
then change into the project root and run
make check
make test
The commands may require some extra go packages which you can install with
make build_dep
The full integration tests are run daily on the integration test server. You can
This command is run daily on the integration test server. You can
find the results at https://pub.rclone.org/integration-tests/
## Code Organisation ##
@@ -230,7 +153,6 @@ with modules beneath.
* cmd - the rclone commands
* all - import this to load all the commands
* ...commands
* cmdtest - end-to-end tests of commands, flags, environment variables,...
* docs - the documentation and website
* content - adjust these docs only - everything else is autogenerated
* command - these are auto generated - edit the corresponding .go file
@@ -275,28 +197,9 @@ If you add a new general flag (not for a backend), then document it in
alphabetical order.
If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field.
* Start with the most important information about the option,
as a single sentence on a single line.
* This text will be used for the command-line flag help.
* It will be combined with other information, such as any default value,
and the result will look odd if not written as a single sentence.
* It should end with a period/full stop character, which will be shown
in docs but automatically removed when producing the flag help.
* Try to keep it below 80 characters, to reduce text wrapping in the terminal.
* More details can be added in a new paragraph, after an empty line (`"\n\n"`).
* Like with docs generated from Markdown, a single line break is ignored
and two line breaks create a new paragraph.
* This text will be shown to the user in `rclone config`
and in the docs (where it will be added by `make backenddocs`,
normally run some time before next release).
* To create options of enumeration type use the `Examples:` field.
* Each example value has its own `Help:` field, but these are treated
a bit differently than the main option help text. They will be shown
as an unordered list, therefore a single line break is enough to
create a new list item. Also, for enumeration texts like names of
countries, it looks better without an ending period/full stop character.
the source file in the `Help:` field. The first line of this is used
for the flag help, the remainder is shown to the user in `rclone
config` and is added to the docs with `make backenddocs`.
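For illustration, here is a minimal sketch of a backend option written to the conventions above. The option name and all help texts are invented for this example; the `fs.Option` and `fs.OptionExample` fields follow the same patterns visible in the backend diffs later in this compare (e.g. the alias and azureblob backends).

```go
package example

import "github.com/rclone/rclone/fs"

// exampleOption is a hypothetical backend option written to the Help:
// guidelines above: the first line is a single sentence ending with a
// full stop (used for the flag help), further detail follows after a
// blank line, and each enumerated value gets its own short Help text
// without a trailing full stop.
var exampleOption = fs.Option{
	Name:     "upload_mode", // invented name, for illustration only
	Default:  "auto",
	Advanced: true,
	Help: `How uploads should be performed.

Leave this as "auto" unless you have a specific reason to force a
particular mode. Further paragraphs of detail like this one are only
shown in "rclone config" and the generated docs.`,
	Examples: []fs.OptionExample{{
		Value: "auto",
		Help:  "Let rclone pick the most suitable mode",
	}, {
		Value: "multipart",
		Help:  "Always use multipart uploads",
	}},
}
```

Assuming a backend registered with this option, the first Help line would become the help for the corresponding `--<backend>-upload-mode` flag, while the full text and the example values would appear in `rclone config` and in the docs generated by `make backenddocs`.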
The only documentation you need to edit are the `docs/content/*.md`
files. The `MANUAL.*`, `rclone.1`, web site, etc. are all auto generated
@@ -305,9 +208,7 @@ website` targets in the Makefile if you are interested in how. You
don't need to run these when adding a feature.
Documentation for rclone sub commands is with their code, e.g.
`cmd/ls/ls.go`. Write flag help strings as a single sentence on a single
line, without a period/full stop character at the end, as it will be
combined unmodified with other information (such as any default value).
`cmd/ls/ls.go`.
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy.
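And a similarly hedged sketch of a command flag defined to the convention just described: the help string is a single sentence on a single line with no trailing full stop. This uses the generic cobra/pflag API rather than rclone's own flag helper package, and the command and flag names are made up for the example.

```go
package example

import "github.com/spf13/cobra"

var listLong bool

// exampleCmd is a hypothetical subcommand; its flag help is one
// sentence with no trailing full stop, since the text gets combined
// with other information such as the default value.
var exampleCmd = &cobra.Command{
	Use:   "example",
	Short: "List things in an invented way (illustration only)",
	RunE: func(cmd *cobra.Command, args []string) error {
		return nil
	},
}

func init() {
	exampleCmd.Flags().BoolVarP(&listLong, "long", "l",
		false, "Show extra detail for each entry") // no full stop
}
```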

MANUAL.html generated

File diff suppressed because it is too large.

MANUAL.md generated

File diff suppressed because it is too large.

MANUAL.txt generated

File diff suppressed because it is too large.


@@ -93,7 +93,7 @@ build_dep:
# Get the release dependencies we only install on linux
release_dep_linux:
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64\.tar\.gz'
cd /tmp && go get github.com/goreleaser/nfpm/v2/...
# Get the release dependencies we only install on Windows
release_dep_windows:
@@ -119,7 +119,7 @@ doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
rclone.1: MANUAL.md
pandoc -s --from markdown-smart --to man MANUAL.md -o rclone.1
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs rcdocs
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs
./bin/make_manual.py
MANUAL.html: MANUAL.md
@@ -187,10 +187,10 @@ upload_github:
./bin/upload-github $(TAG)
cross: doc
go run bin/cross-compile.go -release current $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
go run bin/cross-compile.go -release current $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
beta:
go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
go run bin/cross-compile.go $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
@@ -198,7 +198,7 @@ log_since_last_release:
git log $(LAST_TAG)..
compile_all:
go run bin/cross-compile.go -compile-only $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
ci_upload:
sudo chown -R $$USER build
@@ -256,33 +256,3 @@ startstable:
winzip:
zip -9 rclone-$(TAG).zip rclone.exe
# docker volume plugin
PLUGIN_USER ?= rclone
PLUGIN_TAG ?= latest
PLUGIN_BASE_TAG ?= latest
PLUGIN_ARCH ?= amd64
PLUGIN_IMAGE := $(PLUGIN_USER)/docker-volume-rclone:$(PLUGIN_TAG)
PLUGIN_BASE := $(PLUGIN_USER)/rclone:$(PLUGIN_BASE_TAG)
PLUGIN_BUILD_DIR := ./build/docker-plugin
PLUGIN_CONTRIB_DIR := ./contrib/docker-plugin/managed
docker-plugin-create:
docker buildx inspect |grep -q /${PLUGIN_ARCH} || \
docker run --rm --privileged tonistiigi/binfmt --install all
rm -rf ${PLUGIN_BUILD_DIR}
docker buildx build \
--no-cache --pull \
--build-arg BASE_IMAGE=${PLUGIN_BASE} \
--platform linux/${PLUGIN_ARCH} \
--output ${PLUGIN_BUILD_DIR}/rootfs \
${PLUGIN_CONTRIB_DIR}
cp ${PLUGIN_CONTRIB_DIR}/config.json ${PLUGIN_BUILD_DIR}
docker plugin rm --force ${PLUGIN_IMAGE} 2>/dev/null || true
docker plugin create ${PLUGIN_IMAGE} ${PLUGIN_BUILD_DIR}
docker-plugin-push:
docker plugin push ${PLUGIN_IMAGE}
docker plugin rm ${PLUGIN_IMAGE}
docker-plugin: docker-plugin-create docker-plugin-push


@@ -32,6 +32,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
@@ -61,7 +62,6 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
@@ -87,6 +87,7 @@ Please see [the full list of all storage providers and their features](https://r
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
* Optional transparent compression ([Compress](https://rclone.org/compress/))
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Multi-threaded downloads to local disk
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna


@@ -76,24 +76,6 @@ Now
The rclone docker image should autobuild on via GitHub actions. If it doesn't
or needs to be updated then rebuild like this.
See: https://github.com/ilteoood/docker_buildx/issues/19
See: https://github.com/ilteoood/docker_buildx/blob/master/scripts/install_buildx.sh
```
git co v1.54.1
docker pull golang
export DOCKER_CLI_EXPERIMENTAL=enabled
docker buildx create --name actions_builder --use
docker run --rm --privileged docker/binfmt:820fdd95a9972a5308930a2bdfb8573dd4447ad3
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
SUPPORTED_PLATFORMS=$(docker buildx inspect --bootstrap | grep 'Platforms:*.*' | cut -d : -f2,3)
echo "Supported platforms: $SUPPORTED_PLATFORMS"
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
docker buildx stop actions_builder
```
### Old build for linux/amd64 only
```
docker pull golang
docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .


@@ -1 +1 @@
v1.57.0
v1.55.0


@@ -20,7 +20,7 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
Help: "Remote or path to alias.\n\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
Required: true,
}},
}


@@ -11,7 +11,6 @@ import (
_ "github.com/rclone/rclone/backend/local" // pull in test backend
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/stretchr/testify/require"
)
@@ -20,7 +19,7 @@ var (
)
func prepare(t *testing.T, root string) {
configfile.Install()
config.LoadConfig(context.Background())
// Configure the remote
config.FileSet(remoteName, "type", "alias")


@@ -18,7 +18,6 @@ import (
_ "github.com/rclone/rclone/backend/ftp"
_ "github.com/rclone/rclone/backend/googlecloudstorage"
_ "github.com/rclone/rclone/backend/googlephotos"
_ "github.com/rclone/rclone/backend/hasher"
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic"
@@ -38,12 +37,10 @@ import (
_ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sia"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/tardigrade"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/uptobox"
_ "github.com/rclone/rclone/backend/webdav"
_ "github.com/rclone/rclone/backend/yandex"
_ "github.com/rclone/rclone/backend/zoho"


@@ -16,6 +16,7 @@ import (
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"path"
"strings"
@@ -69,10 +70,11 @@ func init() {
Prefix: "acd",
Description: "Amazon Drive",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: acdConfig,
})
Config: func(ctx context.Context, name string, m configmap.Mapper) {
err := oauthutil.Config(ctx, "amazon cloud drive", name, m, acdConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "checkpoint",
@@ -81,16 +83,16 @@ func init() {
Advanced: true,
}, {
Name: "upload_wait_per_gb",
Help: `Additional time per GiB to wait after a failed complete upload to see if it appears.
Help: `Additional time per GB to wait after a failed complete upload to see if it appears.
Sometimes Amazon Drive gives an error when a file has been fully
uploaded but the file appears anyway after a little while. This
happens sometimes for files over 1 GiB in size and nearly every time for
files bigger than 10 GiB. This parameter controls the time rclone waits
happens sometimes for files over 1GB in size and nearly every time for
files bigger than 10GB. This parameter controls the time rclone waits
for the file to appear.
The default value for this parameter is 3 minutes per GiB, so by
default it will wait 3 minutes for every GiB uploaded to see if the
The default value for this parameter is 3 minutes per GB, so by
default it will wait 3 minutes for every GB uploaded to see if the
file appears.
You can disable this feature by setting it to 0. This may cause
@@ -110,7 +112,7 @@ in this situation.`,
Files this size or more will be downloaded via their "tempLink". This
is to work around a problem with Amazon Drive which blocks downloads
of files bigger than about 10 GiB. The default for this is 9 GiB which
of files bigger than about 10GB. The default for this is 9GB which
shouldn't need to be changed.
To download files above this threshold, rclone requests a "tempLink"
@@ -203,10 +205,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
if resp != nil {
if resp.StatusCode == 401 {
f.tokenRenewer.Invalidate()
@@ -281,7 +280,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.getRootInfo(ctx)
_, err := f.getRootInfo()
return err
})
@@ -289,14 +288,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
_, resp, err = f.c.Account.GetEndpoints()
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get endpoints")
}
// Get rootID
rootInfo, err := f.getRootInfo(ctx)
rootInfo, err := f.getRootInfo()
if err != nil || rootInfo.Id == nil {
return nil, errors.Wrap(err, "failed to get root")
}
@@ -338,11 +337,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
// getRootInfo gets the root folder info
func (f *Fs) getRootInfo(ctx context.Context) (rootInfo *acd.Folder, err error) {
func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
rootInfo, resp, err = f.c.Nodes.GetRoot()
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
return rootInfo, err
}
@@ -381,7 +380,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
var subFolder *acd.Folder
err = f.pacer.Call(func() (bool, error) {
subFolder, resp, err = folder.GetFolder(f.opt.Enc.FromStandardName(leaf))
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if err == acd.ErrorNodeNotFound {
@@ -408,7 +407,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
var info *acd.Folder
err = f.pacer.Call(func() (bool, error) {
info, resp, err = folder.CreateFolder(f.opt.Enc.FromStandardName(leaf))
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
@@ -429,7 +428,7 @@ type listAllFn func(*acd.Node) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
query := "parents:" + dirID
if directoriesOnly {
query += " AND kind:" + folderKind
@@ -450,7 +449,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, title string, directorie
var resp *http.Response
err = f.pacer.CallNoRetry(func() (bool, error) {
nodes, resp, err = f.c.Nodes.GetNodes(&opts)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
return false, err
@@ -509,7 +508,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
var iErr error
for tries := 1; tries <= maxTries; tries++ {
entries = nil
_, err = f.listAll(ctx, directoryID, "", false, false, func(node *acd.Node) bool {
_, err = f.listAll(directoryID, "", false, false, func(node *acd.Node) bool {
remote := path.Join(dir, *node.Name)
switch *node.Kind {
case folderKind:
@@ -668,7 +667,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
if ok {
return false, nil
}
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -709,7 +708,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, err
}
err = f.moveNode(ctx, srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false)
err = f.moveNode(srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false)
if err != nil {
return nil, err
}
@@ -804,7 +803,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
var jsonStr string
err = srcFs.pacer.Call(func() (bool, error) {
jsonStr, err = srcInfo.GetMetadata()
return srcFs.shouldRetry(ctx, nil, err)
return srcFs.shouldRetry(nil, err)
})
if err != nil {
fs.Debugf(src, "DirMove error: error reading src metadata: %v", err)
@@ -816,7 +815,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return err
}
err = f.moveNode(ctx, srcPath, dstLeaf, dstDirectoryID, srcInfo, srcLeaf, srcDirectoryID, true)
err = f.moveNode(srcPath, dstLeaf, dstDirectoryID, srcInfo, srcLeaf, srcDirectoryID, true)
if err != nil {
return err
}
@@ -841,7 +840,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
if check {
// check directory is empty
empty := true
_, err = f.listAll(ctx, rootID, "", false, false, func(node *acd.Node) bool {
_, err = f.listAll(rootID, "", false, false, func(node *acd.Node) bool {
switch *node.Kind {
case folderKind:
empty = false
@@ -866,7 +865,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = node.Trash()
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
return err
@@ -988,7 +987,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
var info *acd.File
err = o.fs.pacer.Call(func() (bool, error) {
info, resp, err = folder.GetFile(o.fs.opt.Enc.FromStandardName(leaf))
return o.fs.shouldRetry(ctx, resp, err)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
if err == acd.ErrorNodeNotFound {
@@ -1045,7 +1044,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
} else {
in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
}
return o.fs.shouldRetry(ctx, resp, err)
return o.fs.shouldRetry(resp, err)
})
return in, err
}
@@ -1068,7 +1067,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if ok {
return false, nil
}
return o.fs.shouldRetry(ctx, resp, err)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
return err
@@ -1078,70 +1077,70 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Remove a node
func (f *Fs) removeNode(ctx context.Context, info *acd.Node) error {
func (f *Fs) removeNode(info *acd.Node) error {
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = info.Trash()
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
return err
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.removeNode(ctx, o.info)
return o.fs.removeNode(o.info)
}
// Restore a node
func (f *Fs) restoreNode(ctx context.Context, info *acd.Node) (newInfo *acd.Node, err error) {
func (f *Fs) restoreNode(info *acd.Node) (newInfo *acd.Node, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
newInfo, resp, err = info.Restore()
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
return newInfo, err
}
// Changes name of given node
func (f *Fs) renameNode(ctx context.Context, info *acd.Node, newName string) (newInfo *acd.Node, err error) {
func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
newInfo, resp, err = info.Rename(f.opt.Enc.FromStandardName(newName))
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
return newInfo, err
}
// Replaces one parent with another, effectively moving the file. Leaves other
// parents untouched. ReplaceParent cannot be used when the file is trashed.
func (f *Fs) replaceParent(ctx context.Context, info *acd.Node, oldParentID string, newParentID string) error {
func (f *Fs) replaceParent(info *acd.Node, oldParentID string, newParentID string) error {
return f.pacer.Call(func() (bool, error) {
resp, err := info.ReplaceParent(oldParentID, newParentID)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
}
// Adds one additional parent to object.
func (f *Fs) addParent(ctx context.Context, info *acd.Node, newParentID string) error {
func (f *Fs) addParent(info *acd.Node, newParentID string) error {
return f.pacer.Call(func() (bool, error) {
resp, err := info.AddParent(newParentID)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
}
// Remove given parent from object, leaving the other possible
// parents untouched. Object can end up having no parents.
func (f *Fs) removeParent(ctx context.Context, info *acd.Node, parentID string) error {
func (f *Fs) removeParent(info *acd.Node, parentID string) error {
return f.pacer.Call(func() (bool, error) {
resp, err := info.RemoveParent(parentID)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
}
// moveNode moves the node given from the srcLeaf,srcDirectoryID to
// the dstLeaf,dstDirectoryID
func (f *Fs) moveNode(ctx context.Context, name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) {
func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) {
// fs.Debugf(name, "moveNode dst(%q,%s) <- src(%q,%s)", dstLeaf, dstDirectoryID, srcLeaf, srcDirectoryID)
cantMove := fs.ErrorCantMove
if useDirErrorMsgs {
@@ -1155,7 +1154,7 @@ func (f *Fs) moveNode(ctx context.Context, name, dstLeaf, dstDirectoryID string,
if srcLeaf != dstLeaf {
// fs.Debugf(name, "renaming")
_, err = f.renameNode(ctx, srcInfo, dstLeaf)
_, err = f.renameNode(srcInfo, dstLeaf)
if err != nil {
fs.Debugf(name, "Move: quick path rename failed: %v", err)
goto OnConflict
@@ -1163,7 +1162,7 @@ func (f *Fs) moveNode(ctx context.Context, name, dstLeaf, dstDirectoryID string,
}
if srcDirectoryID != dstDirectoryID {
// fs.Debugf(name, "trying parent replace: %s -> %s", oldParentID, newParentID)
err = f.replaceParent(ctx, srcInfo, srcDirectoryID, dstDirectoryID)
err = f.replaceParent(srcInfo, srcDirectoryID, dstDirectoryID)
if err != nil {
fs.Debugf(name, "Move: quick path parent replace failed: %v", err)
return err
@@ -1176,13 +1175,13 @@ OnConflict:
fs.Debugf(name, "Could not directly rename file, presumably because there was a file with the same name already. Instead, the file will now be trashed where such operations do not cause errors. It will be restored to the correct parent after. If any of the subsequent calls fails, the rename/move will be in an invalid state.")
// fs.Debugf(name, "Trashing file")
err = f.removeNode(ctx, srcInfo)
err = f.removeNode(srcInfo)
if err != nil {
fs.Debugf(name, "Move: remove node failed: %v", err)
return err
}
// fs.Debugf(name, "Renaming file")
_, err = f.renameNode(ctx, srcInfo, dstLeaf)
_, err = f.renameNode(srcInfo, dstLeaf)
if err != nil {
fs.Debugf(name, "Move: rename node failed: %v", err)
return err
@@ -1190,19 +1189,19 @@ OnConflict:
// note: replacing parent is forbidden by API, modifying them individually is
// okay though
// fs.Debugf(name, "Adding target parent")
err = f.addParent(ctx, srcInfo, dstDirectoryID)
err = f.addParent(srcInfo, dstDirectoryID)
if err != nil {
fs.Debugf(name, "Move: addParent failed: %v", err)
return err
}
// fs.Debugf(name, "removing original parent")
err = f.removeParent(ctx, srcInfo, srcDirectoryID)
err = f.removeParent(srcInfo, srcDirectoryID)
if err != nil {
fs.Debugf(name, "Move: removeParent failed: %v", err)
return err
}
// fs.Debugf(name, "Restoring")
_, err = f.restoreNode(ctx, srcInfo)
_, err = f.restoreNode(srcInfo)
if err != nil {
fs.Debugf(name, "Move: restoreNode node failed: %v", err)
return err


@@ -1,6 +1,5 @@
// Test AmazonCloudDrive filesystem interface
//go:build acd
// +build acd
package amazonclouddrive_test


@@ -1,7 +1,6 @@
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// +build !plan9,!solaris,!js,go1.14
package azureblob
@@ -16,7 +15,6 @@ import (
"net/http"
"net/url"
"path"
"strconv"
"strings"
"sync"
"time"
@@ -49,8 +47,8 @@ const (
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
storageDefaultBaseURL = "blob.core.windows.net"
defaultChunkSize = 4 * fs.Mebi
maxChunkSize = 100 * fs.Mebi
defaultChunkSize = 4 * fs.MebiByte
maxChunkSize = 100 * fs.MebiByte
uploadConcurrency = 4
defaultAccessTier = azblob.AccessTierNone
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
@@ -75,29 +73,30 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "account",
Help: "Storage Account Name.\n\nLeave blank to use SAS URL or Emulator.",
Help: "Storage Account Name (leave blank to use SAS URL or Emulator)",
}, {
Name: "service_principal_file",
Help: `Path to file containing credentials for use with a service principal.
Leave blank normally. Needed only if you want to use a service principal instead of interactive login.
$ az ad sp create-for-rbac --name "<name>" \
$ az sp create-for-rbac --name "<name>" \
--role "Storage Blob Data Owner" \
--scopes "/subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>/blobServices/default/containers/<container>" \
> azure-principal.json
See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli) and ["Assign an Azure role for access to blob data"](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details.
See [Use Azure CLI to assign an Azure role for access to blob and queue data](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli)
for more details.
`,
}, {
Name: "key",
Help: "Storage Account Key.\n\nLeave blank to use SAS URL or Emulator.",
Help: "Storage Account Key (leave blank to use SAS URL or Emulator)",
}, {
Name: "sas_url",
Help: "SAS URL for container level access only.\n\nLeave blank if using account/key or Emulator.",
Help: "SAS URL for container level access only\n(leave blank if using account/key or Emulator)",
}, {
Name: "use_msi",
Help: `Use a managed service identity to authenticate (only works in Azure).
Help: `Use a managed service identity to authenticate (only works in Azure)
When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/)
to authenticate to Azure Storage instead of a SAS token or account key.
@@ -110,31 +109,31 @@ msi_client_id, or msi_mi_res_id parameters.`,
Default: false,
}, {
Name: "msi_object_id",
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
Help: "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_mi_res_id specified.",
Advanced: true,
}, {
Name: "msi_client_id",
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
Help: "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_object_id or msi_mi_res_id specified.",
Advanced: true,
}, {
Name: "msi_mi_res_id",
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
Help: "Azure resource ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_object_id specified.",
Advanced: true,
}, {
Name: "use_emulator",
Help: "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
Help: "Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)",
Default: false,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
Help: "Endpoint for the service\nLeave blank normally.",
Advanced: true,
}, {
Name: "upload_cutoff",
Help: "Cutoff for switching to chunked upload (<= 256 MiB) (deprecated).",
Help: "Cutoff for switching to chunked upload (<= 256MB). (Deprecated)",
Advanced: true,
}, {
Name: "chunk_size",
Help: `Upload chunk size (<= 100 MiB).
Help: `Upload chunk size (<= 100MB).
Note that this is stored in memory and there may be up to
"--transfers" chunks stored at once in memory.`,
@@ -201,7 +200,6 @@ to start uploading.`,
Default: memoryPoolFlushTime,
Advanced: true,
Help: `How often internal memory buffer pools will be flushed.
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
}, {
@@ -219,28 +217,6 @@ This option controls how often unused buffers will be removed from the pool.`,
encoder.EncodeDel |
encoder.EncodeBackSlash |
encoder.EncodeRightPeriod),
}, {
Name: "public_access",
Help: "Public access level of a container: blob or container.",
Default: string(azblob.PublicAccessNone),
Examples: []fs.OptionExample{
{
Value: string(azblob.PublicAccessNone),
Help: "The container and its blobs can be accessed only with an authorized request.\nIt's a default value.",
}, {
Value: string(azblob.PublicAccessBlob),
Help: "Blob data within this container can be read via anonymous request.",
}, {
Value: string(azblob.PublicAccessContainer),
Help: "Allow full public read access for container and blob data.",
},
},
Advanced: true,
}, {
Name: "no_head_object",
Help: `If set, do not do HEAD before GET when getting objects.`,
Default: false,
Advanced: true,
}},
})
}
@@ -265,8 +241,6 @@ type Options struct {
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
PublicAccess string `config:"public_access"`
NoHeadObject bool `config:"no_head_object"`
}
// Fs represents a remote azure server
@@ -288,7 +262,6 @@ type Fs struct {
imdsPacer *fs.Pacer // Same but for IMDS
uploadToken *pacer.TokenDispenser // control concurrency
pool *pool.Pool // memory pool
publicAccess azblob.PublicAccessType // Container Public Access Level
}
// Object describes an azure object
@@ -362,19 +335,6 @@ func validateAccessTier(tier string) bool {
}
}
// validatePublicAccess checks if azureblob supports use supplied public access level
func validatePublicAccess(publicAccess string) bool {
switch publicAccess {
case string(azblob.PublicAccessNone),
string(azblob.PublicAccessBlob),
string(azblob.PublicAccessContainer):
// valid cases
return true
default:
return false
}
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
401, // Unauthorized (e.g. "Token has expired")
@@ -387,10 +347,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func (f *Fs) shouldRetry(err error) (bool, error) {
// FIXME interpret special errors - more to do here
if storageErr, ok := err.(azblob.StorageError); ok {
switch storageErr.ServiceCode() {
@@ -412,7 +369,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.SizeSuffixBase
const minChunkSize = fs.Byte
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
@@ -542,11 +499,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
}
if !validatePublicAccess((opt.PublicAccess)) {
return nil, errors.Errorf("Azure Blob: Supported public access level are %s and %s",
string(azblob.PublicAccessBlob), string(azblob.PublicAccessContainer))
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
@@ -565,7 +517,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt.MemoryPoolUseMmap,
),
}
f.publicAccess = azblob.PublicAccessType(opt.PublicAccess)
f.imdsPacer.SetRetries(5) // per IMDS documentation
f.setRoot(root)
f.features = (&fs.Features{
@@ -627,7 +578,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Retry as specified by the documentation:
// https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#retry-guidance
token, err = GetMSIToken(ctx, userMSI)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
@@ -643,7 +594,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
var refreshedToken adal.Token
err := f.imdsPacer.Call(func() (bool, error) {
refreshedToken, err = GetMSIToken(ctx, userMSI)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
// Failed to refresh.
@@ -765,7 +716,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItemInternal) (fs
if err != nil {
return nil, err
}
} else if !o.fs.opt.NoHeadObject {
} else {
err := o.readMetaData() // reads info and headers, returning an error
if err != nil {
return nil, err
@@ -852,7 +803,7 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
err := f.pacer.Call(func() (bool, error) {
var err error
response, err = f.cntURL(container).ListBlobsHierarchySegment(ctx, marker, delimiter, options)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
@@ -1078,7 +1029,7 @@ func (f *Fs) listContainersToFn(fn listContainerFn) error {
err := f.pacer.Call(func() (bool, error) {
var err error
response, err = f.svcURL.ListContainersSegment(ctx, marker, params)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return err
@@ -1130,7 +1081,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
}
// now try to create the container
return f.pacer.Call(func() (bool, error) {
_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, f.publicAccess)
_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
if err != nil {
if storageErr, ok := err.(azblob.StorageError); ok {
switch storageErr.ServiceCode() {
@@ -1147,7 +1098,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
}
}
}
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
}, nil)
}
@@ -1185,10 +1136,10 @@ func (f *Fs) deleteContainer(ctx context.Context, container string) error {
return false, fs.ErrorDirNotFound
}
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
}
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
})
}
@@ -1261,7 +1212,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
err = f.pacer.Call(func() (bool, error) {
startCopy, err = dstBlobURL.StartCopyFromURL(ctx, *source, nil, azblob.ModifiedAccessConditions{}, options, azblob.AccessTierType(f.opt.AccessTier), nil)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return nil, err
@@ -1375,39 +1326,6 @@ func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetProper
return nil
}
func (o *Object) decodeMetaDataFromDownloadResponse(info *azblob.DownloadResponse) (err error) {
metadata := info.NewMetadata()
size := info.ContentLength()
if isDirectoryMarker(size, metadata, o.remote) {
return fs.ErrorNotAFile
}
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
// this as base64 encoded string.
o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
o.mimeType = info.ContentType()
o.size = size
o.modTime = info.LastModified()
o.accessTier = o.AccessTier()
o.setMetadata(metadata)
// If it was a Range request, the size is wrong, so correct it
if contentRange := info.ContentRange(); contentRange != "" {
slash := strings.IndexRune(contentRange, '/')
if slash >= 0 {
i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
if err == nil {
o.size = i
} else {
fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err)
}
} else {
fs.Debugf(o, "Failed to find length in %q", contentRange)
}
}
return nil
}
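The Content-Range parsing in the hunk above recovers the full object size from a ranged GET response. A minimal, self-contained sketch of that step (plain Go against the standard "bytes start-end/total" header form, not rclone code; the function and variable names are invented):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// totalFromContentRange extracts the total size from a header such as
// "bytes 0-1023/146515". It returns -1 when the total cannot be parsed.
func totalFromContentRange(contentRange string) int64 {
	slash := strings.IndexRune(contentRange, '/')
	if slash < 0 {
		return -1
	}
	total, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
	if err != nil {
		return -1
	}
	return total
}

func main() {
	fmt.Println(totalFromContentRange("bytes 0-1023/146515")) // 146515
}
```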
func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItemInternal) (err error) {
metadata := info.Metadata
size := *info.Properties.ContentLength
@@ -1455,7 +1373,7 @@ func (o *Object) readMetaData() (err error) {
var blobProperties *azblob.BlobGetPropertiesResponse
err = o.fs.pacer.Call(func() (bool, error) {
blobProperties, err = blob.GetProperties(ctx, options, azblob.ClientProvidedKeyOptions{})
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
})
if err != nil {
// On directories - GetProperties does not work and current SDK does not populate service code correctly hence check regular http response as well
@@ -1490,7 +1408,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
blob := o.getBlobReference()
err := o.fs.pacer.Call(func() (bool, error) {
_, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
})
if err != nil {
return err
@@ -1533,15 +1451,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var downloadResponse *azblob.DownloadResponse
err = o.fs.pacer.Call(func() (bool, error) {
downloadResponse, err = blob.Download(ctx, offset, count, ac, false, azblob.ClientProvidedKeyOptions{})
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to open for download")
}
err = o.decodeMetaDataFromDownloadResponse(downloadResponse)
if err != nil {
return nil, errors.Wrap(err, "failed to decode metadata for download")
}
in = downloadResponse.Body(azblob.RetryReaderOptions{})
return in, nil
}
@@ -1678,7 +1592,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Stream contents of the reader object to the given blob URL
blockBlobURL := blob.ToBlockBlobURL()
_, err = azblob.UploadStreamToBlockBlob(ctx, in, blockBlobURL, putBlobOptions)
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
})
if err != nil {
return err
@@ -1706,7 +1620,7 @@ func (o *Object) Remove(ctx context.Context) error {
ac := azblob.BlobAccessConditions{}
return o.fs.pacer.Call(func() (bool, error) {
_, err := blob.Delete(ctx, snapShotOptions, ac)
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
})
}
@@ -1735,7 +1649,7 @@ func (o *Object) SetTier(tier string) error {
ctx := context.Background()
err := o.fs.pacer.Call(func() (bool, error) {
_, err := blob.SetTier(ctx, desiredAccessTier, azblob.LeaseAccessConditions{})
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
})
if err != nil {


@@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// +build !plan9,!solaris,!js,go1.14
package azureblob


@@ -1,7 +1,6 @@
// Test AzureBlob filesystem interface
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// +build !plan9,!solaris,!js,go1.14
package azureblob


@@ -1,7 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || solaris || js
// +build plan9 solaris js
// +build plan9 solaris js !go1.14
package azureblob


@@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// +build !plan9,!solaris,!js,go1.14
package azureblob


@@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// +build !plan9,!solaris,!js,go1.14
package azureblob


@@ -2,11 +2,12 @@ package api
import (
"fmt"
"path"
"strconv"
"strings"
"time"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/version"
)
// Error describes a B2 error response
@@ -62,17 +63,16 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
return nil
}
// HasVersion returns true if it looks like the passed filename has a timestamp on it.
//
// Note that the passed filename's timestamp may still be invalid even if this
// function returns true.
func HasVersion(remote string) bool {
return version.Match(remote)
}
const versionFormat = "-v2006-01-02-150405.000"
// AddVersion adds the timestamp as a version string into the filename passed in.
func (t Timestamp) AddVersion(remote string) string {
return version.Add(remote, time.Time(t))
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
s := time.Time(t).Format(versionFormat)
// Replace the '.' with a '-'
s = strings.Replace(s, ".", "-", -1)
return base + s + ext
}
// RemoveVersion removes the timestamp from a filename as a version string.
@@ -80,9 +80,24 @@ func (t Timestamp) AddVersion(remote string) string {
// It returns the new file name and a timestamp, or the old filename
// and a zero timestamp.
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
time, newRemote := version.Remove(remote)
t = Timestamp(time)
return
newRemote = remote
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
if len(base) < len(versionFormat) {
return
}
versionStart := len(base) - len(versionFormat)
// Check it ends in -xxx
if base[len(base)-4] != '-' {
return
}
// Replace with .xxx for parsing
base = base[:len(base)-4] + "." + base[len(base)-3:]
newT, err := time.Parse(versionFormat, base[versionStart:])
if err != nil {
return
}
return Timestamp(newT), base[:versionStart] + ext
}
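The version helpers above hinge on the versionFormat constant: the timestamp is rendered with Go's reference-time layout and the '.' before the milliseconds is swapped for a '-'. A stand-alone sketch of that transformation, reusing the layout string from the hunk (addVersion here is an illustrative local helper, not the lib/version API):

```go
package main

import (
	"fmt"
	"path"
	"strings"
	"time"
)

const versionFormat = "-v2006-01-02-150405.000"

// addVersion inserts a version timestamp before the file extension,
// mirroring the logic shown in the diff above.
func addVersion(remote string, t time.Time) string {
	ext := path.Ext(remote)
	base := remote[:len(remote)-len(ext)]
	s := strings.Replace(t.Format(versionFormat), ".", "-", -1)
	return base + s + ext
}

func main() {
	t := time.Date(2001, 2, 3, 4, 5, 6, 123000000, time.UTC)
	fmt.Println(addVersion("potato.txt", t)) // potato-v2001-02-03-040506-123.txt
}
```

The expected filenames in the timestamp tests further down match this output format.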
// IsZero returns true if the timestamp is uninitialized


@@ -13,6 +13,7 @@ import (
var (
emptyT api.Timestamp
t0 = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
t0r = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
t1 = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
)
@@ -35,6 +36,40 @@ func TestTimestampUnmarshalJSON(t *testing.T) {
assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
}
func TestTimestampAddVersion(t *testing.T) {
for _, test := range []struct {
t api.Timestamp
in string
expected string
}{
{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
{t1, "potato", "potato-v2001-02-03-040506-123"},
{t1, "", "-v2001-02-03-040506-123"},
} {
actual := test.t.AddVersion(test.in)
assert.Equal(t, test.expected, actual, test.in)
}
}
func TestTimestampRemoveVersion(t *testing.T) {
for _, test := range []struct {
in string
expectedT api.Timestamp
expectedRemote string
}{
{"potato.txt", emptyT, "potato.txt"},
{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
{"potato-v2001-02-03-040506-123", t1, "potato"},
{"-v2001-02-03-040506-123", t1, ""},
{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
} {
actualT, actualRemote := api.RemoveVersion(test.in)
assert.Equal(t, test.expectedT, actualT, test.in)
assert.Equal(t, test.expectedRemote, actualRemote, test.in)
}
}
func TestTimestampIsZero(t *testing.T) {
assert.True(t, emptyT.IsZero())
assert.False(t, t0.IsZero())


@@ -54,10 +54,10 @@ const (
decayConstant = 1 // bigger for slower decay, exponential
maxParts = 10000
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
minChunkSize = 5 * fs.Mebi
defaultChunkSize = 96 * fs.Mebi
defaultUploadCutoff = 200 * fs.Mebi
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
minChunkSize = 5 * fs.MebiByte
defaultChunkSize = 96 * fs.MebiByte
defaultUploadCutoff = 200 * fs.MebiByte
largeFileCopyCutoff = 4 * fs.GibiByte // 5E9 is the max
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
)
@@ -75,15 +75,15 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "account",
Help: "Account ID or Application Key ID.",
Help: "Account ID or Application Key ID",
Required: true,
}, {
Name: "key",
Help: "Application Key.",
Help: "Application Key",
Required: true,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
Help: "Endpoint for the service.\nLeave blank normally.",
Advanced: true,
}, {
Name: "test_mode",
@@ -103,7 +103,7 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
Advanced: true,
}, {
Name: "versions",
Help: "Include old versions in directory listings.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Help: "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Default: false,
Advanced: true,
}, {
@@ -116,34 +116,32 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
Files above this size will be uploaded in chunks of "--b2-chunk-size".
This value should be set no larger than 4.657 GiB (== 5 GB).`,
This value should be set no larger than 4.657GiB (== 5GB).`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy.
Help: `Cutoff for switching to multipart copy
Any files larger than this that need to be server-side copied will be
copied in chunks of this size.
The minimum is 0 and the maximum is 4.6 GiB.`,
The minimum is 0 and the maximum is 4.6GB.`,
Default: largeFileCopyCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Upload chunk size.
Help: `Upload chunk size. Must fit in memory.
When uploading large files, chunk the file into this size.
Must fit in memory. These chunks are buffered in memory and there
might a maximum of "--transfers" chunks in progress at once.
5,000,000 Bytes is the minimum size.`,
When uploading large files, chunk the file into this size. Note that
these chunks are buffered in memory and there might a maximum of
"--transfers" chunks in progress at once. 5,000,000 Bytes is the
minimum size.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files.
Help: `Disable checksums for large (> upload cutoff) files
Normally rclone will calculate the SHA1 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
@@ -307,10 +305,7 @@ var retryErrorCodes = []int{
// shouldRetryNoAuth returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetryNoReauth(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
// For 429 or 503 errors look at the Retry-After: header and
// set the retry appropriately, starting with a minimum of 1
// second if it isn't set.
@@ -341,7 +336,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
}
return true, err
}
return f.shouldRetryNoReauth(ctx, resp, err)
return f.shouldRetryNoReauth(resp, err)
}
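Every pacer.Call in these hunks follows the same contract: the closure returns (retry, err), and the pacer keeps re-invoking it until retry is false or the attempt budget is exhausted; the context-aware variants additionally refuse to retry once the request context has been cancelled. A simplified stand-in for that loop (callWithRetries is invented for illustration and is not the real fs.Pacer API):

```go
package main

import (
	"errors"
	"fmt"
)

// callWithRetries keeps calling fn until it reports retry == false or
// the attempt budget runs out, returning the last error seen.
func callWithRetries(maxTries int, fn func() (bool, error)) error {
	var err error
	for try := 0; try < maxTries; try++ {
		var retry bool
		retry, err = fn()
		if !retry {
			break
		}
	}
	return err
}

func main() {
	attempts := 0
	err := callWithRetries(3, func() (bool, error) {
		attempts++
		if attempts < 3 {
			return true, errors.New("transient 503") // ask for another attempt
		}
		return false, nil // done, stop retrying
	})
	fmt.Println(attempts, err) // 3 <nil>
}
```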
// errorHandler parses a non 2xx error response into an error
@@ -509,7 +504,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &f.info)
return f.shouldRetryNoReauth(ctx, resp, err)
return f.shouldRetryNoReauth(resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to authenticate")
@@ -1355,7 +1350,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
}
var request = api.GetDownloadAuthorizationRequest{
BucketID: bucketID,
FileNamePrefix: f.opt.Enc.FromStandardPath(path.Join(f.rootDirectory, remote)),
FileNamePrefix: f.opt.Enc.FromStandardPath(path.Join(f.root, remote)),
ValidDurationInSeconds: validDurationInSeconds,
}
var response api.GetDownloadAuthorizationResponse
@@ -1746,13 +1741,6 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
ContentType: resp.Header.Get("Content-Type"),
Info: Info,
}
// When reading files from B2 via cloudflare using
// --b2-download-url cloudflare strips the Content-Length
// headers (presumably so it can inject stuff) so use the old
// length read from the listing.
if info.Size < 0 {
info.Size = o.size
}
return resp, info, nil
}


@@ -230,14 +230,14 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
//
// The number of bytes in the file being uploaded. Note that
// this header is required; you cannot leave it out and just
// use chunked encoding. The minimum size of every part but
// the last one is 100 MB (100,000,000 bytes)
// use chunked encoding. The minimum size of every part but
// the last one is 100MB.
//
// X-Bz-Content-Sha1
//
// The SHA1 checksum of the this part of the file. B2 will
// check this when the part is uploaded, to make sure that the
// data arrived correctly. The same SHA1 checksum must be
// data arrived correctly. The same SHA1 checksum must be
// passed to b2_finish_large_file.
opts := rest.Opts{
Method: "POST",


@@ -36,13 +36,13 @@ func (t *Time) UnmarshalJSON(data []byte) error {
// Error is returned from box when things go wrong
type Error struct {
Type string `json:"type"`
Status int `json:"status"`
Code string `json:"code"`
ContextInfo json.RawMessage `json:"context_info"`
HelpURL string `json:"help_url"`
Message string `json:"message"`
RequestID string `json:"request_id"`
Type string `json:"type"`
Status int `json:"status"`
Code string `json:"code"`
ContextInfo json.RawMessage
HelpURL string `json:"help_url"`
Message string `json:"message"`
RequestID string `json:"request_id"`
}
// Error returns a string for the error and satisfies the error interface
@@ -61,7 +61,7 @@ func (e *Error) Error() string {
var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"
// Types of things in Item
const (
@@ -90,12 +90,6 @@ type Item struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
} `json:"shared_link"`
OwnedBy struct {
Type string `json:"type"`
ID string `json:"id"`
Name string `json:"name"`
Login string `json:"login"`
} `json:"owned_by"`
}
// ModTime returns the modification time of the item
@@ -109,11 +103,10 @@ func (i *Item) ModTime() (t time.Time) {
// FolderItems is returned from the GetFolderItems call
type FolderItems struct {
TotalCount int `json:"total_count"`
Entries []Item `json:"entries"`
Offset int `json:"offset"`
Limit int `json:"limit"`
NextMarker *string `json:"next_marker,omitempty"`
TotalCount int `json:"total_count"`
Entries []Item `json:"entries"`
Offset int `json:"offset"`
Limit int `json:"limit"`
Order []struct {
By string `json:"by"`
Direction string `json:"direction"`
@@ -139,38 +132,6 @@ type UploadFile struct {
ContentModifiedAt Time `json:"content_modified_at"`
}
// PreUploadCheck is the request for upload preflight check
type PreUploadCheck struct {
Name string `json:"name"`
Parent Parent `json:"parent"`
Size *int64 `json:"size,omitempty"`
}
// PreUploadCheckResponse is the response from upload preflight check
// if successful
type PreUploadCheckResponse struct {
UploadToken string `json:"upload_token"`
UploadURL string `json:"upload_url"`
}
// PreUploadCheckConflict is returned in the ContextInfo error field
// from PreUploadCheck when the error code is "item_name_in_use"
type PreUploadCheckConflict struct {
Conflicts struct {
Type string `json:"type"`
ID string `json:"id"`
FileVersion struct {
Type string `json:"type"`
ID string `json:"id"`
Sha1 string `json:"sha1"`
} `json:"file_version"`
SequenceID string `json:"sequence_id"`
Etag string `json:"etag"`
Sha1 string `json:"sha1"`
Name string `json:"name"`
} `json:"conflicts"`
}
// UpdateFileModTime is used in Update File Info
type UpdateFileModTime struct {
ContentModifiedAt Time `json:"content_modified_at"`


@@ -17,13 +17,12 @@ import (
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/rclone/rclone/lib/encoder"
@@ -58,6 +57,7 @@ const (
decayConstant = 2 // bigger for slower decay, exponential
rootURL = "https://api.box.com/2.0"
uploadURL = "https://upload.box.com/api/2.0"
listChunks = 1000 // chunk size to read directory listings
minUploadCutoff = 50000000 // upload cutoff can be no lower than this
defaultUploadCutoff = 50 * 1024 * 1024
tokenURL = "https://api.box.com/oauth2/token"
@@ -84,7 +84,7 @@ func init() {
Name: "box",
Description: "Box",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
@@ -93,15 +93,15 @@ func init() {
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
if err != nil {
return nil, errors.Wrap(err, "failed to configure token with jwt authentication")
log.Fatalf("Failed to configure token with jwt authentication: %v", err)
}
// Else, if not using an access token, use oauth2
} else if boxAccessToken == "" || !boxAccessTokenOk {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
err = oauthutil.Config(ctx, "box", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token with oauth authentication: %v", err)
}
}
return nil, nil
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "root_folder_id",
@@ -110,23 +110,23 @@ func init() {
Advanced: true,
}, {
Name: "box_config_file",
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
Help: "Box App config.json location\nLeave blank normally." + env.ShellExpandHelp,
}, {
Name: "access_token",
Help: "Box App Primary Access Token\n\nLeave blank normally.",
Help: "Box App Primary Access Token\nLeave blank normally.",
}, {
Name: "box_sub_type",
Default: "user",
Examples: []fs.OptionExample{{
Value: "user",
Help: "Rclone should act on behalf of a user.",
Help: "Rclone should act on behalf of a user",
}, {
Value: "enterprise",
Help: "Rclone should act on behalf of a service account.",
Help: "Rclone should act on behalf of a service account",
}},
}, {
Name: "upload_cutoff",
Help: "Cutoff for switching to multipart upload (>= 50 MiB).",
Help: "Cutoff for switching to multipart upload (>= 50MB).",
Default: fs.SizeSuffix(defaultUploadCutoff),
Advanced: true,
}, {
@@ -134,16 +134,6 @@ func init() {
Help: "Max number of times to try committing a multipart file.",
Default: 100,
Advanced: true,
}, {
Name: "list_chunk",
Default: 1000,
Help: "Size of listing chunk 1-1000.",
Advanced: true,
}, {
Name: "owned_by",
Default: "",
Help: "Only show items owned by the login (email address) passed in.",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -167,15 +157,15 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
jsonFile = env.ShellExpand(jsonFile)
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
return errors.Wrap(err, "get box config")
log.Fatalf("Failed to configure token: %v", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
return errors.Wrap(err, "get decrypted private key")
log.Fatalf("Failed to configure token: %v", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
return errors.Wrap(err, "get claims")
log.Fatalf("Failed to configure token: %v", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
@@ -258,8 +248,6 @@ type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
RootFolderID string `config:"root_folder_id"`
AccessToken string `config:"access_token"`
ListChunk int `config:"list_chunk"`
OwnedBy string `config:"owned_by"`
}
// Fs represents a remote box
@@ -329,23 +317,13 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(resp *http.Response, err error) (bool, error) {
authRetry := false
if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Header.Get("Www-Authenticate"), "expired_token") {
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
authRetry = true
fs.Debugf(nil, "Should retry: %v", err)
}
// Box API errors which should be retries
if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "operation_blocked_temporary" {
fs.Debugf(nil, "Retrying API error %v", err)
return true, err
}
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -360,7 +338,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
return nil, err
}
found, err := f.listAll(ctx, directoryID, false, true, true, func(item *api.Item) bool {
found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
if strings.EqualFold(item.Name, leaf) {
info = item
return true
@@ -535,7 +513,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, true, func(item *api.Item) bool {
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
if strings.EqualFold(item.Name, leaf) {
pathIDOut = item.ID
return true
@@ -570,7 +548,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
@@ -591,26 +569,23 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, activeOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/folders/" + dirID + "/items",
Parameters: fieldsValue(),
}
opts.Parameters.Set("limit", strconv.Itoa(f.opt.ListChunk))
opts.Parameters.Set("usemarker", "true")
var marker *string
opts.Parameters.Set("limit", strconv.Itoa(listChunks))
offset := 0
OUTER:
for {
if marker != nil {
opts.Parameters.Set("marker", *marker)
}
opts.Parameters.Set("offset", strconv.Itoa(offset))
var result api.FolderItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return found, errors.Wrap(err, "couldn't list files")
@@ -629,10 +604,7 @@ OUTER:
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
continue
}
if activeOnly && item.ItemStatus != api.ItemStatusActive {
continue
}
if f.opt.OwnedBy != "" && f.opt.OwnedBy != item.OwnedBy.Login {
if item.ItemStatus != api.ItemStatusActive {
continue
}
item.Name = f.opt.Enc.ToStandardName(item.Name)
@@ -641,8 +613,8 @@ OUTER:
break OUTER
}
}
marker = result.NextMarker
if marker == nil {
offset += result.Limit
if offset >= result.TotalCount {
break
}
}
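The listAll hunk above contains both paging styles: marker-based paging (keep passing the returned next_marker until none is returned) and the offset/total-count loop. A generic, self-contained sketch of the offset style, with a fake page fetcher standing in for the Box items endpoint (all names invented):

```go
package main

import "fmt"

type page struct {
	Entries    []string
	Limit      int
	TotalCount int
}

// fetch stands in for one "list folder items" call at a given offset.
func fetch(all []string, offset, limit int) page {
	end := offset + limit
	if end > len(all) {
		end = len(all)
	}
	return page{Entries: all[offset:end], Limit: limit, TotalCount: len(all)}
}

func main() {
	items := []string{"a", "b", "c", "d", "e"}
	offset := 0
	for {
		p := fetch(items, offset, 2)
		for _, e := range p.Entries {
			fmt.Println(e)
		}
		offset += p.Limit // advance by the page size, as in the offset-based loop above
		if offset >= p.TotalCount {
			break
		}
	}
}
```

Marker-based paging avoids the main weakness of this style: if items are added or removed while the listing is in progress, fixed offsets can skip or repeat entries.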
@@ -664,7 +636,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool {
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
remote := path.Join(dir, info.Name)
if info.Type == api.ItemTypeFolder {
// cache the directory ID for later lookups
@@ -711,80 +683,22 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
return o, leaf, directoryID, nil
}
// preUploadCheck checks to see if a file can be uploaded
//
// It returns "", nil if the file is good to go
// It returns "ID", nil if the file must be updated
func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (ID string, err error) {
check := api.PreUploadCheck{
Name: f.opt.Enc.FromStandardName(leaf),
Parent: api.Parent{
ID: directoryID,
},
}
if size >= 0 {
check.Size = &size
}
opts := rest.Opts{
Method: "OPTIONS",
Path: "/files/content/",
}
var result api.PreUploadCheckResponse
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &check, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "item_name_in_use" {
var conflict api.PreUploadCheckConflict
err = json.Unmarshal(apiErr.ContextInfo, &conflict)
if err != nil {
return "", errors.Wrap(err, "pre-upload check: JSON decode failed")
}
if conflict.Conflicts.Type != api.ItemTypeFile {
return "", errors.Wrap(err, "pre-upload check: can't overwrite non file with file")
}
return conflict.Conflicts.ID, nil
}
return "", errors.Wrap(err, "pre-upload check")
}
return "", nil
}
// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// If directory doesn't exist, file doesn't exist so can upload
remote := src.Remote()
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return f.PutUnchecked(ctx, in, src, options...)
}
existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
switch err {
case nil:
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src)
default:
return nil, err
}
// Preflight check the upload, which returns the ID if the
// object already exists
ID, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
if err != nil {
return nil, err
}
if ID == "" {
return f.PutUnchecked(ctx, in, src, options...)
}
// If object exists then create a skeleton one with just id
o := &Object{
fs: f,
remote: remote,
id: ID,
}
return o, o.Update(ctx, in, src, options...)
}
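The preUploadCheck flow shown above sends the intended name, parent folder and size before any file data, and treats an item_name_in_use error as "the file already exists, update it by ID". A sketch of the JSON body that the PreUploadCheck struct marshals to (field names come from the struct tags in the hunk; the parent is assumed to carry just a folder id, and the concrete values are invented):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// preUploadCheck mirrors api.PreUploadCheck from the diff above.
type preUploadCheck struct {
	Name   string `json:"name"`
	Parent struct {
		ID string `json:"id"`
	} `json:"parent"`
	Size *int64 `json:"size,omitempty"`
}

func main() {
	size := int64(1048576)
	check := preUploadCheck{Name: "report.pdf", Size: &size}
	check.Parent.ID = "123456" // hypothetical folder ID
	out, _ := json.Marshal(check)
	fmt.Println(string(out))
	// {"name":"report.pdf","parent":{"id":"123456"},"size":1048576}
}
```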
// PutStream uploads to the remote path with the modTime given of indeterminate size
@@ -826,7 +740,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
}
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
}
@@ -853,7 +767,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "rmdir failed")
@@ -925,7 +839,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var info *api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -963,7 +877,7 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -981,7 +895,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &user)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to read user info")
@@ -1094,7 +1008,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
return info.SharedLink.URL, err
}
@@ -1112,42 +1026,51 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
}
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
}
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) (err error) {
var (
deleteErrors = int64(0)
concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
wg sync.WaitGroup
)
_, err = f.listAll(ctx, "trash", false, false, false, func(item *api.Item) bool {
if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
wg.Add(1)
concurrencyControl <- struct{}{}
go func() {
defer func() {
<-concurrencyControl
wg.Done()
}()
opts := rest.Opts{
Method: "GET",
Path: "/folders/trash/items",
Parameters: url.Values{
"fields": []string{"type", "id"},
},
}
opts.Parameters.Set("limit", strconv.Itoa(listChunks))
offset := 0
for {
opts.Parameters.Set("offset", strconv.Itoa(offset))
var result api.FolderItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't list trash")
}
for i := range result.Entries {
item := &result.Entries[i]
if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
err := f.deletePermanently(ctx, item.Type, item.ID)
if err != nil {
fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
atomic.AddInt64(&deleteErrors, 1)
return errors.Wrap(err, "failed to delete file")
}
}()
} else {
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
} else {
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
continue
}
}
offset += result.Limit
if offset >= result.TotalCount {
break
}
return false
})
wg.Wait()
if deleteErrors != 0 {
return errors.Errorf("failed to delete %d trash items", deleteErrors)
}
return err
return
}
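The concurrent variant of CleanUp above bounds the number of in-flight deletions with a buffered channel used as a semaphore, tracks completion with a WaitGroup, and counts failures with atomic adds. The same pattern in isolation (generic Go; pretendDelete stands in for the real per-item delete call):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// pretendDelete stands in for deleting one trashed item.
func pretendDelete(i int) error { return nil }

func main() {
	var (
		failures int64
		wg       sync.WaitGroup
		sem      = make(chan struct{}, 4) // at most 4 deletions in flight
	)
	for i := 0; i < 20; i++ {
		i := i
		wg.Add(1)
		sem <- struct{}{} // acquire a slot
		go func() {
			defer func() {
				<-sem // release the slot
				wg.Done()
			}()
			if err := pretendDelete(i); err != nil {
				atomic.AddInt64(&failures, 1)
			}
		}()
	}
	wg.Wait()
	fmt.Println("failed deletions:", atomic.LoadInt64(&failures))
}
```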
// DirCacheFlush resets the directory cache - used in testing as an
@@ -1201,9 +1124,6 @@ func (o *Object) Size() int64 {
// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) {
if info.Type == api.ItemTypeFolder {
return fs.ErrorIsDir
}
if info.Type != api.ItemTypeFile {
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
}
@@ -1262,7 +1182,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
var info *api.Item
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
return info, err
}
@@ -1295,7 +1215,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -1305,7 +1225,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// upload does a single non-multipart upload
//
// This is recommended for less than 50 MiB of content
// This is recommended for less than 50 MB of content
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
upload := api.UploadFile{
Name: o.fs.opt.Enc.FromStandardName(leaf),
@@ -1335,7 +1255,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &upload, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return err


@@ -44,7 +44,7 @@ func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID stri
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
return
}
@@ -74,7 +74,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
err = o.fs.pacer.Call(func() (bool, error) {
opts.Body = wrap(bytes.NewReader(chunk))
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -109,10 +109,10 @@ outer:
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
if err != nil {
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
}
body, err = rest.ReadBody(resp)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
delay := defaultDelay
var why string
@@ -167,7 +167,7 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
return err
}


@@ -1,4 +1,3 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
@@ -69,26 +68,26 @@ func init() {
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to cache.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Help: "Remote to cache.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "plex_url",
Help: "The URL of the Plex server.",
Help: "The URL of the Plex server",
}, {
Name: "plex_username",
Help: "The username of the Plex user.",
Help: "The username of the Plex user",
}, {
Name: "plex_password",
Help: "The password of the Plex user.",
Help: "The password of the Plex user",
IsPassword: true,
}, {
Name: "plex_token",
Help: "The plex token for authentication - auto set normally.",
Help: "The plex token for authentication - auto set normally",
Hide: fs.OptionHideBoth,
Advanced: true,
}, {
Name: "plex_insecure",
Help: "Skip all certificate verification when connecting to the Plex server.",
Help: "Skip all certificate verification when connecting to the Plex server",
Advanced: true,
}, {
Name: "chunk_size",
@@ -99,14 +98,14 @@ changed, any downloaded chunks will be invalid and cache-chunk-path
will need to be cleared or unexpected EOF errors will occur.`,
Default: DefCacheChunkSize,
Examples: []fs.OptionExample{{
Value: "1M",
Help: "1 MiB",
Value: "1m",
Help: "1MB",
}, {
Value: "5M",
Help: "5 MiB",
Help: "5 MB",
}, {
Value: "10M",
Help: "10 MiB",
Help: "10 MB",
}},
}, {
Name: "info_age",
@@ -133,22 +132,22 @@ oldest chunks until it goes under this value.`,
Default: DefCacheTotalChunkSize,
Examples: []fs.OptionExample{{
Value: "500M",
Help: "500 MiB",
Help: "500 MB",
}, {
Value: "1G",
Help: "1 GiB",
Help: "1 GB",
}, {
Value: "10G",
Help: "10 GiB",
Help: "10 GB",
}},
}, {
Name: "db_path",
Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
Help: "Directory to store file structure metadata DB.\n\nThe remote name is used as the DB file name.",
Default: filepath.Join(config.CacheDir, "cache-backend"),
Help: "Directory to store file structure metadata DB.\nThe remote name is used as the DB file name.",
Advanced: true,
}, {
Name: "chunk_path",
Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
Default: filepath.Join(config.CacheDir, "cache-backend"),
Help: `Directory to cache chunk files.
Path to where partial file data (chunks) are stored locally. The remote
@@ -168,7 +167,6 @@ then "--cache-chunk-path" will use the same path as "--cache-db-path".`,
Name: "chunk_clean_interval",
Default: DefCacheChunkCleanInterval,
Help: `How often should the cache perform cleanups of the chunk storage.
The default value should be ok for most people. If you find that the
cache goes over "cache-chunk-total-size" too often then try to lower
this value to force it to perform cleanups more often.`,
@@ -222,7 +220,7 @@ available on the local machine.`,
}, {
Name: "rps",
Default: int(DefCacheRps),
Help: `Limits the number of requests per second to the source FS (-1 to disable).
Help: `Limits the number of requests per second to the source FS (-1 to disable)
This setting places a hard limit on the number of requests per second
that cache will be doing to the cloud provider remote and try to
@@ -243,7 +241,7 @@ still pass.`,
}, {
Name: "writes",
Default: DefCacheWrites,
Help: `Cache file data on writes through the FS.
Help: `Cache file data on writes through the FS
If you need to read files immediately after you upload them through
cache you can enable this flag to have their data stored in the
@@ -264,7 +262,7 @@ provider`,
}, {
Name: "tmp_wait_time",
Default: DefCacheTmpWaitTime,
Help: `How long should files be stored in local cache before being uploaded.
Help: `How long should files be stored in local cache before being uploaded
This is the duration that a file must wait in the temporary location
_cache-tmp-upload-path_ before it is selected for upload.
@@ -275,7 +273,7 @@ to start the upload if a queue formed for this purpose.`,
}, {
Name: "db_wait_time",
Default: DefCacheDbWaitTime,
Help: `How long to wait for the DB to be available - 0 is unlimited.
Help: `How long to wait for the DB to be available - 0 is unlimited
Only one process can have the DB open at any one time, so rclone waits
for this duration for the DB to become available before it gives an
@@ -341,14 +339,8 @@ func parseRootPath(path string) (string, error) {
return strings.Trim(path, "/"), nil
}
var warnDeprecated sync.Once
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
warnDeprecated.Do(func() {
fs.Logf(nil, "WARNING: Cache backend is deprecated and may be removed in future. Please use VFS instead.")
})
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -422,8 +414,8 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
dbPath := f.opt.DbPath
chunkPath := f.opt.ChunkPath
// if the dbPath is non default but the chunk path is default, we overwrite the last to follow the same one as dbPath
if dbPath != filepath.Join(config.GetCacheDir(), "cache-backend") &&
chunkPath == filepath.Join(config.GetCacheDir(), "cache-backend") {
if dbPath != filepath.Join(config.CacheDir, "cache-backend") &&
chunkPath == filepath.Join(config.CacheDir, "cache-backend") {
chunkPath = dbPath
}
if filepath.Ext(dbPath) != "" {

View File

@@ -1,5 +1,5 @@
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
// +build !plan9,!js
// +build !race
package cache_test
@@ -16,7 +16,6 @@ import (
"os"
"path"
"path/filepath"
"runtime"
"runtime/debug"
"strings"
"testing"
@@ -294,9 +293,6 @@ func TestInternalCachedWrittenContentMatches(t *testing.T) {
}
func TestInternalDoubleWrittenContentMatches(t *testing.T) {
if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -685,9 +681,6 @@ func TestInternalCacheWrites(t *testing.T) {
}
func TestInternalMaxChunkSizeRespected(t *testing.T) {
if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -843,7 +836,7 @@ func newRun() *run {
if uploadDir == "" {
r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
if err != nil {
panic(fmt.Sprintf("Failed to create temp dir: %v", err))
log.Fatalf("Failed to create temp dir: %v", err)
}
} else {
r.tmpUploadDir = uploadDir
@@ -899,7 +892,7 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
m.Set("type", "cache")
m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
} else {
remoteType := config.FileGet(remote, "type")
remoteType := config.FileGet(remote, "type", "")
if remoteType == "" {
t.Skipf("skipped due to invalid remote type for %v", remote)
return nil, nil
@@ -910,14 +903,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
m.Set("password", cryptPassword1)
m.Set("password2", cryptPassword2)
}
remoteRemote := config.FileGet(remote, "remote")
remoteRemote := config.FileGet(remote, "remote", "")
if remoteRemote == "" {
t.Skipf("skipped due to invalid remote wrapper for %v", remote)
return nil, nil
}
remoteRemoteParts := strings.Split(remoteRemote, ":")
remoteWrapping := remoteRemoteParts[0]
remoteType := config.FileGet(remoteWrapping, "type")
remoteType := config.FileGet(remoteWrapping, "type", "")
if remoteType != "cache" {
t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
return nil, nil
@@ -926,9 +919,9 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
}
runInstance.rootIsCrypt = rootIsCrypt
runInstance.dbPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote+".db")
runInstance.chunkPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote)
runInstance.vfsCachePath = filepath.Join(config.GetCacheDir(), "vfs", remote)
runInstance.dbPath = filepath.Join(config.CacheDir, "cache-backend", cacheRemote+".db")
runInstance.chunkPath = filepath.Join(config.CacheDir, "cache-backend", cacheRemote)
runInstance.vfsCachePath = filepath.Join(config.CacheDir, "vfs", remote)
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
require.NoError(t, err)


@@ -1,7 +1,7 @@
// Test Cache filesystem interface
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
// +build !plan9,!js
// +build !race
package cache_test


@@ -1,7 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || js
// +build plan9 js
package cache


@@ -1,5 +1,5 @@
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
// +build !plan9,!js
// +build !race
package cache_test


@@ -1,4 +1,3 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache


@@ -1,4 +1,3 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache


@@ -1,4 +1,3 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache


@@ -1,4 +1,3 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache


@@ -1,4 +1,3 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache


@@ -1,4 +1,3 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache


@@ -150,13 +150,12 @@ func init() {
Name: "remote",
Required: true,
Help: `Remote to chunk/unchunk.
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
"myremote:bucket" or maybe "myremote:" (not recommended).`,
}, {
Name: "chunk_size",
Advanced: false,
Default: fs.SizeSuffix(2147483648), // 2 GiB
Default: fs.SizeSuffix(2147483648), // 2GB
Help: `Files larger than chunk size will be split in chunks.`,
}, {
Name: "name_format",
@@ -164,7 +163,6 @@ Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
Hide: fs.OptionHideCommandLine,
Default: `*.rclone_chunk.###`,
Help: `String format of chunk file names.
The two placeholders are: base file name (*) and chunk number (#...).
There must be one and only one asterisk and one or more consecutive hash characters.
If chunk number has less digits than the number of hashes, it is left-padded by zeros.
@@ -176,57 +174,48 @@ Possible chunk files are ignored if their name does not match given format.`,
Hide: fs.OptionHideCommandLine,
Default: 1,
Help: `Minimum valid chunk number. Usually 0 or 1.
By default chunk numbers start from 1.`,
}, {
Name: "meta_format",
Advanced: true,
Hide: fs.OptionHideCommandLine,
Default: "simplejson",
Help: `Format of the metadata object or "none".
By default "simplejson".
Help: `Format of the metadata object or "none". By default "simplejson".
Metadata is a small JSON file named after the composite file.`,
Examples: []fs.OptionExample{{
Value: "none",
Help: `Do not use metadata files at all.
Requires hash type "none".`,
Help: `Do not use metadata files at all. Requires hash type "none".`,
}, {
Value: "simplejson",
Help: `Simple JSON supports hash sums and chunk validation.
It has the following fields: ver, size, nchunks, md5, sha1.`,
}},
}, {
Name: "hash_type",
Advanced: false,
Default: "md5",
Help: `Choose how chunker handles hash sums.
All modes but "none" require metadata.`,
Help: `Choose how chunker handles hash sums. All modes but "none" require metadata.`,
Examples: []fs.OptionExample{{
Value: "none",
Help: `Pass any hash supported by wrapped remote for non-chunked files.
Return nothing otherwise.`,
Help: `Pass any hash supported by wrapped remote for non-chunked files, return nothing otherwise`,
}, {
Value: "md5",
Help: `MD5 for composite files.`,
Help: `MD5 for composite files`,
}, {
Value: "sha1",
Help: `SHA1 for composite files.`,
Help: `SHA1 for composite files`,
}, {
Value: "md5all",
Help: `MD5 for all files.`,
Help: `MD5 for all files`,
}, {
Value: "sha1all",
Help: `SHA1 for all files.`,
Help: `SHA1 for all files`,
}, {
Value: "md5quick",
Help: `Copying a file to chunker will request MD5 from the source.
Falling back to SHA1 if unsupported.`,
Help: `Copying a file to chunker will request MD5 from the source falling back to SHA1 if unsupported`,
}, {
Value: "sha1quick",
Help: `Similar to "md5quick" but prefers SHA1 over MD5.`,
Help: `Similar to "md5quick" but prefers SHA1 over MD5`,
}},
}, {
Name: "fail_hard",
@@ -288,10 +277,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
return nil, errors.New("can't point remote at itself - check the value of the remote setting")
}
baseName, basePath, err := fspath.SplitFs(remote)
baseName, basePath, err := fspath.Parse(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
}
if baseName != "" {
baseName += ":"
}
// Look for a file first
remotePath := fspath.JoinRootPath(basePath, rpath)
baseFs, err := cache.Get(ctx, baseName+remotePath)
@@ -443,10 +435,10 @@ func (f *Fs) setHashType(hashType string) error {
f.hashFallback = true
case "md5all":
f.useMD5 = true
f.hashAll = !f.base.Hashes().Contains(hash.MD5) || f.base.Features().SlowHash
f.hashAll = !f.base.Hashes().Contains(hash.MD5)
case "sha1all":
f.useSHA1 = true
f.hashAll = !f.base.Hashes().Contains(hash.SHA1) || f.base.Features().SlowHash
f.hashAll = !f.base.Hashes().Contains(hash.SHA1)
default:
return fmt.Errorf("unsupported hash type '%s'", hashType)
}
@@ -823,7 +815,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
tempEntries = append(tempEntries, wrapDir)
default:
if f.opt.FailHard {
return nil, fmt.Errorf("unknown object type %T", entry)
return nil, fmt.Errorf("Unknown object type %T", entry)
}
fs.Debugf(f, "unknown object type %T", entry)
}
@@ -1110,7 +1102,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
switch o.f.opt.MetaFormat {
case "simplejson":
if len(data) > maxMetadataSizeWritten {
if data != nil && len(data) > maxMetadataSizeWritten {
return "", nil // this was likely not a metadata object, return empty xactID but don't throw error
}
var metadata metaSimpleJSON
@@ -1225,7 +1217,7 @@ func (f *Fs) put(
// and skips the "EOF" read. Hence, switch to next limit here.
if !(c.chunkLimit == 0 || c.chunkLimit == c.chunkSize || c.sizeTotal == -1 || c.done) {
silentlyRemove(ctx, chunk)
return nil, fmt.Errorf("destination ignored %d data bytes", c.chunkLimit)
return nil, fmt.Errorf("Destination ignored %d data bytes", c.chunkLimit)
}
c.chunkLimit = c.chunkSize
@@ -1234,7 +1226,7 @@ func (f *Fs) put(
// Validate uploaded size
if c.sizeTotal != -1 && c.readCount != c.sizeTotal {
return nil, fmt.Errorf("incorrect upload size %d != %d", c.readCount, c.sizeTotal)
return nil, fmt.Errorf("Incorrect upload size %d != %d", c.readCount, c.sizeTotal)
}
// Check for input that looks like valid metadata
@@ -1271,7 +1263,7 @@ func (f *Fs) put(
sizeTotal += chunk.Size()
}
if sizeTotal != c.readCount {
return nil, fmt.Errorf("incorrect chunks size %d != %d", sizeTotal, c.readCount)
return nil, fmt.Errorf("Incorrect chunks size %d != %d", sizeTotal, c.readCount)
}
// If previous object was chunked, remove its chunks
@@ -1459,7 +1451,7 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
c.accountBytes(size)
return nil
}
const bufLen = 1048576 // 1 MiB
const bufLen = 1048576 // 1MB
buf := make([]byte, bufLen)
for size > 0 {
n := size
@@ -2451,7 +2443,7 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1,
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
// Be strict about JSON format
// to reduce possibility that a random small file resembles metadata.
if len(data) > maxMetadataSizeWritten {
if data != nil && len(data) > maxMetadataSizeWritten {
return nil, false, ErrMetaTooBig
}
if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
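Per the meta_format help earlier in this file, the "simplejson" metadata object records ver, size, nchunks, md5 and sha1, and unmarshalSimpleJSON above refuses anything that is too large or not a braces-delimited JSON document. A rough sketch of such a record (field names follow the help text; the exact struct tags of metaSimpleJSON are not shown in this diff, and the values are invented):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// metaSimpleJSON approximates the chunker metadata record described above.
type metaSimpleJSON struct {
	Version  int    `json:"ver"`
	Size     int64  `json:"size"`
	ChunkNum int    `json:"nchunks"`
	MD5      string `json:"md5"`
	SHA1     string `json:"sha1"`
}

func main() {
	meta := metaSimpleJSON{
		Version:  1,
		Size:     3 << 30, // 3 GiB composite file
		ChunkNum: 2,
		MD5:      "0123456789abcdef0123456789abcdef", // placeholder hash
	}
	out, _ := json.Marshal(meta)
	fmt.Println(string(out))
}
```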


@@ -12,8 +12,6 @@ import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
@@ -35,35 +33,11 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
Path: fmt.Sprintf("chunker-upload-%dk", kilobytes),
Size: int64(kilobytes) * int64(fs.Kibi),
Size: int64(kilobytes) * int64(fs.KibiByte),
})
})
}
type settings map[string]interface{}
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
fsName := strings.Split(f.Name(), "{")[0] // strip off hash
configMap := configmap.Simple{}
for key, val := range opts {
configMap[key] = fmt.Sprintf("%v", val)
}
rpath := fspath.JoinRootPath(f.Root(), path)
remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), rpath)
fixFs, err := fs.NewFs(ctx, remote)
require.NoError(t, err)
return fixFs
}
var mtime1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
func testPutFile(ctx context.Context, t *testing.T, f fs.Fs, name, contents, message string, check bool) fs.Object {
item := fstest.Item{Path: name, ModTime: mtime1}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
assert.NotNil(t, obj, message)
return obj
}
// test chunk name parser
func testChunkNameFormat(t *testing.T, f *Fs) {
saveOpt := f.opt
@@ -643,13 +617,22 @@ func testMetadataInput(t *testing.T, f *Fs) {
}()
f.opt.FailHard = false
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
putFile := func(f fs.Fs, name, contents, message string, check bool) fs.Object {
item := fstest.Item{Path: name, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
assert.NotNil(t, obj, message)
return obj
}
runSubtest := func(contents, name string) {
description := fmt.Sprintf("file with %s metadata", name)
filename := path.Join(dir, name)
require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")
part := testPutFile(ctx, t, f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
_ = testPutFile(ctx, t, f, filename, contents, "upload "+description, false)
part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
_ = putFile(f, filename, contents, "upload "+description, false)
obj, err := f.NewObject(ctx, filename)
assert.NoError(t, err, "access "+description)
@@ -695,7 +678,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
// Test that chunker refuses to change on objects with future/unknown metadata
func testFutureProof(t *testing.T, f *Fs) {
if !f.useMeta {
if f.opt.MetaFormat == "none" {
t.Skip("this test requires metadata support")
}
@@ -861,44 +844,6 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
_ = operations.Purge(ctx, f.base, dir)
}
// Test that md5all creates metadata even for small files
func testMD5AllSlow(t *testing.T, f *Fs) {
ctx := context.Background()
fsResult := deriveFs(ctx, t, f, "md5all", settings{
"chunk_size": "1P",
"name_format": "*.#",
"hash_type": "md5all",
"transactions": "rename",
"meta_format": "simplejson",
})
chunkFs, ok := fsResult.(*Fs)
require.True(t, ok, "fs must be a chunker remote")
baseFs := chunkFs.base
if !baseFs.Features().SlowHash {
t.Skipf("this test needs a base fs with slow hash, e.g. local")
}
assert.True(t, chunkFs.useMD5, "must use md5")
assert.True(t, chunkFs.hashAll, "must hash all files")
_ = testPutFile(ctx, t, chunkFs, "file", "-", "error", true)
obj, err := chunkFs.NewObject(ctx, "file")
require.NoError(t, err)
sum, err := obj.Hash(ctx, hash.MD5)
assert.NoError(t, err)
assert.Equal(t, "336d5ebc5436534e61d16e63ddfca327", sum)
list, err := baseFs.List(ctx, "")
require.NoError(t, err)
assert.Equal(t, 2, len(list))
_, err = baseFs.NewObject(ctx, "file")
assert.NoError(t, err, "metadata must be created")
_, err = baseFs.NewObject(ctx, "file.1")
assert.NoError(t, err, "first chunk must be created")
require.NoError(t, operations.Purge(ctx, baseFs, ""))
}
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PutLarge", func(t *testing.T) {
@@ -931,9 +876,6 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("ChunkerServerSideMove", func(t *testing.T) {
testChunkerServerSideMove(t, f)
})
t.Run("MD5AllSlow", func(t *testing.T) {
testMD5AllSlow(t, f)
})
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -36,7 +36,7 @@ import (
// Globals
const (
initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently
maxChunkSize = 8388608 // at 256 KiB and 8 MiB.
maxChunkSize = 8388608 // at 256KB and 8 MB.
bufferSize = 8388608
heuristicBytes = 1048576
@@ -53,7 +53,7 @@ const (
Gzip = 2
)
var nameRegexp = regexp.MustCompile("^(.+?)\\.([A-Za-z0-9-_]{11})$")
var nameRegexp = regexp.MustCompile("^(.+?)\\.([A-Za-z0-9+_]{11})$")
// Register with Fs
func init() {
@@ -83,23 +83,23 @@ func init() {
Name: "level",
Help: `GZIP compression level (-2 to 9).
Generally -1 (default, equivalent to 5) is recommended.
Levels 1 to 9 increase compression at the cost of speed. Going past 6
generally offers very little return.
Level -2 uses Huffmann encoding only. Only use if you know what you
are doing.
Level 0 turns off compression.`,
Generally -1 (default, equivalent to 5) is recommended.
Levels 1 to 9 increase compression at the cost of speed. Going past 6
generally offers very little return.
Level -2 uses Huffmann encoding only. Only use if you know what you
are doing.
Level 0 turns off compression.`,
Default: sgzip.DefaultCompression,
Advanced: true,
}, {
Name: "ram_cache_limit",
Help: `Some remotes don't allow the upload of files with unknown size.
In this case the compressed file will need to be cached to determine
its size.
Files smaller than this limit will be cached in RAM, files larger than
this limit will be cached on disk.`,
In this case the compressed file will need to be cached to determine
its size.
Files smaller than this limit will be cached in RAM, files larger than
this limit will be cached on disk.
Default: fs.SizeSuffix(20 * 1024 * 1024),
Advanced: true,
}},
@@ -1260,7 +1260,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
return o.Object.Open(ctx, options...)
}
// Get offset and limit from OpenOptions, pass the rest to the underlying remote
var openOptions = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
var openOptions []fs.OpenOption = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
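The last hunk seeds openOptions with a SeekOption and then scans the caller's options to work out the requested offset and limit, forwarding everything else to the wrapped remote. A stand-alone sketch of that option-splitting pattern, using simplified option types rather than rclone's fs package:

```go
package main

import "fmt"

// Simplified stand-ins for rclone's fs.SeekOption / fs.RangeOption.
type seekOption struct{ Offset int64 }
type rangeOption struct{ Start, End int64 } // End == -1 means "to the end"

// splitRange extracts an (offset, limit) pair from a mixed option list and
// returns the remaining options untouched, mirroring the loop in the hunk.
func splitRange(options []interface{}) (offset, limit int64, rest []interface{}) {
	offset, limit = 0, -1
	for _, opt := range options {
		switch o := opt.(type) {
		case seekOption:
			offset = o.Offset
		case rangeOption:
			offset = o.Start
			if o.End >= 0 {
				limit = o.End - o.Start + 1
			}
		default:
			rest = append(rest, opt)
		}
	}
	return offset, limit, rest
}

func main() {
	off, lim, rest := splitRange([]interface{}{rangeOption{Start: 10, End: 19}, "keep-me"})
	fmt.Println(off, lim, rest) // 10 10 [keep-me]
}
```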

View File

@@ -12,14 +12,12 @@ import (
"strconv"
"strings"
"sync"
"time"
"unicode/utf8"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/version"
"github.com/rfjakob/eme"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/scrypt"
@@ -444,32 +442,11 @@ func (c *Cipher) encryptFileName(in string) string {
if !c.dirNameEncrypt && i != (len(segments)-1) {
continue
}
// Strip version string so that only the non-versioned part
// of the file name gets encrypted/obfuscated
hasVersion := false
var t time.Time
if i == (len(segments)-1) && version.Match(segments[i]) {
var s string
t, s = version.Remove(segments[i])
// version.Remove can fail, in which case it returns segments[i]
if s != segments[i] {
segments[i] = s
hasVersion = true
}
}
if c.mode == NameEncryptionStandard {
segments[i] = c.encryptSegment(segments[i])
} else {
segments[i] = c.obfuscateSegment(segments[i])
}
// Add back a version to the encrypted/obfuscated
// file name, if we stripped it off earlier
if hasVersion {
segments[i] = version.Add(segments[i], t)
}
}
return strings.Join(segments, "/")
}
@@ -500,21 +477,6 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
if !c.dirNameEncrypt && i != (len(segments)-1) {
continue
}
// Strip version string so that only the non-versioned part
// of the file name gets decrypted/deobfuscated
hasVersion := false
var t time.Time
if i == (len(segments)-1) && version.Match(segments[i]) {
var s string
t, s = version.Remove(segments[i])
// version.Remove can fail, in which case it returns segments[i]
if s != segments[i] {
segments[i] = s
hasVersion = true
}
}
if c.mode == NameEncryptionStandard {
segments[i], err = c.decryptSegment(segments[i])
} else {
@@ -524,12 +486,6 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
if err != nil {
return "", err
}
// Add back a version to the decrypted/deobfuscated
// file name, if we stripped it off earlier
if hasVersion {
segments[i] = version.Add(segments[i], t)
}
}
return strings.Join(segments, "/"), nil
}
@@ -538,18 +494,10 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
func (c *Cipher) DecryptFileName(in string) (string, error) {
if c.mode == NameEncryptionOff {
remainingLength := len(in) - len(encryptedSuffix)
if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
return "", ErrorNotAnEncryptedFile
if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) {
return in[:remainingLength], nil
}
decrypted := in[:remainingLength]
if version.Match(decrypted) {
_, unversioned := version.Remove(decrypted)
if unversioned == "" {
return "", ErrorNotAnEncryptedFile
}
}
// Leave the version string on, if it was there
return decrypted, nil
return "", ErrorNotAnEncryptedFile
}
return c.decryptFileName(in)
}
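The hunks in this file concern the logic that strips a trailing version suffix such as `-v2001-02-03-040506-123` before encrypting or obfuscating a name and re-attaches it afterwards. A hedged sketch of that split-transform-reattach shape, with a local regexp standing in for lib/version (illustrative only):

```go
package main

import (
	"fmt"
	"regexp"
)

// versionRE matches a trailing rclone-style version suffix like "-v2001-02-03-040506-123".
var versionRE = regexp.MustCompile(`-v\d{4}-\d{2}-\d{2}-\d{6}-\d{3}$`)

// splitVersion returns the name without its version suffix plus the suffix itself.
func splitVersion(name string) (base, suffix string) {
	if loc := versionRE.FindStringIndex(name); loc != nil {
		return name[:loc[0]], name[loc[0]:]
	}
	return name, ""
}

// transformKeepingVersion applies fn to the base name only, then re-adds the suffix,
// which is the shape of the code added/removed around encryptSegment above.
func transformKeepingVersion(name string, fn func(string) string) string {
	base, suffix := splitVersion(name)
	return fn(base) + suffix
}

func main() {
	obfuscate := func(s string) string { return "enc(" + s + ")" }
	fmt.Println(transformKeepingVersion("report.txt-v2001-02-03-040506-123", obfuscate))
	// -> enc(report.txt)-v2001-02-03-040506-123
}
```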

View File

@@ -160,29 +160,22 @@ func TestEncryptFileName(t *testing.T) {
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
// Standard mode with directory name encryption off
c, _ = newCipher(NameEncryptionStandard, "", "", false)
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
assert.Equal(t, "1/12/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
// Now off mode
c, _ = newCipher(NameEncryptionOff, "", "", true)
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
// Obfuscation mode
c, _ = newCipher(NameEncryptionObfuscated, "", "", true)
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
assert.Equal(t, "49.6/99.23/150.890/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
assert.Equal(t, "49.6/99.23/150.890/162.uryyB-v2001-02-03-040506-123.GKG", c.EncryptFileName("1/12/123/hello-v2001-02-03-040506-123.txt"))
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
// Obfuscation mode with directory name encryption off
c, _ = newCipher(NameEncryptionObfuscated, "", "", false)
assert.Equal(t, "1/12/123/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
assert.Equal(t, "1/12/123/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
}
@@ -201,19 +194,14 @@ func TestDecryptFileName(t *testing.T) {
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
{NameEncryptionStandard, false, "1/12/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", "1-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
actual, actualErr := c.DecryptFileName(test.in)

View File

@@ -30,7 +30,7 @@ func init() {
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to encrypt/decrypt.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "filename_encryption",
@@ -39,13 +39,13 @@ func init() {
Examples: []fs.OptionExample{
{
Value: "standard",
Help: "Encrypt the filenames.\nSee the docs for the details.",
Help: "Encrypt the filenames see the docs for the details.",
}, {
Value: "obfuscate",
Help: "Very simple filename obfuscation.",
}, {
Value: "off",
Help: "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
},
},
}, {
@@ -71,7 +71,7 @@ NB If filename_encryption is "off" then this option will do nothing.`,
Required: true,
}, {
Name: "password2",
Help: "Password or pass phrase for salt.\n\nOptional but recommended.\nShould be different to the previous password.",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
IsPassword: true,
}, {
Name: "server_side_across_configs",
@@ -363,11 +363,7 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
if f.opt.NoDataEncryption {
o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
if err == nil && o != nil {
o = f.newObject(o)
}
return o, err
return put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
}
// Encrypt the data into wrappedIn
@@ -408,16 +404,13 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
return nil, errors.Wrap(err, "failed to read destination hash")
}
if srcHash != "" && dstHash != "" {
if srcHash != dstHash {
// remove object
err = o.Remove(ctx)
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
if srcHash != "" && dstHash != "" && srcHash != dstHash {
// remove object
err = o.Remove(ctx)
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
fs.Debugf(src, "%v = %s OK", ht, srcHash)
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
}
}
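The put hunk above verifies an upload by comparing source and destination hashes and, when both are known and differ, removes the freshly written object before reporting corruption. A minimal illustration of that verify-then-delete pattern with a stub object type (names are illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

// removable is the minimal behaviour we need from an uploaded object here.
type removable interface {
	Remove() error
}

type fakeObject struct{ removed bool }

func (o *fakeObject) Remove() error { o.removed = true; return nil }

// verifyUpload mirrors the shape of the hunk: if both hashes are known and they
// disagree, delete the freshly written object and report the corruption.
func verifyUpload(o removable, srcHash, dstHash string) error {
	if srcHash == "" || dstHash == "" {
		return nil // nothing to compare against
	}
	if srcHash == dstHash {
		return nil
	}
	if err := o.Remove(); err != nil {
		fmt.Println("failed to remove corrupted object:", err)
	}
	return errors.New("corrupted on transfer: hashes differ " + srcHash + " vs " + dstHash)
}

func main() {
	obj := &fakeObject{}
	fmt.Println(verifyUpload(obj, "abc123", "def456"), obj.removed)
}
```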
@@ -999,9 +992,6 @@ func (o *ObjectInfo) Size() int64 {
if size < 0 {
return size
}
if o.f.opt.NoDataEncryption {
return size
}
return o.f.cipher.EncryptedSize(size)
}

File diff suppressed because it is too large

View File

@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"mime"
@@ -18,10 +17,8 @@ import (
"github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/sync"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
@@ -114,7 +111,6 @@ func TestInternalParseExtensions(t *testing.T) {
}
func TestInternalFindExportFormat(t *testing.T) {
ctx := context.Background()
item := &drive.File{
Name: "file",
MimeType: "application/vnd.google-apps.document",
@@ -132,7 +128,7 @@ func TestInternalFindExportFormat(t *testing.T) {
} {
f := new(Fs)
f.exportExtensions = test.extensions
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(ctx, item)
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
assert.Equal(t, test.wantExtension, gotExtension)
if test.wantExtension != "" {
assert.Equal(t, item.Name+gotExtension, gotFilename)
@@ -464,81 +460,6 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
})
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
func (f *Fs) InternalTestAgeQuery(t *testing.T) {
opt := &filter.Opt{}
err := opt.MaxAge.Set("1h")
assert.NoError(t, err)
flt, err := filter.NewFilter(opt)
assert.NoError(t, err)
defCtx := context.Background()
fltCtx := filter.ReplaceConfig(defCtx, flt)
testCtx1 := fltCtx
testCtx2 := filter.SetUseFilter(testCtx1, true)
testCtx3, testCancel := context.WithCancel(testCtx2)
testCtx4 := filter.SetUseFilter(testCtx3, false)
testCancel()
assert.False(t, filter.GetUseFilter(testCtx1))
assert.True(t, filter.GetUseFilter(testCtx2))
assert.True(t, filter.GetUseFilter(testCtx3))
assert.False(t, filter.GetUseFilter(testCtx4))
subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), "agequery-testdir")
subFsResult, err := fs.NewFs(defCtx, subRemote)
require.NoError(t, err)
subFs, isDriveFs := subFsResult.(*Fs)
require.True(t, isDriveFs)
tempDir1, err := ioutil.TempDir("", "rclone-drive-agequery1-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempDir1)
}()
tempFs1, err := fs.NewFs(defCtx, tempDir1)
require.NoError(t, err)
tempDir2, err := ioutil.TempDir("", "rclone-drive-agequery2-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempDir2)
}()
tempFs2, err := fs.NewFs(defCtx, tempDir2)
require.NoError(t, err)
file1 := fstest.Item{ModTime: time.Now(), Path: "agequery.txt"}
_, _ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true)
// validate sync/copy
const timeQuery = "(modifiedTime >= '"
assert.NoError(t, sync.CopyDir(defCtx, subFs, tempFs1, false))
assert.NotContains(t, subFs.lastQuery, timeQuery)
assert.NoError(t, sync.CopyDir(fltCtx, subFs, tempFs1, false))
assert.Contains(t, subFs.lastQuery, timeQuery)
assert.NoError(t, sync.CopyDir(fltCtx, tempFs2, subFs, false))
assert.Contains(t, subFs.lastQuery, timeQuery)
assert.NoError(t, sync.CopyDir(defCtx, tempFs2, subFs, false))
assert.NotContains(t, subFs.lastQuery, timeQuery)
// validate list/walk
devNull, errOpen := os.OpenFile(os.DevNull, os.O_WRONLY, 0)
require.NoError(t, errOpen)
defer func() {
_ = devNull.Close()
}()
assert.NoError(t, operations.List(defCtx, subFs, devNull))
assert.NotContains(t, subFs.lastQuery, timeQuery)
assert.NoError(t, operations.List(fltCtx, subFs, devNull))
assert.Contains(t, subFs.lastQuery, timeQuery)
}
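The removed InternalTestAgeQuery relies on flags carried in the context (filter.SetUseFilter / GetUseFilter) to decide whether a listing should include a modifiedTime query. A tiny sketch of that context-flag pattern using only the standard library (these helpers are illustrative, not rclone's filter API):

```go
package main

import (
	"context"
	"fmt"
)

type useFilterKey struct{}

// setUseFilter stores the flag in a derived context.
func setUseFilter(ctx context.Context, use bool) context.Context {
	return context.WithValue(ctx, useFilterKey{}, use)
}

// getUseFilter reads the flag back, defaulting to false when unset.
func getUseFilter(ctx context.Context) bool {
	use, _ := ctx.Value(useFilterKey{}).(bool)
	return use
}

func main() {
	ctx1 := context.Background()
	ctx2 := setUseFilter(ctx1, true)
	ctx3 := setUseFilter(ctx2, false)
	fmt.Println(getUseFilter(ctx1), getUseFilter(ctx2), getUseFilter(ctx3)) // false true false
}
```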
func (f *Fs) InternalTest(t *testing.T) {
// These tests all depend on each other so run them as nested tests
t.Run("DocumentImport", func(t *testing.T) {
@@ -556,7 +477,6 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyID", f.InternalTestCopyID)
t.Run("AgeQuery", f.InternalTestAgeQuery)
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -94,7 +94,7 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
defer googleapi.CloseBody(res)
err = googleapi.CheckResponse(res)
}
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return nil, err
@@ -202,7 +202,7 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
err = rx.f.pacer.Call(func() (bool, error) {
fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
again, err := rx.f.shouldRetry(ctx, err)
again, err := rx.f.shouldRetry(err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
again = false
err = nil
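This hunk wraps each chunk transfer in the pacer's retry loop but forces the retry decision to "done" when the HTTP status says the chunk was accepted (200/201) or the session simply wants more data (308), even if an error was returned. A self-contained sketch of that status-overrides-error pattern (sendChunk is a stand-in for transferChunk):

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

const statusResumeIncomplete = 308

// sendChunk is a stand-in for the real chunk transfer; here it succeeds on the third try.
func sendChunk(attempt int) (status int, err error) {
	if attempt < 3 {
		return 0, errors.New("temporary network error")
	}
	return http.StatusOK, nil
}

// uploadWithRetry keeps retrying until the returned status says the chunk was
// accepted (200/201) or that the session wants more data (308).
func uploadWithRetry(maxTries int) error {
	for attempt := 1; attempt <= maxTries; attempt++ {
		status, err := sendChunk(attempt)
		if status == statusResumeIncomplete || status == http.StatusCreated || status == http.StatusOK {
			return nil
		}
		fmt.Printf("attempt %d failed (%v), retrying\n", attempt, err)
	}
	return errors.New("chunk upload failed after retries")
}

func main() {
	fmt.Println(uploadWithRetry(5))
}
```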

View File

@@ -1,358 +0,0 @@
// This file contains the implementation of the sync batcher for uploads
//
// Dropbox rules say you can start as many batches as you want, but
// you may only have one batch being committed and must wait for the
// batch to be finished before committing another.
package dropbox
import (
"context"
"fmt"
"sync"
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/atexit"
)
const (
maxBatchSize = 1000 // max size the batch can be
defaultTimeoutSync = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
defaultTimeoutAsync = 10 * time.Second // kick off the batch if nothing added for this long (async)
defaultBatchSizeAsync = 100 // default batch size if async
)
// batcher holds info about the current items waiting for upload
type batcher struct {
f *Fs // Fs this batch is part of
mode string // configured batch mode
size int // maximum size for batch
timeout time.Duration // idle timeout for batch
async bool // whether we are using async batching
in chan batcherRequest // incoming items to batch
closed chan struct{} // close to indicate batcher shut down
atexit atexit.FnHandle // atexit handle
shutOnce sync.Once // make sure we shutdown once only
wg sync.WaitGroup // wait for shutdown
}
// batcherRequest holds an incoming request with a place for a reply
type batcherRequest struct {
commitInfo *files.UploadSessionFinishArg
result chan<- batcherResponse
}
// Return true if batcherRequest is the quit request
func (br *batcherRequest) isQuit() bool {
return br.commitInfo == nil
}
// Send this to get the engine to quit
var quitRequest = batcherRequest{}
// batcherResponse holds a response to be delivered to clients waiting
// for a batch to complete.
type batcherResponse struct {
err error
entry *files.FileMetadata
}
// newBatcher creates a new batcher structure
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
if size > maxBatchSize || size < 0 {
return nil, errors.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
}
async := false
switch mode {
case "sync":
if size <= 0 {
ci := fs.GetConfig(ctx)
size = ci.Transfers
}
if timeout <= 0 {
timeout = defaultTimeoutSync
}
case "async":
if size <= 0 {
size = defaultBatchSizeAsync
}
if timeout <= 0 {
timeout = defaultTimeoutAsync
}
async = true
case "off":
size = 0
default:
return nil, errors.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
}
b := &batcher{
f: f,
mode: mode,
size: size,
timeout: timeout,
async: async,
in: make(chan batcherRequest, size),
closed: make(chan struct{}),
}
if b.Batching() {
b.atexit = atexit.Register(b.Shutdown)
b.wg.Add(1)
go b.commitLoop(context.Background())
}
return b, nil
}
// Batching returns true if batching is active
func (b *batcher) Batching() bool {
return b.size > 0
}
// finishBatch commits the batch, returning a batch status to poll or maybe complete
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (batchStatus *files.UploadSessionFinishBatchLaunch, err error) {
var arg = &files.UploadSessionFinishBatchArg{
Entries: items,
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatch(arg)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
}
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {
return nil, errors.Wrap(err, "batch commit failed")
}
return batchStatus, nil
}
// finishBatchJobStatus waits for the batch to complete returning completed entries
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
if launchBatchStatus.AsyncJobId == "" {
return nil, errors.New("wait for batch completion: empty job ID")
}
var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := 100 * time.Millisecond
const maxSleepTime = 1 * time.Second
startTime := time.Now()
try := 1
for {
remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
if remaining < 0 {
break
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: launchBatchStatus.AsyncJobId,
})
return shouldRetry(ctx, err)
})
if err != nil {
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
} else {
if batchStatus.Tag == "complete" {
fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
return batchStatus.Complete, nil
}
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
}
time.Sleep(sleepTime)
sleepTime *= 2
if sleepTime > maxSleepTime {
sleepTime = maxSleepTime
}
try++
}
if err == nil {
err = errors.New("batch didn't complete")
}
return nil, errors.Wrapf(err, "wait for batch failed after %d tries in %v", try, time.Since(startTime))
}
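finishBatchJobStatus above polls an async job with a sleep that doubles up to a cap until either the job completes or the overall commit timeout is exhausted. A generic sketch of that capped exponential backoff poller (function names are illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil keeps calling check with an exponentially growing sleep (capped at
// maxSleep) until check reports done or the overall deadline passes.
func pollUntil(deadline, maxSleep time.Duration, check func() (done bool, err error)) error {
	sleep := 100 * time.Millisecond
	start := time.Now()
	for try := 1; time.Since(start) < deadline; try++ {
		done, err := check()
		if err == nil && done {
			return nil
		}
		fmt.Printf("try %d: not done yet (err=%v), sleeping %v\n", try, err, sleep)
		time.Sleep(sleep)
		sleep *= 2
		if sleep > maxSleep {
			sleep = maxSleep
		}
	}
	return errors.New("job didn't complete before the deadline")
}

func main() {
	calls := 0
	_ = pollUntil(2*time.Second, 500*time.Millisecond, func() (bool, error) {
		calls++
		return calls >= 3, nil
	})
}
```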
// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}
}
}
}()
desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
fs.Debugf(b.f, "Committing %s", desc)
// finalise the batch getting either a result or a job id to poll
batchStatus, err := b.finishBatch(ctx, items)
if err != nil {
return err
}
// check whether batch is complete
var complete *files.UploadSessionFinishBatchResult
switch batchStatus.Tag {
case "async_job_id":
// wait for batch to complete
complete, err = b.finishBatchJobStatus(ctx, batchStatus)
if err != nil {
return err
}
case "complete":
complete = batchStatus.Complete
default:
return errors.Errorf("batch returned unknown status %q", batchStatus.Tag)
}
// Check we got the right number of entries
entries := complete.Entries
if len(entries) != len(results) {
return errors.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
}
// Report results to clients
var (
errorTag = ""
errorCount = 0
)
for i := range results {
item := entries[i]
resp := batcherResponse{}
if item.Tag == "success" {
resp.entry = item.Success
} else {
errorCount++
errorTag = item.Tag
if item.Failure != nil {
errorTag = item.Failure.Tag
if item.Failure.LookupFailed != nil {
errorTag += "/" + item.Failure.LookupFailed.Tag
}
if item.Failure.Path != nil {
errorTag += "/" + item.Failure.Path.Tag
}
if item.Failure.PropertiesError != nil {
errorTag += "/" + item.Failure.PropertiesError.Tag
}
}
resp.err = errors.Errorf("batch upload failed: %s", errorTag)
}
if !b.async {
results[i] <- resp
}
}
// Show signalled so no need to report error to clients from now on
signalled = true
// Report an error if any failed in the batch
if errorTag != "" {
return errors.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
}
fs.Debugf(b.f, "Committed %s", desc)
return nil
}
// commitLoop runs the commit engine in the background
func (b *batcher) commitLoop(ctx context.Context) {
var (
items []*files.UploadSessionFinishArg // current batch of uncommitted files
results []chan<- batcherResponse // current batch of clients awaiting results
idleTimer = time.NewTimer(b.timeout)
commit = func() {
err := b.commitBatch(ctx, items, results)
if err != nil {
fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
}
items, results = nil, nil
}
)
defer b.wg.Done()
defer idleTimer.Stop()
idleTimer.Stop()
outer:
for {
select {
case req := <-b.in:
if req.isQuit() {
break outer
}
items = append(items, req.commitInfo)
results = append(results, req.result)
idleTimer.Stop()
if len(items) >= b.size {
commit()
} else {
idleTimer.Reset(b.timeout)
}
case <-idleTimer.C:
if len(items) > 0 {
fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
commit()
}
}
}
// commit any remaining items
if len(items) > 0 {
commit()
}
}
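commitLoop above is the classic idle-timer batching loop: accumulate requests, flush when the batch is full, or flush whatever is pending once no new item has arrived for the timeout. A stripped-down, runnable sketch of the same select/timer shape (names are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// batchLoop collects items from in and flushes them either when the batch
// reaches size or when no item has arrived for idle. Closing in flushes the
// remainder and stops the loop.
func batchLoop(in <-chan string, size int, idle time.Duration, flush func([]string)) {
	var batch []string
	timer := time.NewTimer(idle)
	defer timer.Stop()
	timer.Stop() // don't fire until the first item arrives
	for {
		select {
		case item, ok := <-in:
			if !ok {
				if len(batch) > 0 {
					flush(batch)
				}
				return
			}
			batch = append(batch, item)
			timer.Stop()
			if len(batch) >= size {
				flush(batch)
				batch = nil
			} else {
				timer.Reset(idle)
			}
		case <-timer.C:
			if len(batch) > 0 {
				flush(batch)
				batch = nil
			}
		}
	}
}

func main() {
	in := make(chan string)
	go func() {
		for _, s := range []string{"a", "b", "c"} {
			in <- s
		}
		close(in)
	}()
	batchLoop(in, 2, 200*time.Millisecond, func(b []string) { fmt.Println("flush", b) })
}
```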
// Shutdown finishes any pending batches then shuts everything down
//
// Can be called from atexit handler
func (b *batcher) Shutdown() {
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Commiting uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message
//
// Note that we don't close b.in because that will
// cause write to closed channel in Commit when we are
// exiting due to a signal.
b.in <- quitRequest
b.wg.Wait()
})
}
// Commit commits the file using a batch call, first adding it to the
// batch and then waiting for the batch to complete in a synchronous
// way if async is not set.
func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
select {
case <-b.closed:
return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
default:
}
fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
resp := make(chan batcherResponse, 1)
b.in <- batcherRequest{
commitInfo: commitInfo,
result: resp,
}
// If running async then don't wait for the result
if b.async {
return nil, nil
}
result := <-resp
return result.entry, result.err
}
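Commit above hands each upload a one-shot reply channel and, in sync mode, blocks on it until the batch containing that file has been committed. A bare-bones illustration of that request/reply channel pattern (names are illustrative):

```go
package main

import "fmt"

type request struct {
	payload string
	reply   chan<- string
}

// worker answers each request on its private reply channel.
func worker(in <-chan request) {
	for req := range in {
		req.reply <- "done: " + req.payload
	}
}

// submit sends one request; when async is false it waits for the reply,
// mirroring the shape of batcher.Commit.
func submit(in chan<- request, payload string, async bool) string {
	reply := make(chan string, 1)
	in <- request{payload: payload, reply: reply}
	if async {
		return "" // caller doesn't wait for the result
	}
	return <-reply
}

func main() {
	in := make(chan request)
	go worker(in)
	fmt.Println(submit(in, "file1", false))
}
```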

View File

@@ -25,19 +25,20 @@ import (
"context"
"fmt"
"io"
"log"
"path"
"regexp"
"strings"
"time"
"unicode/utf8"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/auth"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/common"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/sharing"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/team"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/users"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/dropbox/dbhash"
"github.com/rclone/rclone/fs"
@@ -64,9 +65,9 @@ const (
// Upload chunk size - setting too small makes uploads slow.
// Chunks are buffered into memory for retries.
//
// Speed vs chunk size uploading a 1 GiB file on 2017-11-22
// Speed vs chunk size uploading a 1 GB file on 2017-11-22
//
// Chunk Size MiB, Speed MiB/s, % of max
// Chunk Size MB, Speed Mbyte/s, % of max
// 1 1.364 11%
// 2 2.443 19%
// 4 4.288 33%
@@ -81,11 +82,11 @@ const (
// 96 12.302 95%
// 128 12.945 100%
//
// Choose 48 MiB which is 91% of Maximum speed. rclone by
// default does 4 transfers so this should use 4*48 MiB = 192 MiB
// Choose 48MB which is 91% of Maximum speed. rclone by
// default does 4 transfers so this should use 4*48MB = 192MB
// by default.
defaultChunkSize = 48 * fs.Mebi
maxChunkSize = 150 * fs.Mebi
defaultChunkSize = 48 * fs.MebiByte
maxChunkSize = 150 * fs.MebiByte
// Max length of filename parts: https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing
maxFileNameLength = 255
)
@@ -98,10 +99,8 @@ var (
"files.content.write",
"files.content.read",
"sharing.write",
"account_info.read", // needed for About
// "file_requests.write",
// "members.read", // needed for impersonate - but causes app to need to be approved by Dropbox Team Admin during the flow
// "team_data.member"
},
// Endpoint: oauth2.Endpoint{
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
@@ -131,36 +130,39 @@ func getOauthConfig(m configmap.Mapper) *oauth2.Config {
}
// Make a copy of the config
config := *dropboxConfig
// Make a copy of the scopes with extra scopes requires appended
config.Scopes = append(config.Scopes, "members.read", "team_data.member")
// Make a copy of the scopes with "members.read" appended
config.Scopes = append(config.Scopes, "members.read")
return &config
}
// Register with Fs
func init() {
DbHashType = hash.RegisterHash("dropbox", "DropboxHash", 64, dbhash.New)
DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
fs.Register(&fs.RegInfo{
Name: "dropbox",
Description: "Dropbox",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: getOauthConfig(m),
NoOffline: true,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
opt := oauthutil.Options{
NoOffline: true,
OAuth2Opts: []oauth2.AuthCodeOption{
oauth2.SetAuthURLParam("token_access_type", "offline"),
},
})
}
err := oauthutil.Config(ctx, "dropbox", name, m, getOauthConfig(m), &opt)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "chunk_size",
Help: fmt.Sprintf(`Upload chunk size (< %v).
Help: fmt.Sprintf(`Upload chunk size. (< %v).
Any files larger than this will be uploaded in chunks of this size.
Note that chunks are buffered in memory (one at a time) so rclone can
deal with retries. Setting this larger will increase the speed
slightly (at most 10%% for 128 MiB in tests) at the cost of using more
slightly (at most 10%% for 128MB in tests) at the cost of using more
memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
Default: defaultChunkSize,
Advanced: true,
@@ -209,68 +211,6 @@ Note that we don't unmount the shared folder afterwards so the
shared folder.`,
Default: false,
Advanced: true,
}, {
Name: "batch_mode",
Help: `Upload file batching sync|async|off.
This sets the batch mode used by rclone.
For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)
This has 3 possible values
- off - no batching
- sync - batch uploads and check completion (default)
- async - batch upload and don't check completion
Rclone will close any outstanding batches when it exits, which may cause
a delay on quit.
`,
Default: "sync",
Advanced: true,
}, {
Name: "batch_size",
Help: `Max number of files in upload batch.
This sets the batch size of files to upload. It has to be less than 1000.
By default this is 0 which means rclone will calculate the batch size
depending on the setting of batch_mode.
- batch_mode: async - default batch_size is 100
- batch_mode: sync - default batch_size is the same as --transfers
- batch_mode: off - not in use
Rclone will close any outstanding batches when it exits, which may cause
a delay on quit.
Setting this is a great idea if you are uploading lots of small files
as it will make them a lot quicker. You can use --transfers 32 to
maximise throughput.
`,
Default: 0,
Advanced: true,
}, {
Name: "batch_timeout",
Help: `Max time to allow an idle upload batch before uploading.
If an upload batch is idle for more than this long then it will be
uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.
- batch_mode: async - default batch_timeout is 10s
- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: off - not in use
`,
Default: fs.Duration(0),
Advanced: true,
}, {
Name: "batch_commit_timeout",
Help: `Max time to wait for a batch to finish committing`,
Default: fs.Duration(10 * time.Minute),
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -290,16 +230,11 @@ default based on the batch_mode in use.
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
AsyncBatch bool `config:"async_batch"`
Enc encoder.MultiEncoder `config:"encoding"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote dropbox server
@@ -318,7 +253,6 @@ type Fs struct {
slashRootSlash string // root with "/" prefix and postfix, lowercase
pacer *fs.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none
batcher *batcher // batch builder
}
// Object describes a dropbox object
@@ -334,6 +268,8 @@ type Object struct {
hash string // content_hash of the object
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
@@ -356,10 +292,7 @@ func (f *Fs) Features() *fs.Features {
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(err error) (bool, error) {
if err == nil {
return false, err
}
@@ -374,7 +307,7 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
fs.Logf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
}
return true, err
@@ -387,7 +320,7 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.SizeSuffixBase
const minChunkSize = fs.Byte
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
@@ -444,10 +377,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
if err != nil {
return nil, err
}
cfg := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
Client: oAuthClient, // maybe???
@@ -496,7 +425,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if f.root == "" {
return f, nil
}
_, err := f.findSharedFile(ctx, f.root)
_, err := f.findSharedFile(f.root)
f.root = ""
if err == nil {
return f, fs.ErrorIsFile
@@ -516,7 +445,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
// root is not empty so we have to find the right shared folder if it exists
id, err := f.findSharedFolder(ctx, dir)
id, err := f.findSharedFolder(dir)
if err != nil {
// if we didn't find the specified shared folder we have to bail out here
return nil, err
@@ -524,7 +453,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// we found the specified shared folder so let's mount it
// this will add it to the users normal root namespace and allows us
// to actually perform operations on it using the normal api endpoints.
err = f.mountSharedFolder(ctx, id)
err = f.mountSharedFolder(id)
if err != nil {
switch e := err.(type) {
case sharing.MountFolderAPIError:
@@ -548,7 +477,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
var acc *users.FullAccount
err = f.pacer.Call(func() (bool, error) {
acc, err = f.users.GetCurrentAccount()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "get current account failed")
@@ -566,7 +495,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.setRoot(root)
// See if the root is actually an object
_, err = f.getFileMetadata(ctx, f.slashRoot)
_, err = f.getFileMetadata(f.slashRoot)
if err == nil {
newRoot := path.Dir(f.root)
if newRoot == "." {
@@ -580,7 +509,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
// headerGenerator for dropbox sdk
func (f *Fs) headerGenerator(hostType string, namespace string, route string) map[string]string {
func (f *Fs) headerGenerator(hostType string, style string, namespace string, route string) map[string]string {
if f.ns == "" {
return map[string]string{}
}
@@ -600,12 +529,12 @@ func (f *Fs) setRoot(root string) {
}
// getMetadata gets the metadata for a file or directory
func (f *Fs) getMetadata(ctx context.Context, objPath string) (entry files.IsMetadata, notFound bool, err error) {
func (f *Fs) getMetadata(objPath string) (entry files.IsMetadata, notFound bool, err error) {
err = f.pacer.Call(func() (bool, error) {
entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
Path: f.opt.Enc.FromStandardPath(objPath),
})
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
switch e := err.(type) {
@@ -620,8 +549,8 @@ func (f *Fs) getMetadata(ctx context.Context, objPath string) (entry files.IsMet
}
// getFileMetadata gets the metadata for a file
func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *files.FileMetadata, err error) {
entry, notFound, err := f.getMetadata(ctx, filePath)
func (f *Fs) getFileMetadata(filePath string) (fileInfo *files.FileMetadata, err error) {
entry, notFound, err := f.getMetadata(filePath)
if err != nil {
return nil, err
}
@@ -630,17 +559,14 @@ func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *fi
}
fileInfo, ok := entry.(*files.FileMetadata)
if !ok {
if _, ok = entry.(*files.FolderMetadata); ok {
return nil, fs.ErrorIsDir
}
return nil, fs.ErrorNotAFile
}
return fileInfo, nil
}
// getDirMetadata gets the metadata for a directory
func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (dirInfo *files.FolderMetadata, err error) {
entry, notFound, err := f.getMetadata(ctx, dirPath)
func (f *Fs) getDirMetadata(dirPath string) (dirInfo *files.FolderMetadata, err error) {
entry, notFound, err := f.getMetadata(dirPath)
if err != nil {
return nil, err
}
@@ -657,7 +583,7 @@ func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (dirInfo *files
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *files.FileMetadata) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -666,7 +592,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *files.F
if info != nil {
err = o.setMetadataFromEntry(info)
} else {
err = o.readEntryAndSetMetadata(ctx)
err = o.readEntryAndSetMetadata()
}
if err != nil {
return nil, err
@@ -678,14 +604,14 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *files.F
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if f.opt.SharedFiles {
return f.findSharedFile(ctx, remote)
return f.findSharedFile(remote)
}
return f.newObjectWithInfo(ctx, remote, nil)
return f.newObjectWithInfo(remote, nil)
}
// listSharedFoldersApi lists all available shared folders mounted and not mounted
// we'll need the id later so we have to return them in original format
func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err error) {
func (f *Fs) listSharedFolders() (entries fs.DirEntries, err error) {
started := false
var res *sharing.ListFoldersResult
for {
@@ -695,7 +621,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListFolders(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -707,7 +633,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListFoldersContinue(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
@@ -732,8 +658,8 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
// findSharedFolder find the id for a given shared folder name
// somewhat annoyingly there is no endpoint to query a shared folder by its name
// so our only option is to iterate over all shared folders
func (f *Fs) findSharedFolder(ctx context.Context, name string) (id string, err error) {
entries, err := f.listSharedFolders(ctx)
func (f *Fs) findSharedFolder(name string) (id string, err error) {
entries, err := f.listSharedFolders()
if err != nil {
return "", err
}
@@ -746,20 +672,20 @@ func (f *Fs) findSharedFolder(ctx context.Context, name string) (id string, err
}
// mountSharedFolder mount a shared folder to the root namespace
func (f *Fs) mountSharedFolder(ctx context.Context, id string) error {
func (f *Fs) mountSharedFolder(id string) error {
arg := sharing.MountFolderArg{
SharedFolderId: id,
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.sharing.MountFolder(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
return err
}
// listReceivedFiles lists shared files the user has access to (note this means individual
// files not files contained in shared folders)
func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err error) {
func (f *Fs) listReceivedFiles() (entries fs.DirEntries, err error) {
started := false
var res *sharing.ListFilesResult
for {
@@ -769,7 +695,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListReceivedFiles(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -781,7 +707,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListReceivedFilesContinue(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
@@ -794,7 +720,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
fs: f,
url: entry.PreviewUrl,
remote: entryPath,
modTime: *entry.TimeInvited,
modTime: entry.TimeInvited,
}
if err != nil {
return nil, err
@@ -808,8 +734,8 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
return entries, nil
}
func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err error) {
files, err := f.listReceivedFiles(ctx)
func (f *Fs) findSharedFile(name string) (o *Object, err error) {
files, err := f.listReceivedFiles()
if err != nil {
return nil, err
}
@@ -832,10 +758,10 @@ func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err er
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.opt.SharedFiles {
return f.listReceivedFiles(ctx)
return f.listReceivedFiles()
}
if f.opt.SharedFolders {
return f.listSharedFolders(ctx)
return f.listSharedFolders()
}
root := f.slashRoot
@@ -856,7 +782,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
switch e := err.(type) {
@@ -874,7 +800,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolderContinue(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
@@ -904,7 +830,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
entries = append(entries, d)
} else if fileInfo != nil {
o, err := f.newObjectWithInfo(ctx, remote, fileInfo)
o, err := f.newObjectWithInfo(remote, fileInfo)
if err != nil {
return nil, err
}
@@ -953,7 +879,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
}
// check directory doesn't exist
_, err := f.getDirMetadata(ctx, root)
_, err := f.getDirMetadata(root)
if err == nil {
return nil // directory exists already
} else if err != fs.ErrorDirNotFound {
@@ -970,7 +896,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.CreateFolderV2(&arg2)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
return err
}
@@ -987,7 +913,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
if check {
// check directory exists
_, err = f.getDirMetadata(ctx, root)
_, err = f.getDirMetadata(root)
if err != nil {
return errors.Wrap(err, "Rmdir")
}
@@ -1004,7 +930,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
var res *files.ListFolderResult
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
@@ -1017,7 +943,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
// remove it
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.DeleteV2(&files.DeleteArg{Path: root})
return shouldRetry(ctx, err)
return shouldRetry(err)
})
return err
}
@@ -1070,7 +996,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) {
result, err = f.srv.CopyV2(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "copy failed")
@@ -1131,7 +1057,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) {
result, err = f.srv.MoveV2(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "move failed")
@@ -1155,27 +1081,17 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
createArg := sharing.CreateSharedLinkWithSettingsArg{
Path: absPath,
Settings: &sharing.SharedLinkSettings{
RequestedVisibility: &sharing.RequestedVisibility{
Tagged: dropbox.Tagged{Tag: sharing.RequestedVisibilityPublic},
},
Audience: &sharing.LinkAudience{
Tagged: dropbox.Tagged{Tag: sharing.LinkAudiencePublic},
},
Access: &sharing.RequestedLinkAccessLevel{
Tagged: dropbox.Tagged{Tag: sharing.RequestedLinkAccessLevelViewer},
},
},
// FIXME this gives settings_error/not_authorized/.. errors
// and the expires setting isn't in the documentation so remove
// for now.
// Settings: &sharing.SharedLinkSettings{
// Expires: time.Now().Add(time.Duration(expire)).UTC().Round(time.Second),
// },
}
if expire < fs.DurationOff {
expiryTime := time.Now().Add(time.Duration(expire)).UTC().Round(time.Second)
createArg.Settings.Expires = &expiryTime
}
var linkRes sharing.IsSharedLinkMetadata
err = f.pacer.Call(func() (bool, error) {
linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil && strings.Contains(err.Error(),
@@ -1188,7 +1104,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var listRes *sharing.ListSharedLinksResult
err = f.pacer.Call(func() (bool, error) {
listRes, err = f.sharing.ListSharedLinks(&listArg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return
@@ -1230,7 +1146,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
dstPath := path.Join(f.slashRoot, dstRemote)
// Check if destination exists
_, err := f.getDirMetadata(ctx, dstPath)
_, err := f.getDirMetadata(dstPath)
if err == nil {
return fs.ErrorDirExists
} else if err != fs.ErrorDirNotFound {
@@ -1249,7 +1165,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.MoveV2(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "MoveDir failed")
@@ -1263,7 +1179,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
var q *users.SpaceUsage
err = f.pacer.Call(func() (bool, error) {
q, err = f.users.GetSpaceUsage()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "about failed")
@@ -1294,7 +1210,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
go func() {
// get the StartCursor early so all changes from now on get processed
startCursor, err := f.changeNotifyCursor(ctx)
startCursor, err := f.changeNotifyCursor()
if err != nil {
fs.Infof(f, "Failed to get StartCursor: %s", err)
}
@@ -1319,7 +1235,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
}
case <-tickerC:
if startCursor == "" {
startCursor, err = f.changeNotifyCursor(ctx)
startCursor, err = f.changeNotifyCursor()
if err != nil {
fs.Infof(f, "Failed to get StartCursor: %s", err)
continue
@@ -1335,7 +1251,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
}()
}
func (f *Fs) changeNotifyCursor(ctx context.Context) (cursor string, err error) {
func (f *Fs) changeNotifyCursor() (cursor string, err error) {
var startCursor *files.ListFolderGetLatestCursorResult
err = f.pacer.Call(func() (bool, error) {
@@ -1350,7 +1266,7 @@ func (f *Fs) changeNotifyCursor(ctx context.Context) (cursor string, err error)
startCursor, err = f.srv.ListFolderGetLatestCursor(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return
@@ -1380,7 +1296,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
}
res, err = f.svc.ListFolderLongpoll(&args)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return
@@ -1403,7 +1319,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
}
err = f.pacer.Call(func() (bool, error) {
changeList, err = f.srv.ListFolderContinue(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return "", errors.Wrap(err, "list continue")
@@ -1415,13 +1331,13 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
switch info := entry.(type) {
case *files.FolderMetadata:
entryType = fs.EntryDirectory
entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
case *files.FileMetadata:
entryType = fs.EntryObject
entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
case *files.DeletedMetadata:
entryType = fs.EntryObject
entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
default:
fs.Errorf(entry, "dropbox ChangeNotify: ignoring unknown EntryType %T", entry)
continue
@@ -1443,13 +1359,6 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(DbHashType)
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
f.batcher.Shutdown()
return nil
}
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -1483,7 +1392,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != DbHashType {
return "", hash.ErrUnsupported
}
err := o.readMetaData(ctx)
err := o.readMetaData()
if err != nil {
return "", errors.Wrap(err, "failed to read hash from metadata")
}
@@ -1507,17 +1416,17 @@ func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
}
// Reads the entry for a file from dropbox
func (o *Object) readEntry(ctx context.Context) (*files.FileMetadata, error) {
return o.fs.getFileMetadata(ctx, o.remotePath())
func (o *Object) readEntry() (*files.FileMetadata, error) {
return o.fs.getFileMetadata(o.remotePath())
}
// Read entry if not set and set metadata from it
func (o *Object) readEntryAndSetMetadata(ctx context.Context) error {
func (o *Object) readEntryAndSetMetadata() error {
// Last resort set time from client
if !o.modTime.IsZero() {
return nil
}
entry, err := o.readEntry(ctx)
entry, err := o.readEntry()
if err != nil {
return err
}
@@ -1530,12 +1439,12 @@ func (o *Object) remotePath() string {
}
// readMetaData gets the info if it hasn't already been fetched
func (o *Object) readMetaData(ctx context.Context) (err error) {
func (o *Object) readMetaData() (err error) {
if !o.modTime.IsZero() {
return nil
}
// Last resort
return o.readEntryAndSetMetadata(ctx)
return o.readEntryAndSetMetadata()
}
// ModTime returns the modification time of the object
@@ -1543,7 +1452,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx)
err := o.readMetaData()
if err != nil {
fs.Debugf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -1577,7 +1486,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
_, in, err = o.fs.sharing.GetSharedLinkFile(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -1593,7 +1502,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
_, in, err = o.fs.srv.Download(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
switch e := err.(type) {
@@ -1609,83 +1518,97 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// uploadChunked uploads the object in parts
//
// Will introduce two additional network requests to start and finish the session.
// If the size is unknown (i.e. -1) the method incurs one additional
// request to the Dropbox API that does not carry a payload to close the append session.
func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
// start upload
// Will work optimally if size is >= uploadChunkSize. If the size is either
// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
// avoidable request to the Dropbox API that does not carry payload.
func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
chunkSize := int64(o.fs.opt.ChunkSize)
chunks := 0
if size != -1 {
chunks = int(size/chunkSize) + 1
}
in := readers.NewCountingReader(in0)
buf := make([]byte, int(chunkSize))
fmtChunk := func(cur int, last bool) {
if chunks == 0 && last {
fs.Debugf(o, "Streaming chunk %d/%d", cur, cur)
} else if chunks == 0 {
fs.Debugf(o, "Streaming chunk %d/unknown", cur)
} else {
fs.Debugf(o, "Uploading chunk %d/%d", cur, chunks)
}
}
// write the first chunk
fmtChunk(1, false)
var res *files.UploadSessionStartResult
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, nil)
return shouldRetry(ctx, err)
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, chunk)
return shouldRetry(err)
})
if err != nil {
return nil, err
}
chunkSize := int64(o.fs.opt.ChunkSize)
chunks, remainder := size/chunkSize, size%chunkSize
if remainder > 0 {
chunks++
}
// write chunks
in := readers.NewCountingReader(in0)
buf := make([]byte, int(chunkSize))
cursor := files.UploadSessionCursor{
SessionId: res.SessionId,
Offset: 0,
}
appendArg := files.UploadSessionAppendArg{Cursor: &cursor}
for currentChunk := 1; ; currentChunk++ {
cursor.Offset = in.BytesRead()
appendArg := files.UploadSessionAppendArg{
Cursor: &cursor,
Close: false,
}
if chunks < 0 {
fs.Debugf(o, "Streaming chunk %d/unknown", currentChunk)
} else {
fs.Debugf(o, "Uploading chunk %d/%d", currentChunk, chunks)
// write more whole chunks (if any)
currentChunk := 2
for {
if chunks > 0 && currentChunk >= chunks {
// if the size is known, only upload full chunks. Remaining bytes are uploaded with
// the UploadSessionFinish request.
break
} else if chunks == 0 && in.BytesRead()-cursor.Offset < uint64(chunkSize) {
// if the size is unknown, upload as long as we can read full chunks from the reader.
// The UploadSessionFinish request will not contain any payload.
break
}
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
cursor.Offset = in.BytesRead()
fmtChunk(currentChunk, false)
chunk = readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
// after session is started, we retry everything
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {
return nil, err
}
if appendArg.Close {
break
}
if size > 0 {
// if size is known, check if next chunk is final
appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize)
} else {
// if size is unknown, upload as long as we can read full chunks from the reader
appendArg.Close = in.BytesRead()-cursor.Offset < uint64(chunkSize)
}
currentChunk++
}
// finish upload
// write the remains
cursor.Offset = in.BytesRead()
args := &files.UploadSessionFinishArg{
Cursor: &cursor,
Commit: commitInfo,
}
// If we are batching then we should have written all the data now
// store the commit info now for a batch commit
if o.fs.batcher.Batching() {
return o.fs.batcher.Commit(ctx, args)
}
fmtChunk(currentChunk, true)
chunk = readers.NewRepeatableReaderBuffer(in, buf)
err = o.fs.pacer.Call(func() (bool, error) {
entry, err = o.fs.srv.UploadSessionFinish(args, nil)
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
entry, err = o.fs.srv.UploadSessionFinish(args, chunk)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
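The chunked upload loop above keeps a cursor offset equal to the bytes read so far, sends whole chunks while it can, and leaves the remainder (or nothing, when the size is unknown) for the finish call. A minimal sketch of just that offset bookkeeping, using only the standard library; chunkOffsets is an invented helper, not the Dropbox SDK:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// chunkOffsets walks a reader in fixed-size chunks and returns the cursor
// offset that would accompany each append, plus the final offset for the
// finish call. It mirrors only the offset bookkeeping of the loop above.
func chunkOffsets(r io.Reader, chunkSize int64) (appendOffsets []int64, finishOffset int64, err error) {
	var read int64
	buf := make([]byte, chunkSize)
	for {
		n, rerr := io.ReadFull(r, buf)
		if n > 0 {
			appendOffsets = append(appendOffsets, read) // offset before this chunk
			read += int64(n)
		}
		if rerr == io.EOF || rerr == io.ErrUnexpectedEOF {
			break
		}
		if rerr != nil {
			return nil, 0, rerr
		}
	}
	return appendOffsets, read, nil
}

func main() {
	data := bytes.Repeat([]byte("x"), 10)
	offsets, finish, _ := chunkOffsets(bytes.NewReader(data), 4)
	fmt.Println(offsets, finish) // [0 4 8] 10
}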
@@ -1743,8 +1666,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))
commitInfo.Mode.Tag = "overwrite"
// The Dropbox API only accepts timestamps in UTC with second precision.
clientModified := src.ModTime(ctx).UTC().Round(time.Second)
commitInfo.ClientModified = &clientModified
commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
// Don't attempt to create filenames that are too long
if cErr := checkPathLength(commitInfo.Path); cErr != nil {
return cErr
@@ -1753,26 +1675,17 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
size := src.Size()
var err error
var entry *files.FileMetadata
if size > int64(o.fs.opt.ChunkSize) || size < 0 || o.fs.batcher.Batching() {
entry, err = o.uploadChunked(ctx, in, commitInfo, size)
if size > int64(o.fs.opt.ChunkSize) || size == -1 {
entry, err = o.uploadChunked(in, commitInfo, size)
} else {
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
entry, err = o.fs.srv.Upload(commitInfo, in)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
}
if err != nil {
return errors.Wrap(err, "upload failed")
}
// If we haven't received data back from batch upload then fake it
//
// This will only happen if we are uploading async batches
if entry == nil {
o.bytes = size
o.modTime = *commitInfo.ClientModified
o.hash = "" // we don't have this
return nil
}
return o.setMetadataFromEntry(entry)
}
@@ -1785,7 +1698,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
})
return shouldRetry(ctx, err)
return shouldRetry(err)
})
return err
}
@@ -1800,7 +1713,6 @@ var (
_ fs.PublicLinker = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Shutdowner = &Fs{}
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)

View File

@@ -4,7 +4,6 @@ import (
"context"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
@@ -29,10 +28,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(resp *http.Response, err error) (bool, error) {
// Detect this error which the integration tests provoke
// error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
//
@@ -78,7 +74,7 @@ func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
var file File
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &file)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't read file info")
@@ -87,16 +83,10 @@ func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
return &file, err
}
// maybe do some actual validation later if necessary
func validToken(token *GetTokenResponse) bool {
return token.Status == "OK"
}
func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
request := DownloadRequest{
URL: url,
Single: 1,
Pass: f.opt.FilePassword,
}
opts := rest.Opts{
Method: "POST",
@@ -106,8 +96,7 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
var token GetTokenResponse
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
doretry, err := shouldRetry(ctx, resp, err)
return doretry || !validToken(&token), err
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
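The token request above retries not only on transport errors but also until the response status is "OK". A rough standalone sketch of that retry-until-valid pattern, with a plain loop standing in for the rclone pacer; the helper names and delays are invented:

package main

import (
	"errors"
	"fmt"
	"time"
)

// callUntilValid retries fn until it succeeds and the result passes the
// validity check, mimicking the "doretry || !validToken(&token)" condition above.
func callUntilValid(attempts int, delay time.Duration, fn func() (string, error), valid func(string) bool) (string, error) {
	var lastErr error
	for i := 0; i < attempts; i++ {
		res, err := fn()
		if err == nil && valid(res) {
			return res, nil
		}
		if err == nil {
			err = errors.New("response not valid yet")
		}
		lastErr = err
		time.Sleep(delay)
	}
	return "", lastErr
}

func main() {
	calls := 0
	token, err := callUntilValid(5, 10*time.Millisecond, func() (string, error) {
		calls++
		if calls < 3 {
			return "KO", nil // server answered, but the token is not usable yet
		}
		return "OK", nil
	}, func(s string) bool { return s == "OK" })
	fmt.Println(token, err, calls) // OK <nil> 3
}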
@@ -126,22 +115,16 @@ func fileFromSharedFile(file *SharedFile) File {
func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
ContentType: "application/x-www-form-urlencoded",
}
if f.opt.FolderPassword != "" {
opts.Method = "POST"
opts.Parameters = nil
opts.Body = strings.NewReader("json=1&pass=" + url.QueryEscape(f.opt.FolderPassword))
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
}
var sharedFiles SharedFolderResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, &sharedFiles)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
@@ -170,7 +153,7 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
filesList = &FilesList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, filesList)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
@@ -198,7 +181,7 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
foldersList = &FoldersList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, foldersList)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list folders")
@@ -292,7 +275,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
response = &MakeFolderResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, response)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create folder")
@@ -319,13 +302,13 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove folder")
}
if response.Status != "OK" {
return nil, errors.Errorf("can't remove folder: %s", response.Message)
return nil, errors.New("Can't remove non-empty dir")
}
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
@@ -348,7 +331,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
response = &GenericOKResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
@@ -375,7 +358,7 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
response = &MoveFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
@@ -400,7 +383,7 @@ func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename stri
response = &CopyFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
@@ -410,34 +393,6 @@ func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename stri
return response, nil
}
func (f *Fs) renameFile(ctx context.Context, url string, newName string) (response *RenameFileResponse, err error) {
request := &RenameFileRequest{
URLs: []RenameFileURL{
{
URL: url,
Filename: newName,
},
},
}
opts := rest.Opts{
Method: "POST",
Path: "/file/rename.cgi",
}
response = &RenameFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't rename file")
}
return response, nil
}
func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
// fs.Debugf(f, "Requesting Upload node")
@@ -450,7 +405,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
response = &GetUploadNodeResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "didnt got an upload node")
@@ -493,7 +448,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
err = f.pacer.CallNoRetry(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, nil)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
@@ -527,7 +482,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
response = &EndFileUploadResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {

View File

@@ -35,27 +35,17 @@ func init() {
fs.Register(&fs.RegInfo{
Name: "fichier",
Description: "1Fichier",
NewFs: NewFs,
Config: func(ctx context.Context, name string, config configmap.Mapper) {
},
NewFs: NewFs,
Options: []fs.Option{{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
Name: "api_key",
}, {
Help: "If you want to download a shared folder, add this parameter.",
Help: "If you want to download a shared folder, add this parameter",
Name: "shared_folder",
Required: false,
Advanced: true,
}, {
Help: "If you want to download a shared file that is password protected, add this parameter.",
Name: "file_password",
Required: false,
Advanced: true,
IsPassword: true,
}, {
Help: "If you want to list the files in a shared folder that is password protected, add this parameter.",
Name: "folder_password",
Required: false,
Advanced: true,
IsPassword: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -87,11 +77,9 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
FilePassword string `config:"file_password"`
FolderPassword string `config:"folder_password"`
Enc encoder.MultiEncoder `config:"encoding"`
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs is the interface a cloud storage system must provide
@@ -360,10 +348,8 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
return nil, err
}
if len(fileUploadResponse.Links) == 0 {
return nil, errors.New("upload response not found")
} else if len(fileUploadResponse.Links) > 1 {
fs.Debugf(remote, "Multiple upload responses found, using the first")
if len(fileUploadResponse.Links) != 1 {
return nil, errors.New("unexpected amount of files")
}
link := fileUploadResponse.Links[0]
@@ -437,45 +423,25 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantMove
}
// Find current directory ID
_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil {
return nil, err
}
// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}
// If it is in the correct directory, just rename it
var url string
if currentDirectoryID == directoryID {
resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't rename file")
}
if resp.Status != "OK" {
return nil, errors.Errorf("couldn't rename file: %s", resp.Message)
}
url = resp.URLs[0].URL
} else {
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.Errorf("couldn't move file: %s", resp.Message)
}
url = resp.URLs[0]
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.New("couldn't move file")
}
file, err := f.readFileInfo(ctx, url)
file, err := f.readFileInfo(ctx, resp.URLs[0])
if err != nil {
return nil, errors.New("couldn't read file data")
}
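The Move rework above first resolves the current and destination directory IDs and only issues a rename when they match, falling back to a cross-directory move otherwise. A trivial sketch of that branch; plan is a made-up helper:

package main

import "fmt"

// plan mirrors the decision in the Move hunk above: same directory means a
// rename is enough, otherwise a separate cross-directory move call is needed.
func plan(currentDirID, destDirID string) string {
	if currentDirID == destDirID {
		return "rename"
	}
	return "move"
}

func main() {
	fmt.Println(plan("42", "42"), plan("42", "7")) // rename move
}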
@@ -506,7 +472,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.Errorf("couldn't move file: %s", resp.Message)
return nil, errors.New("couldn't move file")
}
file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)
@@ -517,21 +483,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
o, err := f.NewObject(ctx, remote)
if err != nil {
return "", err
}
return o.(*Object).file.URL, nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ dircache.DirCacher = (*Fs)(nil)
)

View File

@@ -94,7 +94,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.rest.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {

View File

@@ -19,7 +19,6 @@ type ListFilesRequest struct {
type DownloadRequest struct {
URL string `json:"url"`
Single int `json:"single"`
Pass string `json:"pass,omitempty"`
}
// RemoveFolderRequest is the request structure of the corresponding request
@@ -64,9 +63,8 @@ type MoveFileRequest struct {
// MoveFileResponse is the response structure of the corresponding request
type MoveFileResponse struct {
Status string `json:"status"`
Message string `json:"message"`
URLs []string `json:"urls"`
Status string `json:"status"`
URLs []string `json:"urls"`
}
// CopyFileRequest is the request structure of the corresponding request
@@ -78,10 +76,9 @@ type CopyFileRequest struct {
// CopyFileResponse is the response structure of the corresponding request
type CopyFileResponse struct {
Status string `json:"status"`
Message string `json:"message"`
Copied int `json:"copied"`
URLs []FileCopy `json:"urls"`
Status string `json:"status"`
Copied int `json:"copied"`
URLs []FileCopy `json:"urls"`
}
// FileCopy is used in the CopyFileResponse
@@ -90,30 +87,6 @@ type FileCopy struct {
ToURL string `json:"to_url"`
}
// RenameFileURL is the data structure to rename a single file
type RenameFileURL struct {
URL string `json:"url"`
Filename string `json:"filename"`
}
// RenameFileRequest is the request structure of the corresponding request
type RenameFileRequest struct {
URLs []RenameFileURL `json:"urls"`
Pretty int `json:"pretty"`
}
// RenameFileResponse is the response structure of the corresponding request
type RenameFileResponse struct {
Status string `json:"status"`
Message string `json:"message"`
Renamed int `json:"renamed"`
URLs []struct {
URL string `json:"url"`
OldFilename string `json:"old_filename"`
NewFilename string `json:"new_filename"`
} `json:"urls"`
}
// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
ID string `json:"id"`

View File

@@ -5,7 +5,6 @@ package api
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"strings"
@@ -52,46 +51,11 @@ func (t Time) String() string {
return time.Time(t).UTC().Format(timeFormatParameters)
}
// Int represents an integer which can be represented in JSON as a
// quoted integer or an integer.
type Int int
// MarshalJSON turns a Int into JSON
func (i *Int) MarshalJSON() (out []byte, err error) {
return json.Marshal((*int)(i))
}
// UnmarshalJSON turns JSON into a Int
func (i *Int) UnmarshalJSON(data []byte) error {
if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
data = data[1 : len(data)-1]
}
return json.Unmarshal(data, (*int)(i))
}
// String represents an string which can be represented in JSON as a
// quoted string or an integer.
type String string
// MarshalJSON turns a String into JSON
func (s *String) MarshalJSON() (out []byte, err error) {
return json.Marshal((*string)(s))
}
// UnmarshalJSON turns JSON into a String
func (s *String) UnmarshalJSON(data []byte) error {
err := json.Unmarshal(data, (*string)(s))
if err != nil {
*s = String(data)
}
return nil
}
// Status return returned in all status responses
type Status struct {
Code string `json:"status"`
Message string `json:"statusmessage"`
TaskID String `json:"taskid"`
TaskID string `json:"taskid"`
// Warning string `json:"warning"` // obsolete
}
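The Int and String helpers above accept a JSON value whether or not the server quotes it, by stripping surrounding quotes before unmarshalling. A runnable sketch of the same trick for an int field; FlexInt is a made-up name:

package main

import (
	"encoding/json"
	"fmt"
)

// FlexInt accepts both 42 and "42" in JSON: drop surrounding quotes if
// present, then unmarshal as a plain int, as the Int type above does.
type FlexInt int

func (i *FlexInt) UnmarshalJSON(data []byte) error {
	if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
		data = data[1 : len(data)-1]
	}
	return json.Unmarshal(data, (*int)(i))
}

func main() {
	var a, b struct {
		From FlexInt `json:"from"`
	}
	_ = json.Unmarshal([]byte(`{"from": 7}`), &a)
	_ = json.Unmarshal([]byte(`{"from": "7"}`), &b)
	fmt.Println(a.From, b.From) // 7 7
}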
@@ -151,7 +115,7 @@ type GetFolderContentsResponse struct {
Total int `json:"total,string"`
Items []Item `json:"filelist"`
Folder Item `json:"folder"`
From Int `json:"from"`
From int `json:"from,string"`
//Count int `json:"count"`
Pid string `json:"pid"`
RefreshResult Status `json:"refreshresult"`

View File

@@ -65,7 +65,7 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "url",
Help: "URL of the Enterprise File Fabric to connect to.",
Help: "URL of the Enterprise File Fabric to connect to",
Required: true,
Examples: []fs.OptionExample{{
Value: "https://storagemadeeasy.com",
@@ -79,15 +79,14 @@ func init() {
}},
}, {
Name: "root_folder_id",
Help: `ID of the root folder.
Help: `ID of the root folder
Leave blank normally.
Fill in to make rclone start with directory of a given ID.
`,
}, {
Name: "permanent_token",
Help: `Permanent Authentication Token.
Help: `Permanent Authentication Token
A Permanent Authentication Token can be created in the Enterprise File
Fabric, on the user's Dashboard under Security, there is an entry
@@ -100,7 +99,7 @@ For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
`,
}, {
Name: "token",
Help: `Session Token.
Help: `Session Token
This is a session token which rclone caches in the config file. It is
usually valid for 1 hour.
@@ -110,14 +109,14 @@ Don't set this value - rclone will set it automatically.
Advanced: true,
}, {
Name: "token_expiry",
Help: `Token expiry time.
Help: `Token expiry time
Don't set this value - rclone will set it automatically.
`,
Advanced: true,
}, {
Name: "version",
Help: `Version read from the file fabric.
Help: `Version read from the file fabric
Don't set this value - rclone will set it automatically.
`,
@@ -223,17 +222,13 @@ var retryStatusCodes = []struct {
// delete in that folder. Please try again later or use
// another name. (error_background)
code: "error_background",
sleep: 1 * time.Second,
sleep: 6 * time.Second,
},
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
// try should be the number of the tries so far, counting up from 1
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, status api.OKError, try int) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func (f *Fs) shouldRetry(resp *http.Response, err error, status api.OKError) (bool, error) {
if err != nil {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -246,10 +241,9 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
for _, retryCode := range retryStatusCodes {
if code == retryCode.code {
if retryCode.sleep > 0 {
// make this thread only sleep exponentially increasing extra time
sleepTime := retryCode.sleep << (try - 1)
fs.Debugf(f, "Sleeping for %v to wait for %q error to clear", sleepTime, retryCode.code)
time.Sleep(sleepTime)
// make this thread only sleep extra time
fs.Debugf(f, "Sleeping for %v to wait for %q error to clear", retryCode.sleep, retryCode.code)
time.Sleep(retryCode.sleep)
}
return true, err
}
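The retry table above sleeps for the configured delay shifted left by the try count, so the wait doubles on every attempt. A tiny illustration of that arithmetic; backoff is a hypothetical helper, not part of the backend:

package main

import (
	"fmt"
	"time"
)

// backoff returns the sleep used on a given try (counting from 1) when the
// base delay is doubled each attempt via a shift, as in the hunk above.
func backoff(base time.Duration, try int) time.Duration {
	return base << (try - 1)
}

func main() {
	for try := 1; try <= 4; try++ {
		fmt.Println(try, backoff(time.Second, try)) // 1s, 2s, 4s, 8s
	}
}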
@@ -403,13 +397,11 @@ func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKEr
ContentType: "application/x-www-form-urlencoded",
Options: options,
}
try := 0
err = f.pacer.Call(func() (bool, error) {
try++
// Refresh the body each retry
opts.Body = strings.NewReader(data.Encode())
resp, err = f.srv.CallJSON(ctx, &opts, nil, result)
return f.shouldRetry(ctx, resp, err, result, try)
return f.shouldRetry(resp, err, result)
})
if err != nil {
return resp, err
@@ -844,7 +836,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
}
// Wait for the background task to complete if necessary
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) {
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID string) (err error) {
if taskID == "" || taskID == "0" {
// No task to wait for
return nil
@@ -1094,7 +1086,7 @@ func (o *Object) Size() int64 {
// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) {
if info.Type != api.ItemTypeFile {
return fs.ErrorIsDir
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
}
o.hasMetaData = true
o.size = info.Size
@@ -1283,11 +1275,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var contentLength = size
opts.ContentLength = &contentLength // NB CallJSON scribbles on this which is naughty
}
try := 0
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
try++
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &uploader)
return o.fs.shouldRetry(ctx, resp, err, nil, try)
return o.fs.shouldRetry(resp, err, nil)
})
if err != nil {
return errors.Wrap(err, "failed to upload")

View File

@@ -21,7 +21,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
@@ -34,12 +33,6 @@ var (
currentUser = env.CurrentUser()
)
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -48,23 +41,26 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "host",
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Help: "FTP host to connect to",
Required: true,
Examples: []fs.OptionExample{{
Value: "ftp.example.com",
Help: "Connect to ftp.example.com",
}},
}, {
Name: "user",
Help: "FTP username, leave blank for current username, " + currentUser + ".",
Help: "FTP username, leave blank for current username, " + currentUser,
}, {
Name: "port",
Help: "FTP port, leave blank to use default (21).",
Help: "FTP port, leave blank to use default (21)",
}, {
Name: "pass",
Help: "FTP password.",
Help: "FTP password",
IsPassword: true,
Required: true,
}, {
Name: "tls",
Help: `Use Implicit FTPS (FTP over TLS).
Help: `Use Implicit FTPS (FTP over TLS)
When using implicit FTP over TLS the client connects using TLS
right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather
@@ -72,41 +68,35 @@ than port 21. Cannot be used in combination with explicit FTP.`,
Default: false,
}, {
Name: "explicit_tls",
Help: `Use Explicit FTPS (FTP over TLS).
Help: `Use Explicit FTPS (FTP over TLS)
When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.`,
Default: false,
}, {
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited.",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
Default: 0,
Advanced: true,
}, {
Name: "no_check_certificate",
Help: "Do not verify the TLS certificate of the server.",
Help: "Do not verify the TLS certificate of the server",
Default: false,
Advanced: true,
}, {
Name: "disable_epsv",
Help: "Disable using EPSV even if server advertises support.",
Help: "Disable using EPSV even if server advertises support",
Default: false,
Advanced: true,
}, {
Name: "disable_mlsd",
Help: "Disable using MLSD even if server advertises support.",
Default: false,
Advanced: true,
}, {
Name: "writing_mdtm",
Help: "Use MDTM to set modification time (VsFtpd quirk)",
Help: "Disable using MLSD even if server advertises support",
Default: false,
Advanced: true,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
Help: `Max time before closing idle connections.
Help: `Max time before closing idle connections
If no connections have been returned to the connection pool in the time
given, rclone will empty the connection pool.
@@ -114,48 +104,17 @@ given, rclone will empty the connection pool.
Set to 0 to keep connections indefinitely.
`,
Advanced: true,
}, {
Name: "close_timeout",
Help: "Maximum time to wait for a response to close.",
Default: fs.Duration(60 * time.Second),
Advanced: true,
}, {
Name: "tls_cache_size",
Help: `Size of TLS session cache for all control and data connections.
The TLS cache allows TLS sessions to be resumed and PSKs to be reused between connections.
Increase it if the default size is not enough and you see TLS resumption errors.
Enabled by default. Use 0 to disable.`,
Default: 32,
Advanced: true,
}, {
Name: "disable_tls13",
Help: "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)",
Default: false,
Advanced: true,
}, {
Name: "shut_timeout",
Help: "Maximum time to wait for data connection closing status.",
Default: fs.Duration(60 * time.Second),
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// The FTP protocol can't handle trailing spaces
// (for instance, pureftpd turns them into '_')
// The FTP protocol can't handle trailing spaces (for instance
// pureftpd turns them into _)
//
// proftpd can't handle '*' in file names
// pureftpd can't handle '[', ']' or '*'
Default: (encoder.Display |
encoder.EncodeRightSpace),
Examples: []fs.OptionExample{{
Value: "Asterisk,Ctl,Dot,Slash",
Help: "ProFTPd can't handle '*' in file names",
}, {
Value: "BackSlash,Ctl,Del,Dot,RightSpace,Slash,SquareBracket",
Help: "PureFTPd can't handle '[]' or '*' in file names",
}, {
Value: "Ctl,LeftPeriod,Slash",
Help: "VsFTPd can't handle file names starting with dot",
}},
}},
})
}
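The tls_cache_size and disable_tls13 options above end up on the connection's tls.Config, as the NewFs hunk further down shows. A compact sketch of that mapping using only crypto/tls; tlsConfigFor and the example host are invented:

package main

import (
	"crypto/tls"
	"fmt"
)

// tlsConfigFor maps the two new FTP options onto a tls.Config: a positive
// cache size enables session resumption, and disable_tls13 caps the version.
func tlsConfigFor(host string, cacheSize int, disableTLS13 bool) *tls.Config {
	conf := &tls.Config{ServerName: host}
	if cacheSize > 0 {
		conf.ClientSessionCache = tls.NewLRUClientSessionCache(cacheSize)
	}
	if disableTLS13 {
		conf.MaxVersion = tls.VersionTLS12
	}
	return conf
}

func main() {
	conf := tlsConfigFor("ftp.example.com", 32, true)
	fmt.Println(conf.ClientSessionCache != nil, conf.MaxVersion == tls.VersionTLS12) // true true
}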
@@ -168,16 +127,11 @@ type Options struct {
Port string `config:"port"`
TLS bool `config:"tls"`
ExplicitTLS bool `config:"explicit_tls"`
TLSCacheSize int `config:"tls_cache_size"`
DisableTLS13 bool `config:"disable_tls13"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
WritingMDTM bool `config:"writing_mdtm"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -197,10 +151,6 @@ type Fs struct {
drain *time.Timer // used to drain the pool when we stop using the connections
tokens *pacer.TokenDispenser
tlsConf *tls.Config
pacer *fs.Pacer // pacer for FTP connections
fGetTime bool // true if the ftp library accepts GetTime
fSetTime bool // true if the ftp library accepts SetTime
fLstTime bool // true if the List call returns precise time
}
// Object describes an FTP file
@@ -215,7 +165,6 @@ type FileInfo struct {
Name string
Size uint64
ModTime time.Time
precise bool // true if the time is precise
IsDir bool
}
@@ -278,41 +227,29 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
return len(p), nil
}
// shouldRetry returns a boolean as to whether this err deserve to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
type dialCtx struct {
f *Fs
ctx context.Context
}
// dial a new connection with fshttp dialer
func (d *dialCtx) dial(network, address string) (net.Conn, error) {
conn, err := fshttp.NewDialer(d.ctx).Dial(network, address)
if err != nil {
return nil, err
}
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusNotAvailable:
return true, err
}
if d.f.tlsConf != nil {
conn = tls.Client(conn, d.f.tlsConf)
}
return fserrors.ShouldRetry(err), err
return conn, err
}
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
func (f *Fs) ftpConnection(ctx context.Context) (*ftp.ServerConn, error) {
fs.Debugf(f, "Connecting to FTP server")
// Make ftp library dial with fshttp dialer optionally using TLS
dial := func(network, address string) (conn net.Conn, err error) {
conn, err = fshttp.NewDialer(ctx).Dial(network, address)
if f.tlsConf != nil && err == nil {
conn = tls.Client(conn, f.tlsConf)
}
return
}
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}
if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf
// as a trigger for sending PSBZ and PROT options to server.
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
} else if f.opt.ExplicitTLS {
dCtx := dialCtx{f, ctx}
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dCtx.dial)}
if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
// Initial connection needs to be cleartext for explicit TLS
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
@@ -327,31 +264,21 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.DisableMLSD {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
}
if f.opt.ShutTimeout != 0 && f.opt.ShutTimeout != fs.DurationOff {
ftpConfig = append(ftpConfig, ftp.DialWithShutTimeout(time.Duration(f.opt.ShutTimeout)))
}
if f.opt.WritingMDTM {
ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
}
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
}
err = f.pacer.Call(func() (bool, error) {
c, err = ftp.Dial(f.dialAddr, ftpConfig...)
if err != nil {
return shouldRetry(ctx, err)
}
err = c.Login(f.user, f.pass)
if err != nil {
_ = c.Quit()
return shouldRetry(ctx, err)
}
return false, nil
})
c, err := ftp.Dial(f.dialAddr, ftpConfig...)
if err != nil {
err = errors.Wrapf(err, "failed to make FTP connection to %q", f.dialAddr)
fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
return nil, errors.Wrap(err, "ftpConnection Dial")
}
return c, err
err = c.Login(f.user, f.pass)
if err != nil {
_ = c.Quit()
fs.Errorf(f, "Error while Logging in into %s: %s", f.dialAddr, err)
return nil, errors.Wrap(err, "ftpConnection Login")
}
return c, nil
}
// Get an FTP connection from the pool, or open a new one
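The FTP shouldRetry above singles out the 421 "service not available" reply before falling back to the generic retry rules. A standalone sketch of detecting that reply with net/textproto; retryable is a made-up helper, and 421 is the value behind ftp.StatusNotAvailable:

package main

import (
	"errors"
	"fmt"
	"net/textproto"
)

// retryable reports whether an FTP error is the transient 421 reply,
// the case the shouldRetry hunk above treats as always worth retrying.
func retryable(err error) bool {
	var protoErr *textproto.Error
	if errors.As(err, &protoErr) {
		return protoErr.Code == 421
	}
	return false
}

func main() {
	err := &textproto.Error{Code: 421, Msg: "Service not available, closing control connection"}
	fmt.Println(retryable(err), retryable(errors.New("permission denied"))) // true false
}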
@@ -470,12 +397,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
ServerName: opt.Host,
InsecureSkipVerify: opt.SkipVerifyTLSCert,
}
if opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
}
if opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
u := protocol + path.Join(dialAddr+"/", root)
ci := fs.GetConfig(ctx)
@@ -490,7 +411,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency),
tlsConf: tlsConfig,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
@@ -504,12 +424,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
if err != nil {
return nil, errors.Wrap(err, "NewFs")
}
f.fGetTime = c.IsGetTimeSupported()
f.fSetTime = c.IsSetTimeSupported()
f.fLstTime = c.IsTimePreciseInList()
if !f.fLstTime && f.fGetTime {
f.features.SlowModTime = true
}
f.putFtpConnection(&c, nil)
if root != "" {
// Check to see if the root actually an existing file
@@ -628,12 +542,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
fs: f,
remote: remote,
}
o.info = &FileInfo{
info := &FileInfo{
Name: remote,
Size: entry.Size,
ModTime: entry.Time,
precise: f.fLstTime,
}
o.info = info
return o, nil
}
return nil, fs.ErrorObjectNotFound
@@ -728,7 +643,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
Name: newremote,
Size: object.Size,
ModTime: object.Time,
precise: f.fLstTime,
}
o.info = info
entries = append(entries, o)
@@ -742,18 +656,8 @@ func (f *Fs) Hashes() hash.Set {
return 0
}
// Precision shows whether modified time is supported or not depending on the
// FTP server capabilities, namely whether FTP server:
// - accepts the MDTM command to get file time (fGetTime)
// or supports MLSD returning precise file time in the list (fLstTime)
// - accepts the MFMT command to set file time (fSetTime)
// or non-standard form of the MDTM command (fSetTime, too)
// used by VsFtpd for the same purpose (WritingMDTM)
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
// Precision shows Modified Time not supported
func (f *Fs) Precision() time.Duration {
if (f.fGetTime || f.fLstTime) && f.fSetTime {
return time.Second
}
return fs.ModTimeNotSupported
}
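The Precision comment above boils down to: second precision is only claimed when the server can both read a precise time and write one. A small sketch of that decision as a pure function; precision here is a hypothetical stand-in, not the real method:

package main

import (
	"fmt"
	"time"
)

// precision mirrors the decision above: a usable modification time needs the
// server to both report a precise time (MDTM, or precise MLSD listings) and
// set one (MFMT, or VsFtpd's two-argument MDTM).
func precision(getTime, lstTime, setTime bool) (time.Duration, bool) {
	if (getTime || lstTime) && setTime {
		return time.Second, true
	}
	return 0, false // caller would treat this as "mod time not supported"
}

func main() {
	fmt.Println(precision(true, false, true))  // 1s true
	fmt.Println(precision(false, false, true)) // 0s false
}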
@@ -805,7 +709,6 @@ func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err erro
Name: remote,
Size: file.Size,
ModTime: file.Time,
precise: f.fLstTime,
IsDir: file.Type == ftp.EntryTypeFolder,
}
return info, nil
@@ -991,41 +894,12 @@ func (o *Object) Size() int64 {
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
if !o.info.precise && o.fs.fGetTime {
c, err := o.fs.getFtpConnection(ctx)
if err == nil {
path := path.Join(o.fs.root, o.remote)
path = o.fs.opt.Enc.FromStandardPath(path)
modTime, err := c.GetTime(path)
if err == nil && o.info != nil {
o.info.ModTime = modTime
o.info.precise = true
}
o.fs.putFtpConnection(&c, err)
}
}
return o.info.ModTime
}
// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if !o.fs.fSetTime {
fs.Errorf(o.fs, "SetModTime is not supported")
return nil
}
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return err
}
path := path.Join(o.fs.root, o.remote)
path = o.fs.opt.Enc.FromStandardPath(path)
err = c.SetTime(path, modTime.In(time.UTC))
if err == nil && o.info != nil {
o.info.ModTime = modTime
o.info.precise = true
}
o.fs.putFtpConnection(&c, err)
return err
return nil
}
// Storable returns a boolean as to whether this object is storable
@@ -1057,12 +931,8 @@ func (f *ftpReadCloser) Close() error {
go func() {
errchan <- f.rc.Close()
}()
// Wait for Close for up to 60 seconds by default
closeTimeout := f.f.opt.CloseTimeout
if closeTimeout == 0 {
closeTimeout = fs.DurationOff
}
timer := time.NewTimer(time.Duration(closeTimeout))
// Wait for Close for up to 60 seconds
timer := time.NewTimer(60 * time.Second)
select {
case err = <-errchan:
timer.Stop()
@@ -1149,27 +1019,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errors.Wrap(err, "Update")
}
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
// Ignore error 250 here - sent by some servers
if err != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusRequestedFileActionOK:
err = nil
}
}
}
if err != nil {
_ = c.Quit() // toss this connection to avoid sync errors
// recycle connection in advance to let remove() find free token
o.fs.putFtpConnection(nil, err)
remove()
o.fs.putFtpConnection(nil, err)
return errors.Wrap(err, "update stor")
}
o.fs.putFtpConnection(&c, nil)
if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
return errors.Wrap(err, "SetModTime")
}
o.info, err = o.fs.getInfo(ctx, path)
if err != nil {
return errors.Wrap(err, "update getinfo")

View File

@@ -1,115 +0,0 @@
package ftp
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type settings map[string]interface{}
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
fsName := strings.Split(f.Name(), "{")[0] // strip off hash
configMap := configmap.Simple{}
for key, val := range opts {
configMap[key] = fmt.Sprintf("%v", val)
}
remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), f.Root())
fixFs, err := fs.NewFs(ctx, remote)
require.NoError(t, err)
return fixFs
}
// test that big file uploads do not cause network i/o timeout
func (f *Fs) testUploadTimeout(t *testing.T) {
const (
fileSize = 100000000 // 100 MiB
idleTimeout = 40 * time.Millisecond // small because test server is local
maxTime = 5 * time.Second // prevent test hangup
)
if testing.Short() {
t.Skip("not running with -short")
}
ctx := context.Background()
ci := fs.GetConfig(ctx)
saveLowLevelRetries := ci.LowLevelRetries
saveTimeout := ci.Timeout
defer func() {
ci.LowLevelRetries = saveLowLevelRetries
ci.Timeout = saveTimeout
}()
ci.LowLevelRetries = 1
ci.Timeout = idleTimeout
upload := func(concurrency int, shutTimeout time.Duration) (obj fs.Object, err error) {
fixFs := deriveFs(ctx, t, f, settings{
"concurrency": concurrency,
"shut_timeout": shutTimeout,
})
// Make test object
fileTime := fstest.Time("2020-03-08T09:30:00.000000000Z")
meta := object.NewStaticObjectInfo("upload-timeout.test", fileTime, int64(fileSize), true, nil, nil)
data := readers.NewPatternReader(int64(fileSize))
// Run upload and ensure maximum time
done := make(chan bool)
deadline := time.After(maxTime)
go func() {
obj, err = fixFs.Put(ctx, data, meta)
done <- true
}()
select {
case <-done:
case <-deadline:
t.Fatalf("Upload got stuck for %v !", maxTime)
}
return obj, err
}
// non-zero shut_timeout should fix i/o errors
obj, err := upload(f.opt.Concurrency, time.Second)
assert.NoError(t, err)
assert.NotNil(t, obj)
if obj != nil {
_ = obj.Remove(ctx)
}
}
// rclone must support precise time with ProFtpd and PureFtpd out of the box.
// The VsFtpd server does not support the MFMT command to set file time like
// other servers but by default supports the MDTM command in the non-standard
// two-argument form for the same purpose.
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
func (f *Fs) testTimePrecision(t *testing.T) {
name := f.Name()
if pos := strings.Index(name, "{"); pos != -1 {
name = name[:pos]
}
switch name {
case "TestFTPProftpd", "TestFTPPureftpd", "TestFTPVsftpd":
assert.LessOrEqual(t, f.Precision(), time.Second)
}
}
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("UploadTimeout", f.testUploadTimeout)
t.Run("TimePrecision", f.testTimePrecision)
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -9,27 +9,25 @@ import (
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against rclone FTP server
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPRclone:",
NilObject: (*ftp.Object)(nil),
})
}
// TestIntegrationProftpd runs integration tests against proFTPd
func TestIntegrationProftpd(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPProftpd:",
NilObject: (*ftp.Object)(nil),
})
}
// TestIntegrationPureftpd runs integration tests against pureFTPd
func TestIntegrationPureftpd(t *testing.T) {
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPRclone:",
NilObject: (*ftp.Object)(nil),
})
}
func TestIntegration3(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
@@ -39,13 +37,12 @@ func TestIntegrationPureftpd(t *testing.T) {
})
}
// TestIntegrationVsftpd runs integration tests against vsFTPd
func TestIntegrationVsftpd(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPVsftpd:",
NilObject: (*ftp.Object)(nil),
})
}
// func TestIntegration4(t *testing.T) {
// if *fstest.RemoteName != "" {
// t.Skip("skipping as -remote is set")
// }
// fstests.Run(t, &fstests.Opt{
// RemoteName: "TestFTPVsftpd:",
// NilObject: (*ftp.Object)(nil),
// })
// }

View File

@@ -19,9 +19,9 @@ import (
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"path"
"strconv"
"strings"
"time"
@@ -51,10 +51,10 @@ import (
const (
rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
timeFormat = time.RFC3339Nano
metaMtime = "mtime" // key to store mtime in metadata
metaMtimeGsutil = "goog-reserved-file-mtime" // key used by GSUtil to store mtime in metadata
listChunks = 1000 // chunk size to read directory listings
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
metaMtime = "mtime" // key to store mtime under in metadata
listChunks = 1000 // chunk size to read directory listings
minSleep = 10 * time.Millisecond
)
@@ -76,71 +76,72 @@ func init() {
Prefix: "gcs",
Description: "Google Cloud Storage (this is not Google Drive)",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
anonymous, _ := m.Get("anonymous")
if saFile != "" || saCreds != "" || anonymous == "true" {
return nil, nil
return
}
err := oauthutil.Config(ctx, "google cloud storage", name, m, storageConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: storageConfig,
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "project_number",
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
}, {
Name: "anonymous",
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
Help: "Access public buckets and objects without credentials\nSet to 'true' if you just want to download files and don't configure credentials.",
Default: false,
}, {
Name: "object_acl",
Help: "Access Control List for new objects.",
Examples: []fs.OptionExample{{
Value: "authenticatedRead",
Help: "Object owner gets OWNER access.\nAll Authenticated Users get READER access.",
Help: "Object owner gets OWNER access, and all Authenticated Users get READER access.",
}, {
Value: "bucketOwnerFullControl",
Help: "Object owner gets OWNER access.\nProject team owners get OWNER access.",
Help: "Object owner gets OWNER access, and project team owners get OWNER access.",
}, {
Value: "bucketOwnerRead",
Help: "Object owner gets OWNER access.\nProject team owners get READER access.",
Help: "Object owner gets OWNER access, and project team owners get READER access.",
}, {
Value: "private",
Help: "Object owner gets OWNER access.\nDefault if left blank.",
Help: "Object owner gets OWNER access [default if left blank].",
}, {
Value: "projectPrivate",
Help: "Object owner gets OWNER access.\nProject team members get access according to their roles.",
Help: "Object owner gets OWNER access, and project team members get access according to their roles.",
}, {
Value: "publicRead",
Help: "Object owner gets OWNER access.\nAll Users get READER access.",
Help: "Object owner gets OWNER access, and all Users get READER access.",
}},
}, {
Name: "bucket_acl",
Help: "Access Control List for new buckets.",
Examples: []fs.OptionExample{{
Value: "authenticatedRead",
Help: "Project team owners get OWNER access.\nAll Authenticated Users get READER access.",
Help: "Project team owners get OWNER access, and all Authenticated Users get READER access.",
}, {
Value: "private",
Help: "Project team owners get OWNER access.\nDefault if left blank.",
Help: "Project team owners get OWNER access [default if left blank].",
}, {
Value: "projectPrivate",
Help: "Project team members get access according to their roles.",
}, {
Value: "publicRead",
Help: "Project team owners get OWNER access.\nAll Users get READER access.",
Help: "Project team owners get OWNER access, and all Users get READER access.",
}, {
Value: "publicReadWrite",
Help: "Project team owners get OWNER access.\nAll Users get WRITER access.",
Help: "Project team owners get OWNER access, and all Users get WRITER access.",
}},
}, {
Name: "bucket_policy_only",
@@ -163,64 +164,64 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Help: "Location for the newly created buckets.",
Examples: []fs.OptionExample{{
Value: "",
Help: "Empty for default location (US)",
Help: "Empty for default location (US).",
}, {
Value: "asia",
Help: "Multi-regional location for Asia",
Help: "Multi-regional location for Asia.",
}, {
Value: "eu",
Help: "Multi-regional location for Europe",
Help: "Multi-regional location for Europe.",
}, {
Value: "us",
Help: "Multi-regional location for United States",
Help: "Multi-regional location for United States.",
}, {
Value: "asia-east1",
Help: "Taiwan",
Help: "Taiwan.",
}, {
Value: "asia-east2",
Help: "Hong Kong",
Help: "Hong Kong.",
}, {
Value: "asia-northeast1",
Help: "Tokyo",
Help: "Tokyo.",
}, {
Value: "asia-south1",
Help: "Mumbai",
Help: "Mumbai.",
}, {
Value: "asia-southeast1",
Help: "Singapore",
Help: "Singapore.",
}, {
Value: "australia-southeast1",
Help: "Sydney",
Help: "Sydney.",
}, {
Value: "europe-north1",
Help: "Finland",
Help: "Finland.",
}, {
Value: "europe-west1",
Help: "Belgium",
Help: "Belgium.",
}, {
Value: "europe-west2",
Help: "London",
Help: "London.",
}, {
Value: "europe-west3",
Help: "Frankfurt",
Help: "Frankfurt.",
}, {
Value: "europe-west4",
Help: "Netherlands",
Help: "Netherlands.",
}, {
Value: "us-central1",
Help: "Iowa",
Help: "Iowa.",
}, {
Value: "us-east1",
Help: "South Carolina",
Help: "South Carolina.",
}, {
Value: "us-east4",
Help: "Northern Virginia",
Help: "Northern Virginia.",
}, {
Value: "us-west1",
Help: "Oregon",
Help: "Oregon.",
}, {
Value: "us-west2",
Help: "California",
Help: "California.",
}},
}, {
Name: "storage_class",
@@ -328,10 +329,7 @@ func (f *Fs) Features() *fs.Features {
}
// shouldRetry determines whether a given err rates being retried
func shouldRetry(ctx context.Context, err error) (again bool, errOut error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(err error) (again bool, errOut error) {
again = false
if err != nil {
if fserrors.ShouldRetry(err) {
@@ -457,7 +455,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err == nil {
newRoot := path.Dir(f.root)
@@ -523,7 +521,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) {
objects, err = list.Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
if gErr, ok := err.(*googleapi.Error); ok {
@@ -626,7 +624,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) {
buckets, err = listBuckets.Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -752,7 +750,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
// service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err == nil {
// Bucket already exists
@@ -787,7 +785,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
_, err = insertBucket.Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
}, nil)
}
@@ -804,7 +802,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
return f.cache.Remove(bucket, func() error {
return f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
})
}
@@ -850,7 +848,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
for {
err = f.pacer.Call(func() (bool, error) {
rewriteResponse, err = rewriteRequest.Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -921,7 +919,7 @@ func (o *Object) setMetaData(info *storage.Object) {
// read mtime out of metadata if available
mtimeString, ok := info.Metadata[metaMtime]
if ok {
modTime, err := time.Parse(timeFormat, mtimeString)
modTime, err := time.Parse(timeFormatIn, mtimeString)
if err == nil {
o.modTime = modTime
return
@@ -929,19 +927,8 @@ func (o *Object) setMetaData(info *storage.Object) {
fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
}
// Fallback to GSUtil mtime
mtimeGsutilString, ok := info.Metadata[metaMtimeGsutil]
if ok {
unixTimeSec, err := strconv.ParseInt(mtimeGsutilString, 10, 64)
if err == nil {
o.modTime = time.Unix(unixTimeSec, 0)
return
}
fs.Debugf(o, "Failed to read GSUtil mtime from metadata: %s", err)
}
// Fallback to the Updated time
modTime, err := time.Parse(timeFormat, info.Updated)
modTime, err := time.Parse(timeFormatIn, info.Updated)
if err != nil {
fs.Logf(o, "Bad time decode: %v", err)
} else {
@@ -954,7 +941,7 @@ func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, er
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
if gErr, ok := err.(*googleapi.Error); ok {
@@ -998,8 +985,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string {
metadata := make(map[string]string, 1)
metadata[metaMtime] = modTime.Format(timeFormat)
metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
metadata[metaMtime] = modTime.Format(timeFormatOut)
return metadata
}
@@ -1011,11 +997,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
return err
}
// Add the mtime to the existing metadata
mtime := modTime.Format(timeFormatOut)
if object.Metadata == nil {
object.Metadata = make(map[string]string, 1)
}
object.Metadata[metaMtime] = modTime.Format(timeFormat)
object.Metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
object.Metadata[metaMtime] = mtime
// Copy the object to itself to update the metadata
// Using PATCH requires too many permissions
bucket, bucketPath := o.split()
@@ -1026,7 +1012,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return err
@@ -1057,7 +1043,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
_ = res.Body.Close() // ignore error
}
}
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -1123,7 +1109,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = insertObject.Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return err
@@ -1138,7 +1124,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
return err
}
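
The recurring edit in this file is switching between the plain `shouldRetry(err)` helper and the context-aware `shouldRetry(ctx, err)` inside every `pacer.Call` closure. A minimal, self-contained sketch of the context-aware form (only the `fserrors` calls are taken from the hunks above; the cancelled context and the error are made up for illustration):

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/rclone/rclone/fs/fserrors"
)

// shouldRetry mirrors the context-aware form used above: once the context is
// cancelled the pacer must stop retrying immediately, otherwise fserrors
// decides whether the error looks transient.
func shouldRetry(ctx context.Context, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	return fserrors.ShouldRetry(err), err
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate the user interrupting a transfer

	again, err := shouldRetry(ctx, errors.New("transient error"))
	fmt.Println(again, err) // again is false, so a pacer.Call loop would give up here
}
```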

View File

@@ -8,6 +8,7 @@ import (
"encoding/json"
"fmt"
"io"
golog "log"
"net/http"
"net/url"
"path"
@@ -20,7 +21,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
@@ -29,7 +29,6 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
@@ -55,7 +54,6 @@ const (
minSleep = 10 * time.Millisecond
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly"
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
scopeAccess = 2 // position of access scope in list
)
var (
@@ -64,7 +62,7 @@ var (
Scopes: []string{
"openid",
"profile",
scopeReadWrite, // this must be at position scopeAccess
scopeReadWrite,
},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
@@ -80,36 +78,36 @@ func init() {
Prefix: "gphotos",
Description: "Google Photos",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, errors.Wrap(err, "couldn't parse config into struct")
fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
return
}
switch config.State {
case "":
// Fill in the scopes
if opt.ReadOnly {
oauthConfig.Scopes[scopeAccess] = scopeReadOnly
} else {
oauthConfig.Scopes[scopeAccess] = scopeReadWrite
}
return oauthutil.ConfigOut("warning", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
case "warning":
// Warn the user as required by google photos integration
return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
IMPORTANT: All media items uploaded to Google Photos with rclone
are stored in full resolution at original quality. These uploads
will count towards storage in your Google Account.`)
case "warning_done":
return nil, nil
// Fill in the scopes
if opt.ReadOnly {
oauthConfig.Scopes[0] = scopeReadOnly
} else {
oauthConfig.Scopes[0] = scopeReadWrite
}
return nil, fmt.Errorf("unknown state %q", config.State)
// Do the oauth
err = oauthutil.Config(ctx, "google photos", name, m, oauthConfig, nil)
if err != nil {
golog.Fatalf("Failed to configure token: %v", err)
}
// Warn the user
fmt.Print(`
*** IMPORTANT: All media items uploaded to Google Photos with rclone
*** are stored in full resolution at original quality. These uploads
*** will count towards storage in your Google Account.
`)
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "read_only",
@@ -132,7 +130,7 @@ you want to read the media.`,
}, {
Name: "start_year",
Default: 2000,
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year.`,
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`,
Advanced: true,
}, {
Name: "include_archived",
@@ -151,24 +149,16 @@ listings and transferred.
Without this flag, archived media will not be visible in directory
listings and won't be transferred.`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}}...),
})
}
// Options defines the configuration for this backend
type Options struct {
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
StartYear int `config:"start_year"`
IncludeArchived bool `config:"include_archived"`
Enc encoder.MultiEncoder `config:"encoding"`
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
StartYear int `config:"start_year"`
IncludeArchived bool `config:"include_archived"`
}
// Fs represents a remote storage server
@@ -250,10 +240,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -342,7 +329,7 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
var openIDconfig map[string]interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return "", errors.Wrap(err, "couldn't read openID config")
@@ -371,7 +358,7 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro
}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &userInfo)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't read user info")
@@ -402,7 +389,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
var res interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &res)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't revoke token")
@@ -489,7 +476,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list albums")
@@ -506,9 +493,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
lastID = newAlbums[len(newAlbums)-1].ID
}
for i := range newAlbums {
anAlbum := newAlbums[i]
anAlbum.Title = f.opt.Enc.FromStandardPath(anAlbum.Title)
all.add(&anAlbum)
all.add(&newAlbums[i])
}
if result.NextPageToken == "" {
break
@@ -546,7 +531,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &filter, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't list files")
@@ -690,7 +675,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create album")
@@ -825,7 +810,7 @@ func (o *Object) Size() int64 {
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
fs.Debugf(o, "Reading size failed: %v", err)
@@ -876,7 +861,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &item)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't get media item")
@@ -953,7 +938,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -1008,10 +993,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
if err != nil {
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
}
token, err = rest.ReadBody(resp)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't upload file")
@@ -1039,7 +1024,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var result api.BatchCreateResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to create media item")
@@ -1084,7 +1069,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't delete item from album")
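
The structural change in this file is the switch between a one-shot `Config` callback and the state-machine form (`fs.ConfigIn`/`fs.ConfigOut`, `oauthutil.ConfigOut`, `fs.ConfigConfirm`). A toy, runnable sketch of how such a state machine is driven; `configIn`/`configOut` below are stand-ins, not the real rclone types, and the prompt text is paraphrased:

```go
package main

import "fmt"

// Stand-ins for fs.ConfigIn / fs.ConfigOut, only to show how the state-machine
// style Config function in this hunk is driven: each call handles one state and
// names the next one; returning (nil, nil) means the configuration is finished.
type configIn struct{ State string }
type configOut struct {
	NextState string
	Prompt    string
}

func config(in configIn) (*configOut, error) {
	switch in.State {
	case "":
		// The real backend runs the OAuth flow here, then continues at "warning".
		return &configOut{NextState: "warning"}, nil
	case "warning":
		return &configOut{
			NextState: "warning_done",
			Prompt:    "uploads count towards your Google Account storage - continue?",
		}, nil
	case "warning_done":
		return nil, nil
	}
	return nil, fmt.Errorf("unknown state %q", in.State)
}

func main() {
	state := ""
	for {
		out, err := config(configIn{State: state})
		if err != nil {
			panic(err)
		}
		if out == nil {
			fmt.Println("configuration finished")
			return
		}
		if out.Prompt != "" {
			fmt.Println("ask the user:", out.Prompt)
		}
		state = out.NextState
	}
}
```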

View File

@@ -1,179 +0,0 @@
package hasher
import (
"context"
"path"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/kv"
)
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "drop":
return nil, f.db.Stop(true)
case "dump", "fulldump":
return nil, f.dbDump(ctx, name == "fulldump", "")
case "import", "stickyimport":
sticky := name == "stickyimport"
if len(arg) != 2 {
return nil, errors.New("please provide checksum type and path to sum file")
}
return nil, f.dbImport(ctx, arg[0], arg[1], sticky)
default:
return nil, fs.ErrorCommandNotFound
}
}
var commandHelp = []fs.CommandHelp{{
Name: "drop",
Short: "Drop cache",
Long: `Completely drop checksum cache.
Usage Example:
rclone backend drop hasher:
`,
}, {
Name: "dump",
Short: "Dump the database",
Long: "Dump cache records covered by the current remote",
}, {
Name: "fulldump",
Short: "Full dump of the database",
Long: "Dump all cache records in the database",
}, {
Name: "import",
Short: "Import a SUM file",
Long: `Amend hash cache from a SUM file and bind checksums to files by size/time.
Usage Example:
rclone backend import hasher:subdir md5 /path/to/sum.md5
`,
}, {
Name: "stickyimport",
Short: "Perform fast import of a SUM file",
Long: `Fill hash cache from a SUM file without verifying file fingerprints.
Usage Example:
rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5
`,
}}
func (f *Fs) dbDump(ctx context.Context, full bool, root string) error {
if root == "" {
remoteFs, err := cache.Get(ctx, f.opt.Remote)
if err != nil {
return err
}
root = fspath.JoinRootPath(remoteFs.Root(), f.Root())
}
op := &kvDump{
full: full,
root: root,
path: f.db.Path(),
fs: f,
}
err := f.db.Do(false, op)
if err == kv.ErrEmpty {
fs.Infof(op.path, "empty")
err = nil
}
return err
}
func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bool) error {
var hashType hash.Type
if err := hashType.Set(hashName); err != nil {
return err
}
if hashType == hash.None {
return errors.New("please provide a valid hash type")
}
if !f.suppHashes.Contains(hashType) {
return errors.New("unsupported hash type")
}
if !f.keepHashes.Contains(hashType) {
fs.Infof(nil, "Need not import hashes of this type")
return nil
}
_, sumPath, err := fspath.SplitFs(sumRemote)
if err != nil {
return err
}
sumFs, err := cache.Get(ctx, sumRemote)
switch err {
case fs.ErrorIsFile:
// ok
case nil:
return errors.Errorf("not a file: %s", sumRemote)
default:
return err
}
sumObj, err := sumFs.NewObject(ctx, path.Base(sumPath))
if err != nil {
return errors.Wrap(err, "cannot open sum file")
}
hashes, err := operations.ParseSumFile(ctx, sumObj)
if err != nil {
return errors.Wrap(err, "failed to parse sum file")
}
if sticky {
rootPath := f.Fs.Root()
for remote, hashVal := range hashes {
key := path.Join(rootPath, remote)
hashSums := operations.HashSums{hashName: hashVal}
if err := f.putRawHashes(ctx, key, anyFingerprint, hashSums); err != nil {
fs.Errorf(nil, "%s: failed to import: %v", remote, err)
}
}
fs.Infof(nil, "Summary: %d checksum(s) imported", len(hashes))
return nil
}
const longImportThreshold = 100
if len(hashes) > longImportThreshold {
fs.Infof(nil, "Importing %d checksums. Please wait...", len(hashes))
}
doneCount := 0
err = operations.ListFn(ctx, f, func(obj fs.Object) {
remote := obj.Remote()
hash := hashes[remote]
hashes[remote] = "" // mark as handled
o, ok := obj.(*Object)
if ok && hash != "" {
if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
fs.Errorf(nil, "%s: failed to import: %v", remote, err)
}
accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
doneCount++
}
})
if err != nil {
fs.Errorf(nil, "Import failed: %v", err)
}
skipCount := 0
for remote, emptyOrDone := range hashes {
if emptyOrDone != "" {
fs.Infof(nil, "Skip vanished object: %s", remote)
skipCount++
}
}
fs.Infof(nil, "Summary: %d imported, %d skipped", doneCount, skipCount)
return err
}
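
For reference, the `import`/`stickyimport` commands above read an ordinary checksum file in `md5sum`/`rclone md5sum` format: one line per file with the hash, two spaces, then the path relative to the remote. A hypothetical `sum.md5` for `rclone backend import hasher:subdir md5 /path/to/sum.md5` (the digests are the real MD5 sums of "a" and "b"; the file names are made up):

```
0cc175b9c0f1b6a831c399e269772661  file1.txt
92eb5ffee6ae2fec3ad71c777531578f  photos/file2.jpg
```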

View File

@@ -1,508 +0,0 @@
// Package hasher implements a checksum handling overlay backend
package hasher
import (
"context"
"encoding/gob"
"fmt"
"io"
"path"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/kv"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "hasher",
Description: "Better checksums for other remotes",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Required: true,
Help: "Remote to cache checksums for (e.g. myRemote:path).",
}, {
Name: "hashes",
Default: fs.CommaSepList{"md5", "sha1"},
Advanced: false,
Help: "Comma separated list of supported checksum types.",
}, {
Name: "max_age",
Advanced: false,
Default: fs.DurationOff,
Help: "Maximum time to keep checksums in cache (0 = no cache, off = cache forever).",
}, {
Name: "auto_size",
Advanced: true,
Default: fs.SizeSuffix(0),
Help: "Auto-update checksum for files smaller than this size (disabled by default).",
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
Hashes fs.CommaSepList `config:"hashes"`
AutoSize fs.SizeSuffix `config:"auto_size"`
MaxAge fs.Duration `config:"max_age"`
}
// Fs represents a wrapped fs.Fs
type Fs struct {
fs.Fs
name string
root string
wrapper fs.Fs
features *fs.Features
opt *Options
db *kv.DB
// fingerprinting
fpTime bool // true if using time in fingerprints
fpHash hash.Type // hash type to use in fingerprints or None
// hash types triaged by groups
suppHashes hash.Set // all supported checksum types
passHashes hash.Set // passed directly to the base without caching
slowHashes hash.Set // passed to the base and then cached
autoHashes hash.Set // calculated in-house and cached
keepHashes hash.Set // checksums to keep in cache (slow + auto)
}
var warnExperimental sync.Once
// NewFs constructs an Fs from the remote:path string
func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs.Fs, error) {
if !kv.Supported() {
return nil, errors.New("hasher is not supported on this OS")
}
warnExperimental.Do(func() {
fs.Infof(nil, "Hasher is EXPERIMENTAL!")
})
opt := &Options{}
err := configstruct.Set(cmap, opt)
if err != nil {
return nil, err
}
if strings.HasPrefix(opt.Remote, fsname+":") {
return nil, errors.New("can't point remote at itself")
}
remotePath := fspath.JoinRootPath(opt.Remote, rpath)
baseFs, err := cache.Get(ctx, remotePath)
if err != nil && err != fs.ErrorIsFile {
return nil, errors.Wrapf(err, "failed to derive base remote %q", opt.Remote)
}
f := &Fs{
Fs: baseFs,
name: fsname,
root: rpath,
opt: opt,
}
baseFeatures := baseFs.Features()
f.fpTime = baseFs.Precision() != fs.ModTimeNotSupported
if baseFeatures.SlowHash {
f.slowHashes = f.Fs.Hashes()
} else {
f.passHashes = f.Fs.Hashes()
f.fpHash = f.passHashes.GetOne()
}
f.suppHashes = f.passHashes
f.suppHashes.Add(f.slowHashes.Array()...)
for _, hashName := range opt.Hashes {
var ht hash.Type
if err := ht.Set(hashName); err != nil {
return nil, errors.Errorf("invalid token %q in hash string %q", hashName, opt.Hashes.String())
}
if !f.slowHashes.Contains(ht) {
f.autoHashes.Add(ht)
}
f.keepHashes.Add(ht)
f.suppHashes.Add(ht)
}
fs.Debugf(f, "Groups by usage: cached %s, passed %s, auto %s, slow %s, supported %s",
f.keepHashes, f.passHashes, f.autoHashes, f.slowHashes, f.suppHashes)
var nilSet hash.Set
if f.keepHashes == nilSet {
return nil, errors.New("configured hash_names have nothing to keep in cache")
}
if f.opt.MaxAge > 0 {
gob.Register(hashRecord{})
db, err := kv.Start(ctx, "hasher", f.Fs)
if err != nil {
return nil, err
}
f.db = db
}
stubFeatures := &fs.Features{
CanHaveEmptyDirectories: true,
IsLocal: true,
ReadMimeType: true,
WriteMimeType: true,
}
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
cache.PinUntilFinalized(f.Fs, f)
return f, err
}
//
// Filesystem
//
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string { return f.name }
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string { return f.root }
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features { return f.features }
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set { return f.suppHashes }
// String returns a description of the FS
// The "hasher::" prefix is a distinctive feature.
func (f *Fs) String() string {
return fmt.Sprintf("hasher::%s:%s", f.name, f.root)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs { return f.Fs }
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs { return f.wrapper }
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) { f.wrapper = wrapper }
// Wrap base entries into hasher entries.
func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries, err error) {
hashEntries = baseEntries[:0] // work inplace
for _, entry := range baseEntries {
switch x := entry.(type) {
case fs.Object:
hashEntries = append(hashEntries, f.wrapObject(x, nil))
default:
hashEntries = append(hashEntries, entry) // trash in - trash out
}
}
return hashEntries, nil
}
// List the objects and directories in dir into entries.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if entries, err = f.Fs.List(ctx, dir); err != nil {
return nil, err
}
return f.wrapEntries(entries)
}
// ListR lists the objects and directories recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
return f.Fs.Features().ListR(ctx, dir, func(baseEntries fs.DirEntries) error {
hashEntries, err := f.wrapEntries(baseEntries)
if err != nil {
return err
}
return callback(hashEntries)
})
}
// Purge a directory
func (f *Fs) Purge(ctx context.Context, dir string) error {
if do := f.Fs.Features().Purge; do != nil {
if err := do(ctx, dir); err != nil {
return err
}
err := f.db.Do(true, &kvPurge{
dir: path.Join(f.Fs.Root(), dir),
})
if err != nil {
fs.Errorf(f, "Failed to purge some hashes: %v", err)
}
return nil
}
return fs.ErrorCantPurge
}
// PutStream uploads to the remote path with indeterminate size.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if do := f.Fs.Features().PutStream; do != nil {
_ = f.pruneHash(src.Remote())
oResult, err := do(ctx, in, src, options...)
return f.wrapObject(oResult, err), err
}
return nil, errors.New("PutStream not supported")
}
// PutUnchecked uploads the object, allowing duplicates.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if do := f.Fs.Features().PutUnchecked; do != nil {
_ = f.pruneHash(src.Remote())
oResult, err := do(ctx, in, src, options...)
return f.wrapObject(oResult, err), err
}
return nil, errors.New("PutUnchecked not supported")
}
// pruneHash deletes hash for a path
func (f *Fs) pruneHash(remote string) error {
return f.db.Do(true, &kvPrune{
key: path.Join(f.Fs.Root(), remote),
})
}
// CleanUp the trash in the Fs
func (f *Fs) CleanUp(ctx context.Context) error {
if do := f.Fs.Features().CleanUp; do != nil {
return do(ctx)
}
return errors.New("CleanUp not supported")
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if do := f.Fs.Features().About; do != nil {
return do(ctx)
}
return nil, errors.New("About not supported")
}
// ChangeNotify calls the passed function with a path that has had changes.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
if do := f.Fs.Features().ChangeNotify; do != nil {
do(ctx, notifyFunc, pollIntervalChan)
}
}
// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
if do := f.Fs.Features().UserInfo; do != nil {
return do(ctx)
}
return nil, fs.ErrorNotImplemented
}
// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
if do := f.Fs.Features().Disconnect; do != nil {
return do(ctx)
}
return fs.ErrorNotImplemented
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if do := f.Fs.Features().MergeDirs; do != nil {
return do(ctx, dirs)
}
return errors.New("MergeDirs not supported")
}
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
if do := f.Fs.Features().DirCacheFlush; do != nil {
do()
}
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
if do := f.Fs.Features().PublicLink; do != nil {
return do(ctx, remote, expire, unlink)
}
return "", errors.New("PublicLink not supported")
}
// Copy src to this remote using server-side copy operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Copy
if do == nil {
return nil, fs.ErrorCantCopy
}
o, ok := src.(*Object)
if !ok {
return nil, fs.ErrorCantCopy
}
oResult, err := do(ctx, o.Object, remote)
return f.wrapObject(oResult, err), err
}
// Move src to this remote using server-side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Move
if do == nil {
return nil, fs.ErrorCantMove
}
o, ok := src.(*Object)
if !ok {
return nil, fs.ErrorCantMove
}
oResult, err := do(ctx, o.Object, remote)
if err != nil {
return nil, err
}
_ = f.db.Do(true, &kvMove{
src: path.Join(f.Fs.Root(), src.Remote()),
dst: path.Join(f.Fs.Root(), remote),
dir: false,
fs: f,
})
return f.wrapObject(oResult, nil), nil
}
// DirMove moves src, srcRemote to this remote at dstRemote using server-side move operations.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
do := f.Fs.Features().DirMove
if do == nil {
return fs.ErrorCantDirMove
}
srcFs, ok := src.(*Fs)
if !ok {
return fs.ErrorCantDirMove
}
err := do(ctx, srcFs.Fs, srcRemote, dstRemote)
if err == nil {
_ = f.db.Do(true, &kvMove{
src: path.Join(srcFs.Fs.Root(), srcRemote),
dst: path.Join(f.Fs.Root(), dstRemote),
dir: true,
fs: f,
})
}
return err
}
// Shutdown the backend, closing any background tasks and any cached connections.
func (f *Fs) Shutdown(ctx context.Context) (err error) {
err = f.db.Stop(false)
if do := f.Fs.Features().Shutdown; do != nil {
if err2 := do(ctx); err2 != nil {
err = err2
}
}
return
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
o, err := f.Fs.NewObject(ctx, remote)
return f.wrapObject(o, err), err
}
//
// Object
//
// Object represents a composite file wrapping one or more data chunks
type Object struct {
fs.Object
f *Fs
}
// Wrap base object into hasher object
func (f *Fs) wrapObject(o fs.Object, err error) *Object {
if err != nil || o == nil {
return nil
}
return &Object{Object: o, f: f}
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info { return o.f }
// UnWrap returns the wrapped Object
func (o *Object) UnWrap() fs.Object { return o.Object }
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Object.String()
}
// ID returns the ID of the Object if possible
func (o *Object) ID() string {
if doer, ok := o.Object.(fs.IDer); ok {
return doer.ID()
}
return ""
}
// GetTier returns the Tier of the Object if possible
func (o *Object) GetTier() string {
if doer, ok := o.Object.(fs.GetTierer); ok {
return doer.GetTier()
}
return ""
}
// SetTier set the Tier of the Object if possible
func (o *Object) SetTier(tier string) error {
if doer, ok := o.Object.(fs.SetTierer); ok {
return doer.SetTier(tier)
}
return errors.New("SetTier not supported")
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
if doer, ok := o.Object.(fs.MimeTyper); ok {
return doer.MimeType(ctx)
}
return ""
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.SetTierer = (*Object)(nil)
_ fs.GetTierer = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)
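
The `var _ fs.Fs = (*Fs)(nil)` block above is the standard Go compile-time check that a type still satisfies the interfaces it promises. The same idiom with only the standard library, as a sketch:

```go
package main

import "io"

type nopCloser struct{}

func (nopCloser) Read(p []byte) (int, error) { return 0, io.EOF }
func (nopCloser) Close() error               { return nil }

// Compile-time only: the build fails if nopCloser stops satisfying io.ReadCloser,
// exactly like the var _ fs.Fs = (*Fs)(nil) block above. It costs nothing at runtime.
var _ io.ReadCloser = nopCloser{}

func main() {}
```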

View File

@@ -1,78 +0,0 @@
package hasher
import (
"context"
"fmt"
"os"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/kv"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func putFile(ctx context.Context, t *testing.T, f fs.Fs, name, data string) fs.Object {
mtime1 := fstest.Time("2001-02-03T04:05:06.499999999Z")
item := fstest.Item{Path: name, ModTime: mtime1}
_, o := fstests.PutTestContents(ctx, t, f, &item, data, true)
require.NotNil(t, o)
return o
}
func (f *Fs) testUploadFromCrypt(t *testing.T) {
// make a temporary local remote
tempRoot, err := fstest.LocalRemote()
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempRoot)
}()
// make a temporary crypt remote
ctx := context.Background()
pass := obscure.MustObscure("crypt")
remote := fmt.Sprintf(":crypt,remote=%s,password=%s:", tempRoot, pass)
cryptFs, err := fs.NewFs(ctx, remote)
require.NoError(t, err)
// make a test file on the crypt remote
const dirName = "from_crypt_1"
const fileName = dirName + "/file_from_crypt_1"
const longTime = fs.ModTimeNotSupported
src := putFile(ctx, t, cryptFs, fileName, "doggy froggy")
// ensure that hash does not exist yet
_ = f.pruneHash(fileName)
hashType := f.keepHashes.GetOne()
hash, err := f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
assert.Error(t, err)
assert.Empty(t, hash)
// upload file to hasher
in, err := src.Open(ctx)
require.NoError(t, err)
dst, err := f.Put(ctx, in, src)
require.NoError(t, err)
assert.NotNil(t, dst)
// check that hash was created
hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
assert.NoError(t, err)
assert.NotEmpty(t, hash)
//t.Logf("hash is %q", hash)
_ = operations.Purge(ctx, f, dirName)
}
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
if !kv.Supported() {
t.Skip("hasher is not supported on this OS")
}
t.Run("UploadFromCrypt", f.testUploadFromCrypt)
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -1,38 +0,0 @@
package hasher_test
import (
"os"
"path/filepath"
"testing"
"github.com/rclone/rclone/backend/hasher"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/kv"
_ "github.com/rclone/rclone/backend/all" // for integration tests
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if !kv.Supported() {
t.Skip("hasher is not supported on this OS")
}
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*hasher.Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
},
UnimplementableObjectMethods: []string{},
}
if *fstest.RemoteName == "" {
tempDir := filepath.Join(os.TempDir(), "rclone-hasher-test")
opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: "TestHasher", Key: "type", Value: "hasher"},
{Name: "TestHasher", Key: "remote", Value: tempDir},
}
opt.RemoteName = "TestHasher:"
}
fstests.Run(t, &opt)
}

View File

@@ -1,315 +0,0 @@
package hasher
import (
"bytes"
"context"
"encoding/gob"
"fmt"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/kv"
)
const (
timeFormat = "2006-01-02T15:04:05.000000000-0700"
anyFingerprint = "*"
)
type hashMap map[hash.Type]string
type hashRecord struct {
Fp string // fingerprint
Hashes operations.HashSums
Created time.Time
}
func (r *hashRecord) encode(key string) ([]byte, error) {
var buf bytes.Buffer
if err := gob.NewEncoder(&buf).Encode(r); err != nil {
fs.Debugf(key, "hasher encoding %v: %v", r, err)
return nil, err
}
return buf.Bytes(), nil
}
func (r *hashRecord) decode(key string, data []byte) error {
if err := gob.NewDecoder(bytes.NewBuffer(data)).Decode(r); err != nil {
fs.Debugf(key, "hasher decoding %q failed: %v", data, err)
return err
}
return nil
}
// kvPrune: prune a single hash
type kvPrune struct {
key string
}
func (op *kvPrune) Do(ctx context.Context, b kv.Bucket) error {
return b.Delete([]byte(op.key))
}
// kvPurge: delete a subtree
type kvPurge struct {
dir string
}
func (op *kvPurge) Do(ctx context.Context, b kv.Bucket) error {
dir := op.dir
if !strings.HasSuffix(dir, "/") {
dir += "/"
}
var items []string
cur := b.Cursor()
bkey, _ := cur.Seek([]byte(dir))
for bkey != nil {
key := string(bkey)
if !strings.HasPrefix(key, dir) {
break
}
items = append(items, key[len(dir):])
bkey, _ = cur.Next()
}
nerr := 0
for _, sub := range items {
if err := b.Delete([]byte(dir + sub)); err != nil {
nerr++
}
}
fs.Debugf(dir, "%d hashes purged, %d failed", len(items)-nerr, nerr)
return nil
}
// kvMove: assign hashes to new path
type kvMove struct {
src string
dst string
dir bool
fs *Fs
}
func (op *kvMove) Do(ctx context.Context, b kv.Bucket) error {
src, dst := op.src, op.dst
if !op.dir {
err := moveHash(b, src, dst)
fs.Debugf(op.fs, "moving cached hash %s to %s (err: %v)", src, dst, err)
return err
}
if !strings.HasSuffix(src, "/") {
src += "/"
}
if !strings.HasSuffix(dst, "/") {
dst += "/"
}
var items []string
cur := b.Cursor()
bkey, _ := cur.Seek([]byte(src))
for bkey != nil {
key := string(bkey)
if !strings.HasPrefix(key, src) {
break
}
items = append(items, key[len(src):])
bkey, _ = cur.Next()
}
nerr := 0
for _, suffix := range items {
srcKey, dstKey := src+suffix, dst+suffix
err := moveHash(b, srcKey, dstKey)
fs.Debugf(op.fs, "Rename cache record %s -> %s (err: %v)", srcKey, dstKey, err)
if err != nil {
nerr++
}
}
fs.Debugf(op.fs, "%d hashes moved, %d failed", len(items)-nerr, nerr)
return nil
}
func moveHash(b kv.Bucket, src, dst string) error {
data := b.Get([]byte(src))
err := b.Delete([]byte(src))
if err != nil || len(data) == 0 {
return err
}
return b.Put([]byte(dst), data)
}
// kvGet: get single hash from database
type kvGet struct {
key string
fp string
hash string
val string
age time.Duration
}
func (op *kvGet) Do(ctx context.Context, b kv.Bucket) error {
data := b.Get([]byte(op.key))
if len(data) == 0 {
return errors.New("no record")
}
var r hashRecord
if err := r.decode(op.key, data); err != nil {
return errors.New("invalid record")
}
if !(r.Fp == anyFingerprint || op.fp == anyFingerprint || r.Fp == op.fp) {
return errors.New("fingerprint changed")
}
if time.Since(r.Created) > op.age {
return errors.New("record timed out")
}
if r.Hashes != nil {
op.val = r.Hashes[op.hash]
}
return nil
}
// kvPut: set hashes for an object by key
type kvPut struct {
key string
fp string
hashes operations.HashSums
age time.Duration
}
func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) {
data := b.Get([]byte(op.key))
var r hashRecord
if len(data) > 0 {
err = r.decode(op.key, data)
if err != nil || r.Fp != op.fp || time.Since(r.Created) > op.age {
r.Hashes = nil
}
}
if len(r.Hashes) == 0 {
r.Created = time.Now()
r.Hashes = operations.HashSums{}
r.Fp = op.fp
}
for hashType, hashVal := range op.hashes {
r.Hashes[hashType] = hashVal
}
if data, err = r.encode(op.key); err != nil {
return errors.Wrap(err, "marshal failed")
}
if err = b.Put([]byte(op.key), data); err != nil {
return errors.Wrap(err, "put failed")
}
return err
}
// kvDump: dump the database.
// Note: long dump can cause concurrent operations to fail.
type kvDump struct {
full bool
root string
path string
fs *Fs
num int
total int
}
func (op *kvDump) Do(ctx context.Context, b kv.Bucket) error {
f, baseRoot, dbPath := op.fs, op.root, op.path
if op.full {
total := 0
num := 0
_ = b.ForEach(func(bkey, data []byte) error {
total++
key := string(bkey)
include := (baseRoot == "" || key == baseRoot || strings.HasPrefix(key, baseRoot+"/"))
var r hashRecord
if err := r.decode(key, data); err != nil {
fs.Errorf(nil, "%s: invalid record: %v", key, err)
return nil
}
fmt.Println(f.dumpLine(&r, key, include, nil))
if include {
num++
}
return nil
})
fs.Infof(dbPath, "%d records out of %d", num, total)
op.num, op.total = num, total // for unit tests
return nil
}
num := 0
cur := b.Cursor()
var bkey, data []byte
if baseRoot != "" {
bkey, data = cur.Seek([]byte(baseRoot))
} else {
bkey, data = cur.First()
}
for bkey != nil {
key := string(bkey)
if !(baseRoot == "" || key == baseRoot || strings.HasPrefix(key, baseRoot+"/")) {
break
}
var r hashRecord
if err := r.decode(key, data); err != nil {
fs.Errorf(nil, "%s: invalid record: %v", key, err)
bkey, data = cur.Next() // advance past the bad record so the loop can't spin on it
continue
}
if key = strings.TrimPrefix(key[len(baseRoot):], "/"); key == "" {
key = "/"
}
fmt.Println(f.dumpLine(&r, key, true, nil))
num++
bkey, data = cur.Next()
}
fs.Infof(dbPath, "%d records", num)
op.num = num // for unit tests
return nil
}
func (f *Fs) dumpLine(r *hashRecord, path string, include bool, err error) string {
var status string
switch {
case !include:
status = "ext"
case err != nil:
status = "bad"
case r.Fp == anyFingerprint:
status = "stk"
default:
status = "ok "
}
var hashes []string
for _, hashType := range f.keepHashes.Array() {
hashName := hashType.String()
hashVal := r.Hashes[hashName]
if hashVal == "" || err != nil {
hashVal = "-"
}
hashVal = fmt.Sprintf("%-*s", hash.Width(hashType), hashVal)
hashes = append(hashes, hashName+":"+hashVal)
}
hashesStr := strings.Join(hashes, " ")
age := time.Since(r.Created).Round(time.Second)
if age > 24*time.Hour {
age = age.Round(time.Hour)
}
if err != nil {
age = 0
}
ageStr := age.String()
if strings.HasSuffix(ageStr, "h0m0s") {
ageStr = strings.TrimSuffix(ageStr, "0m0s")
}
return fmt.Sprintf("%s %s %9s %s", status, hashesStr, ageStr, path)
}
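
The records handled by `kvGet`/`kvPut`/`kvDump` above are plain `gob` blobs. A self-contained round-trip of a stand-in record (field names copied from `hashRecord`; `operations.HashSums` is treated as a plain `map[string]string` here, and the fingerprint string is illustrative):

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"time"
)

// Stand-in for the hashRecord stored in the bucket above.
type hashRecord struct {
	Fp      string
	Hashes  map[string]string
	Created time.Time
}

func main() {
	in := hashRecord{
		Fp:      "13,2021-01-02T03:04:05.000000000+0000,-", // size,modtime,hash fingerprint
		Hashes:  map[string]string{"md5": "0cc175b9c0f1b6a831c399e269772661"},
		Created: time.Now(),
	}
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(&in); err != nil {
		panic(err)
	}
	var out hashRecord
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Fp, out.Hashes["md5"]) // round-trips unchanged
}
```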

View File

@@ -1,305 +0,0 @@
package hasher
import (
"context"
"fmt"
"io"
"io/ioutil"
"path"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
)
// obtain hash for an object
func (o *Object) getHash(ctx context.Context, hashType hash.Type) (string, error) {
maxAge := time.Duration(o.f.opt.MaxAge)
if maxAge <= 0 {
return "", nil
}
fp := o.fingerprint(ctx)
if fp == "" {
return "", errors.New("fingerprint failed")
}
return o.f.getRawHash(ctx, hashType, o.Remote(), fp, maxAge)
}
// obtain hash for a path
func (f *Fs) getRawHash(ctx context.Context, hashType hash.Type, remote, fp string, age time.Duration) (string, error) {
key := path.Join(f.Fs.Root(), remote)
op := &kvGet{
key: key,
fp: fp,
hash: hashType.String(),
age: age,
}
err := f.db.Do(false, op)
return op.val, err
}
// put new hashes for an object
func (o *Object) putHashes(ctx context.Context, rawHashes hashMap) error {
if o.f.opt.MaxAge <= 0 {
return nil
}
fp := o.fingerprint(ctx)
if fp == "" {
return nil
}
key := path.Join(o.f.Fs.Root(), o.Remote())
hashes := operations.HashSums{}
for hashType, hashVal := range rawHashes {
hashes[hashType.String()] = hashVal
}
return o.f.putRawHashes(ctx, key, fp, hashes)
}
// set hashes for a path without any validation
func (f *Fs) putRawHashes(ctx context.Context, key, fp string, hashes operations.HashSums) error {
return f.db.Do(true, &kvPut{
key: key,
fp: fp,
hashes: hashes,
age: time.Duration(f.opt.MaxAge),
})
}
// Hash returns the selected checksum of the file or "" if unavailable.
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (hashVal string, err error) {
f := o.f
if f.passHashes.Contains(hashType) {
fs.Debugf(o, "pass %s", hashType)
return o.Object.Hash(ctx, hashType)
}
if !f.suppHashes.Contains(hashType) {
fs.Debugf(o, "unsupp %s", hashType)
return "", hash.ErrUnsupported
}
if hashVal, err = o.getHash(ctx, hashType); err != nil {
fs.Debugf(o, "getHash: %v", err)
err = nil
hashVal = ""
}
if hashVal != "" {
fs.Debugf(o, "cached %s = %q", hashType, hashVal)
return hashVal, nil
}
if f.slowHashes.Contains(hashType) {
fs.Debugf(o, "slow %s", hashType)
hashVal, err = o.Object.Hash(ctx, hashType)
if err == nil && hashVal != "" && f.keepHashes.Contains(hashType) {
if err = o.putHashes(ctx, hashMap{hashType: hashVal}); err != nil {
fs.Debugf(o, "putHashes: %v", err)
err = nil
}
}
return hashVal, err
}
if f.autoHashes.Contains(hashType) && o.Size() < int64(f.opt.AutoSize) {
_ = o.updateHashes(ctx)
if hashVal, err = o.getHash(ctx, hashType); err != nil {
fs.Debugf(o, "auto %s = %q (%v)", hashType, hashVal, err)
err = nil
}
}
return hashVal, err
}
// updateHashes performs implicit "rclone hashsum --download" and updates cache.
func (o *Object) updateHashes(ctx context.Context) error {
r, err := o.Open(ctx)
if err != nil {
fs.Infof(o, "update failed (open): %v", err)
return err
}
defer func() {
_ = r.Close()
}()
if _, err = io.Copy(ioutil.Discard, r); err != nil {
fs.Infof(o, "update failed (copy): %v", err)
return err
}
return nil
}
// Update the object with the given data, time and size.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
_ = o.f.pruneHash(src.Remote())
return o.Object.Update(ctx, in, src, options...)
}
// Remove an object.
func (o *Object) Remove(ctx context.Context) error {
_ = o.f.pruneHash(o.Remote())
return o.Object.Remove(ctx)
}
// SetModTime sets the modification time of the file.
// Also prunes the cache entry when modtime changes so that
// touching a file will trigger checksum recalculation even
// on backends that don't provide modTime with fingerprint.
func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
if mtime != o.Object.ModTime(ctx) {
_ = o.f.pruneHash(o.Remote())
}
return o.Object.SetModTime(ctx, mtime)
}
// Open opens the file for read.
// Full reads will also update object hashes.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (r io.ReadCloser, err error) {
size := o.Size()
var offset, limit int64 = 0, -1
for _, option := range options {
switch opt := option.(type) {
case *fs.SeekOption:
offset = opt.Offset
case *fs.RangeOption:
offset, limit = opt.Decode(size)
}
}
if offset < 0 {
return nil, errors.New("invalid offset")
}
if limit < 0 {
limit = size - offset
}
if r, err = o.Object.Open(ctx, options...); err != nil {
return nil, err
}
if offset != 0 || limit < size {
// It's a partial read
return r, err
}
return o.f.newHashingReader(ctx, r, func(sums hashMap) {
if err := o.putHashes(ctx, sums); err != nil {
fs.Infof(o, "auto hashing error: %v", err)
}
})
}
// Put data into the remote path with given modTime and size
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
var (
o *Object
common hash.Set
rehash bool
hashes hashMap
)
if fsrc := src.Fs(); fsrc != nil {
common = fsrc.Hashes().Overlap(f.keepHashes)
// Rehash if source does not have all required hashes or hashing is slow
rehash = fsrc.Features().SlowHash || common != f.keepHashes
}
wrapIn := in
if rehash {
r, err := f.newHashingReader(ctx, in, func(sums hashMap) {
hashes = sums
})
fs.Debugf(src, "Rehash in-fly due to incomplete or slow source set %v (err: %v)", common, err)
if err == nil {
wrapIn = r
} else {
rehash = false
}
}
_ = f.pruneHash(src.Remote())
oResult, err := f.Fs.Put(ctx, wrapIn, src, options...)
o = f.wrapObject(oResult, err)
if o == nil {
return nil, err
}
if !rehash {
hashes = hashMap{}
for _, ht := range common.Array() {
if h, e := src.Hash(ctx, ht); e == nil && h != "" {
hashes[ht] = h
}
}
}
if len(hashes) > 0 {
err := o.putHashes(ctx, hashes)
fs.Debugf(o, "Applied %d source hashes, err: %v", len(hashes), err)
}
return o, err
}
type hashingReader struct {
rd io.Reader
hasher *hash.MultiHasher
fun func(hashMap)
}
func (f *Fs) newHashingReader(ctx context.Context, rd io.Reader, fun func(hashMap)) (*hashingReader, error) {
hasher, err := hash.NewMultiHasherTypes(f.keepHashes)
if err != nil {
return nil, err
}
hr := &hashingReader{
rd: rd,
hasher: hasher,
fun: fun,
}
return hr, nil
}
func (r *hashingReader) Read(p []byte) (n int, err error) {
n, err = r.rd.Read(p)
if err != nil && err != io.EOF {
r.hasher = nil
}
if r.hasher != nil {
if _, errHash := r.hasher.Write(p[:n]); errHash != nil {
r.hasher = nil
err = errHash
}
}
if err == io.EOF && r.hasher != nil {
r.fun(r.hasher.Sums())
r.hasher = nil
}
return
}
func (r *hashingReader) Close() error {
if rc, ok := r.rd.(io.ReadCloser); ok {
return rc.Close()
}
return nil
}
// Return object fingerprint or empty string in case of errors
//
// Note that we can't use the generic `fs.Fingerprint` here because
// this fingerprint is used to pick _derived hashes_ that are slow
// to calculate or completely unsupported by the base remote.
//
// The hasher fingerprint must be based on `fsHash`, the first _fast_
// hash supported _by the underlying remote_ (if there is one),
// while `fs.Fingerprint` would select a hash _produced by hasher_
// creating unresolvable fingerprint loop.
func (o *Object) fingerprint(ctx context.Context) string {
size := o.Object.Size()
timeStr := "-"
if o.f.fpTime {
timeStr = o.Object.ModTime(ctx).UTC().Format(timeFormat)
if timeStr == "" {
return ""
}
}
hashStr := "-"
if o.f.fpHash != hash.None {
var err error
hashStr, err = o.Object.Hash(ctx, o.f.fpHash)
if hashStr == "" || err != nil {
return ""
}
}
return fmt.Sprintf("%d,%s,%s", size, timeStr, hashStr)
}
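
`hashingReader` above computes the cached checksums as a side effect of a full read and only reports them when the wrapped reader returns `io.EOF`, so partial reads never poison the cache. The same idea with just the standard library (a sketch, not the real `hash.MultiHasher`):

```go
package main

import (
	"crypto/md5"
	"fmt"
	"hash"
	"io"
	"io/ioutil"
	"strings"
)

// Tee-style reader: hashes everything that passes through and reports the
// digest only when the underlying reader hits io.EOF, i.e. after a full read.
type hashingReader struct {
	rd  io.Reader
	h   hash.Hash
	fun func(sum string)
}

func (r *hashingReader) Read(p []byte) (int, error) {
	n, err := r.rd.Read(p)
	if n > 0 {
		r.h.Write(p[:n]) // hash.Hash.Write never returns an error
	}
	if err == io.EOF && r.fun != nil {
		r.fun(fmt.Sprintf("%x", r.h.Sum(nil)))
		r.fun = nil
	}
	return n, err
}

func main() {
	hr := &hashingReader{
		rd: strings.NewReader("doggy froggy"),
		h:  md5.New(),
		fun: func(sum string) {
			fmt.Println("md5 =", sum) // fires only after a complete read
		},
	}
	if _, err := io.Copy(ioutil.Discard, hr); err != nil {
		panic(err)
	}
}
```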

View File

@@ -1,4 +1,3 @@
//go:build !plan9
// +build !plan9
package hdfs

View File

@@ -1,4 +1,3 @@
//go:build !plan9
// +build !plan9
package hdfs
@@ -19,28 +18,35 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "namenode",
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
Help: "hadoop name node and port",
Required: true,
Examples: []fs.OptionExample{{
Value: "namenode:8020",
Help: "Connect to host namenode at port 8020",
}},
}, {
Name: "username",
Help: "Hadoop user name.",
Help: "hadoop user name",
Required: false,
Examples: []fs.OptionExample{{
Value: "root",
Help: "Connect to hdfs as root.",
Help: "Connect to hdfs as root",
}},
}, {
Name: "service_principal_name",
Help: `Kerberos service principal name for the namenode.
Help: `Kerberos service principal name for the namenode
Enables KERBEROS authentication. Specifies the Service Principal Name
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
(<SERVICE>/<FQDN>) for the namenode.`,
Required: false,
Examples: []fs.OptionExample{{
Value: "hdfs/namenode.hadoop.docker",
Help: "Namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.",
}},
Advanced: true,
}, {
Name: "data_transfer_protection",
Help: `Kerberos data transfer protection: authentication|integrity|privacy.
Help: `Kerberos data transfer protection: authentication|integrity|privacy
Specifies whether or not authentication, data signature integrity
checks, and wire encryption is required when communicating with the

View File

@@ -1,6 +1,5 @@
// Test HDFS filesystem interface
//go:build !plan9
// +build !plan9
package hdfs_test

View File

@@ -1,7 +1,6 @@
// Build for hdfs for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9
// +build plan9
package hdfs

View File

@@ -1,4 +1,3 @@
//go:build !plan9
// +build !plan9
package hdfs
@@ -110,7 +109,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
dirname := path.Dir(realpath)
fs.Debugf(o.fs, "update [%s]", realpath)
err := o.fs.client.MkdirAll(dirname, 0755)
err := o.fs.client.MkdirAll(dirname, 755)
if err != nil {
return err
}
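
One detail worth noting in this hunk: `0755` (octal) and `755` (decimal) are different permission bit patterns, so the two `MkdirAll` calls above do not request the same mode. A two-line illustration:

```go
package main

import "fmt"

func main() {
	fmt.Printf("%o\n", 0755) // 755 - the usual rwxr-xr-x permissions
	fmt.Printf("%o\n", 755)  // 1363 - decimal 755 is a different bit pattern
}
```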

View File

@@ -38,13 +38,20 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "url",
Help: "URL of http host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
Help: "URL of http host to connect to",
Required: true,
Examples: []fs.OptionExample{{
Value: "https://example.com",
Help: "Connect to example.com",
}, {
Value: "https://user:pass@example.com",
Help: "Connect to example.com using a username and password",
}},
}, {
Name: "headers",
Help: `Set HTTP headers for all transactions.
Help: `Set HTTP headers for all transactions
Use this to set additional HTTP headers for all transactions.
Use this to set additional HTTP headers for all transactions
The input format is comma separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.
@@ -57,7 +64,7 @@ You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'
Advanced: true,
}, {
Name: "no_slash",
Help: `Set this if the site doesn't end directories with /.
Help: `Set this if the site doesn't end directories with /
Use this if your target website does not use / on the end of
directories.
@@ -73,7 +80,7 @@ directories.`,
Advanced: true,
}, {
Name: "no_head",
Help: `Don't use HEAD requests to find file sizes in dir listing.
Help: `Don't use HEAD requests to find file sizes in dir listing
If your site is being very slow to load then you can try this option.
Normally rclone does a HEAD request for each potential file in a

View File

@@ -15,7 +15,7 @@ import (
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/rest"
@@ -47,7 +47,7 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
ts := httptest.NewServer(handler)
// Configure the remote
configfile.Install()
config.LoadConfig(context.Background())
// fs.Config.LogLevel = fs.LogLevelDebug
// fs.Config.DumpHeaders = true
// fs.Config.DumpBodies = true

View File

@@ -11,6 +11,7 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strings"
"time"
@@ -55,10 +56,11 @@ func init() {
Name: "hubic",
Description: "Hubic",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
Config: func(ctx context.Context, name string, m configmap.Mapper) {
err := oauthutil.Config(ctx, "hubic", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
})

View File

@@ -368,7 +368,6 @@ type JottaFile struct {
XMLName xml.Name
Name string `xml:"name,attr"`
Deleted Flag `xml:"deleted,attr"`
PublicURI string `xml:"publicURI"`
PublicSharePath string `xml:"publicSharePath"`
State string `xml:"currentRevision>state"`
CreatedAt Time `xml:"currentRevision>created"`

File diff suppressed because it is too large

View File

@@ -32,29 +32,29 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "endpoint",
Help: "The Koofr API endpoint to use.",
Help: "The Koofr API endpoint to use",
Default: "https://app.koofr.net",
Required: true,
Advanced: true,
}, {
Name: "mountid",
Help: "Mount ID of the mount to use.\n\nIf omitted, the primary mount is used.",
Help: "Mount ID of the mount to use. If omitted, the primary mount is used.",
Required: false,
Default: "",
Advanced: true,
}, {
Name: "setmtime",
Help: "Does the backend support setting modification time.\n\nSet this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
Help: "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
Default: true,
Required: true,
Advanced: true,
}, {
Name: "user",
Help: "Your Koofr user name.",
Help: "Your Koofr user name",
Required: true,
}, {
Name: "password",
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
IsPassword: true,
Required: true,
}, {
@@ -344,7 +344,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (obj fs.Object, err e
return nil, translateErrorsObject(err)
}
if info.Type == "dir" {
return nil, fs.ErrorIsDir
return nil, fs.ErrorNotAFile
}
return &Object{
fs: f,
@@ -534,7 +534,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return nil
}
// About reports space usage (with a MiB precision)
// About reports space usage (with a MB precision)
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
mount, err := f.client.MountsDetails(f.mountID)
if err != nil {
@@ -608,25 +608,5 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
if err != nil {
return "", translateErrorsDir(err)
}
// URL returned by API looks like following:
//
// https://app.koofr.net/links/35d9fb92-74a3-4930-b4ed-57f123bfb1a6
//
// Direct url looks like following:
//
// https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F
//
// I am not sure about the meaning of the "path" parameter; in my experiments
// it is always "%2F", and omitting it or putting any other value
// results in 404.
//
// There is one more quirk: direct link to file in / returns that file,
// direct link to file somewhere else in hierarchy returns zip archive
// with one member.
link := linkData.URL
link = strings.ReplaceAll(link, "/links", "/content/links")
link += "/files/get?path=%2F"
return link, nil
return linkData.ShortURL, nil
}
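
The longer of the two `PublicLink` variants in this hunk rewrites the shareable link into a direct download link. Isolated as a runnable snippet (URL taken from the comment above, everything else assumed):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Shareable link as returned by the API (example from the comment above).
	link := "https://app.koofr.net/links/35d9fb92-74a3-4930-b4ed-57f123bfb1a6"
	// Switch to the /content API and request path "/" to get a direct link.
	link = strings.ReplaceAll(link, "/links", "/content/links")
	link += "/files/get?path=%2F"
	fmt.Println(link)
	// https://app.koofr.net/content/links/35d9fb92-74a3-4930-b4ed-57f123bfb1a6/files/get?path=%2F
}
```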

View File

@@ -1,4 +1,3 @@
//go:build darwin || dragonfly || freebsd || linux
// +build darwin dragonfly freebsd linux
package local

View File

@@ -1,4 +1,3 @@
//go:build windows
// +build windows
package local

View File

@@ -0,0 +1,11 @@
//+build darwin
package local
import "github.com/rclone/rclone/lib/encoder"
// This is the encoding used by the local backend for macOS
//
// macOS can't store invalid UTF-8, it converts them into %XX encoding
const defaultEnc = (encoder.Base |
encoder.EncodeInvalidUtf8)

View File

@@ -0,0 +1,8 @@
//+build !windows,!darwin
package local
import "github.com/rclone/rclone/lib/encoder"
// This is the encoding used by the local backend for non windows platforms
const defaultEnc = encoder.Base

View File

@@ -1,9 +1,10 @@
//go:build windows
// +build windows
//+build windows
package encoder
package local
// OS is the encoding used by the local backend for windows platforms
import "github.com/rclone/rclone/lib/encoder"
// This is the encoding used by the local backend for windows platforms
//
// List of replaced characters:
// < (less than) -> '' // FULLWIDTH LESS-THAN SIGN
@@ -23,10 +24,10 @@ package encoder
// Also encode invalid UTF-8 bytes as Go can't convert them to UTF-16.
//
// https://docs.microsoft.com/de-de/windows/desktop/FileIO/naming-a-file#naming-conventions
const OS = (Base |
EncodeWin |
EncodeBackSlash |
EncodeCtl |
EncodeRightSpace |
EncodeRightPeriod |
EncodeInvalidUtf8)
const defaultEnc = (encoder.Base |
encoder.EncodeWin |
encoder.EncodeBackSlash |
encoder.EncodeCtl |
encoder.EncodeRightSpace |
encoder.EncodeRightPeriod |
encoder.EncodeInvalidUtf8)

View File

@@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
//+build !linux
package local

View File

@@ -1,5 +1,4 @@
//go:build linux
// +build linux
//+build linux
package local

View File

@@ -1,4 +1,3 @@
//go:build windows || plan9 || js
// +build windows plan9 js
package local

View File

@@ -1,4 +1,3 @@
//go:build !windows && !plan9 && !js
// +build !windows,!plan9,!js
package local

View File

@@ -27,7 +27,6 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/text/unicode/norm"
)
// Constants
@@ -43,12 +42,11 @@ func init() {
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows.",
Advanced: runtime.GOOS != "windows",
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
Examples: []fs.OptionExample{{
Value: "true",
Help: "Disables long file names.",
Help: "Disables long file names",
}},
}, {
Name: "copy_links",
@@ -59,7 +57,7 @@ func init() {
Advanced: true,
}, {
Name: "links",
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension",
Default: false,
NoPrefix: true,
ShortOpt: "l",
@@ -67,7 +65,6 @@ func init() {
}, {
Name: "skip_links",
Help: `Don't warn about skipped symlinks.
This flag disables warning messages on skipped symlinks or junction
points, as you explicitly acknowledge that they should be skipped.`,
Default: false,
@@ -75,39 +72,30 @@ points, as you explicitly acknowledge that they should be skipped.`,
Advanced: true,
}, {
Name: "zero_size_links",
Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
Help: `Assume the Stat size of links is zero (and read them instead)
Rclone used to use the Stat size of links as the link size, but this fails in quite a few places:
On some virtual filesystems (such as LucidLink), reading a link size via a Stat call always returns 0.
However, on unix it reads as the length of the text in the link. This may cause errors like this when
syncing:
- Windows
- On some virtual filesystems (such as LucidLink)
- Android
Failed to copy: corrupted on transfer: sizes differ 0 vs 13
So rclone now always reads the link.
`,
Setting this flag causes rclone to read the link and use that as the size of the link
instead of 0, which in most cases fixes the problem.`,
Default: false,
Advanced: true,
}, {
Name: "unicode_normalization",
Help: `Apply unicode NFC normalization to paths and filenames.
Name: "no_unicode_normalization",
Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
This flag can be used to normalize file names read from the local
filesystem into unicode NFC form.
Rclone does not normally touch the encoding of file names it reads from
the file system.
This can be useful when using macOS as it normally provides decomposed (NFD)
unicode which in some languages (e.g. Korean) doesn't display properly on
some OSes.
Note that rclone compares filenames with unicode normalization in the sync
routine so this flag shouldn't normally be used.`,
This flag is deprecated now. Rclone no longer normalizes unicode file
names, but it compares them with unicode normalization in the sync
routine instead.`,
Default: false,
Advanced: true,
}, {
Name: "no_check_updated",
Help: `Don't check to see if the files change during upload.
Help: `Don't check to see if the files change during upload
Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy
@@ -153,7 +141,7 @@ to override the default choice.`,
Advanced: true,
}, {
Name: "case_insensitive",
Help: `Force the filesystem to report itself as case insensitive.
Help: `Force the filesystem to report itself as case insensitive
Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
@@ -162,7 +150,7 @@ to override the default choice.`,
Advanced: true,
}, {
Name: "no_preallocate",
Help: `Disable preallocation of disk space for transferred files.
Help: `Disable preallocation of disk space for transferred files
Preallocation of disk space helps prevent filesystem fragmentation.
However, some virtual filesystem layers (such as Google Drive File
@@ -173,7 +161,7 @@ Use this flag to disable preallocation.`,
Advanced: true,
}, {
Name: "no_sparse",
Help: `Disable sparse files for multi-thread downloads.
Help: `Disable sparse files for multi-thread downloads
On Windows platforms rclone will make sparse files when doing
multi-thread downloads. This avoids long pauses on large files where
@@ -183,7 +171,7 @@ cause disk fragmentation and can be slow to work with.`,
Advanced: true,
}, {
Name: "no_set_modtime",
Help: `Disable setting modtime.
Help: `Disable setting modtime
Normally rclone updates modification time of files after they are done
uploading. This can cause permissions issues on Linux platforms when
@@ -196,7 +184,7 @@ enabled, rclone will no longer update the modtime after copying a file.`,
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: encoder.OS,
Default: defaultEnc,
}},
}
fs.Register(fsi)
@@ -207,7 +195,8 @@ type Options struct {
FollowSymlinks bool `config:"copy_links"`
TranslateSymlinks bool `config:"links"`
SkipSymlinks bool `config:"skip_links"`
UTFNorm bool `config:"unicode_normalization"`
ZeroSizeLinks bool `config:"zero_size_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
@@ -266,6 +255,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, errLinksAndCopyLinks
}
if opt.NoUTFNorm {
fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
}
f := &Fs{
name: name,
opt: *opt,
@@ -402,7 +395,7 @@ func (f *Fs) newObjectWithInfo(remote string, info os.FileInfo) (fs.Object, erro
}
if o.mode.IsDir() {
return nil, fs.ErrorIsDir
return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
}
return o, nil
}
@@ -468,10 +461,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for _, name := range names {
namepath := filepath.Join(fsDirPath, name)
fi, fierr := os.Lstat(namepath)
if os.IsNotExist(fierr) {
// skip entry removed by a concurrent goroutine
continue
}
if fierr != nil {
err = errors.Wrapf(err, "failed to read directory %q", namepath)
fs.Errorf(dir, "%v", fierr)
@@ -532,9 +521,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
func (f *Fs) cleanRemote(dir, filename string) (remote string) {
if f.opt.UTFNorm {
filename = norm.NFC.String(filename)
}
remote = path.Join(dir, f.opt.Enc.ToStandardName(filename))
if !utf8.ValidString(filename) {
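The UTFNorm branch in cleanRemote above applies NFC normalization to names read from disk. The following standalone sketch, using the same golang.org/x/text/unicode/norm package, shows what that normalization does; the Korean example is illustrative (macOS typically stores the decomposed NFD form, while most other systems expect the composed NFC form).

// Standalone sketch (not rclone code) of the NFC normalization used above.
package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	nfd := "\u1112\u1161\u11ab" // decomposed (NFD) Korean syllable, as macOS stores it
	nfc := norm.NFC.String(nfd)
	fmt.Printf("NFD bytes: % x\n", []byte(nfd)) // e1 84 92 e1 85 a1 e1 86 ab
	fmt.Printf("NFC bytes: % x\n", []byte(nfc)) // ed 95 9c
	fmt.Println(nfc == "\ud55c")                // true: the composed form U+D55C
}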
@@ -570,8 +556,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
localPath := f.localPath(dir)
err := file.MkdirAll(localPath, 0777)
err := os.MkdirAll(localPath, 0777)
if err != nil {
return err
}
@@ -765,7 +752,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Create parent of destination
dstParentPath := filepath.Dir(dstPath)
err = file.MkdirAll(dstParentPath, 0777)
err = os.MkdirAll(dstParentPath, 0777)
if err != nil {
return err
}
@@ -1099,7 +1086,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// mkdirAll makes all the directories needed to store the object
func (o *Object) mkdirAll() error {
dir := filepath.Dir(o.path)
return file.MkdirAll(dir, 0777)
return os.MkdirAll(dir, 0777)
}
type nopWriterCloser struct {
@@ -1157,10 +1144,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
err = file.PreAllocate(src.Size(), f)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
if err == file.ErrDiskFull {
_ = f.Close()
return err
}
}
}
out = f
@@ -1279,13 +1262,9 @@ func (o *Object) setMetadata(info os.FileInfo) {
o.modTime = info.ModTime()
o.mode = info.Mode()
o.fs.objectMetaMu.Unlock()
// Read the size of the link.
//
// The value in info.Size() is not always correct
// - Windows links read as 0 size
// - On some virtual filesystems (such as LucidLink) links read as 0 size
// - Android - on some versions the links are larger than readlink suggests
if o.translatedLink {
// On Windows links read as 0 size so set the correct size here
// Optionally, users can turn this feature on with the zero_size_links flag
if (runtime.GOOS == "windows" || o.fs.opt.ZeroSizeLinks) && o.translatedLink {
linkdst, err := os.Readlink(o.path)
if err != nil {
fs.Errorf(o, "Failed to read link size: %v", err)


@@ -1,6 +1,5 @@
// Device reading functions
//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
package local


@@ -1,6 +1,5 @@
// Device reading functions
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package local


@@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
//+build !windows
package local


@@ -1,5 +1,4 @@
//go:build windows
// +build windows
//+build windows
package local


@@ -1,4 +1,3 @@
//go:build !windows && !plan9 && !js
// +build !windows,!plan9,!js
package local


@@ -1,4 +1,3 @@
//go:build windows || plan9 || js
// +build windows plan9 js
package local

Some files were not shown because too many files have changed in this diff.