mirror of https://github.com/rclone/rclone.git synced 2025-12-25 20:53:28 +00:00

Compare commits


1 Commit

Author: Nick Craig-Wood
SHA1: 87d64e7fb4
Message: mount: use the equivalent of kernel_cache by default #FIXME WIP
Date: 2018-07-11 14:56:17 +01:00
13010 changed files with 8718388 additions and 260505 deletions
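The single commit being compared makes `rclone mount` use the equivalent of the FUSE `kernel_cache` mount option. For orientation only - this is a hedged sketch, not rclone's actual vfs code - with bazil.org/fuse (the library rclone mount used at the time), kernel page-cache retention is requested per file handle by setting `OpenKeepCache` on the open response:

```
// Sketch only: a hypothetical bazil.org/fuse node that opts into
// kernel_cache-like behaviour. rclone's real implementation differs.
package mountsketch

import (
	"context"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
)

// File is a stand-in node; a real one would also implement reads etc.
type File struct{}

// Attr fills in minimal file attributes.
func (f *File) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Mode = 0644
	return nil
}

// Open tells the kernel to keep cached pages across opens, which is
// what mounting with -o kernel_cache does globally.
func (f *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
	resp.Flags |= fuse.OpenKeepCache
	return f, nil
}
```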

.appveyor.yml (new file, 46 lines)

@@ -0,0 +1,46 @@
version: "{build}"
os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\ncw\rclone
environment:
  GOPATH: C:\gopath
  CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
  ORIGPATH: '%PATH%'
  NOCCPATH: C:\MinGW\bin;%GOPATH%\bin;%PATH%
  PATHCC64: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%NOCCPATH%
  PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
  PATH: '%PATHCC64%'
  RCLONE_CONFIG_PASS:
    secure: HbzxSy9zQ8NYWN9NNPf6ALQO9Q0mwRNqwehsLcOEHy0=
install:
  - choco install winfsp -y
  - choco install zip -y
  - copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe
build_script:
  - echo %PATH%
  - echo %GOPATH%
  - go version
  - go env
  - go install
  - go build
  - make log_since_last_release > %TEMP%\git-log.txt
  - make version > %TEMP%\version
  - set /p RCLONE_VERSION=<%TEMP%\version
  - set PATH=%PATHCC32%
  - go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/386" -cgo -tags cmount %RCLONE_VERSION%
  - set PATH=%PATHCC64%
  - go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/amd64" -cgo -no-clean -tags cmount %RCLONE_VERSION%
test_script:
  - make GOTAGS=cmount quicktest
artifacts:
  - path: rclone.exe
  - path: build/*-v*.zip
deploy_script:
  - IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload

.circleci/config.yml (new file, 34 lines)

@@ -0,0 +1,34 @@
version: 2
jobs:
  build:
    machine: true
    working_directory: ~/.go_workspace/src/github.com/ncw/rclone
    steps:
      - checkout
      - run:
          name: Cross-compile rclone
          command: |
            docker pull billziss/xgo-cgofuse
            go get -v github.com/karalabe/xgo
            xgo \
                --image=billziss/xgo-cgofuse \
                --targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
                -tags cmount \
                .
            xgo \
                --targets=android/*,ios/* \
                .
      - run:
          name: Prepare artifacts
          command: |
            mkdir -p /tmp/rclone.dist
            cp -R rclone-* /tmp/rclone.dist
      - store_artifacts:
          path: /tmp/rclone.dist

.gitattributes (deleted, 7 lines)

@@ -1,7 +0,0 @@
# Ignore generated files in GitHub language statistics and diffs
/MANUAL.* linguist-generated=true
/rclone.1 linguist-generated=true
# Don't fiddle with the line endings of test data
**/testdata/** -text
**/test/** -text

.github/FUNDING.yml (deleted, 4 lines)

@@ -1,4 +0,0 @@
github: [ncw]
patreon: njcw
liberapay: ncw
custom: ["https://rclone.org/donate/"]

Issue template (deleted file)

@@ -1,31 +0,0 @@
<!--
Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
https://forum.rclone.org/
instead of filing an issue for a quick response.
If you are reporting a bug or asking for a new feature then please use one of the templates here:
https://github.com/rclone/rclone/issues/new
otherwise fill in the form below.
Thank you
The Rclone Developers
-->
#### Output of `rclone version`
#### Describe the issue

Bug report template (deleted file)

@@ -1,50 +0,0 @@
---
name: Bug report
about: Report a problem with rclone
---
<!--
Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
https://forum.rclone.org/
instead of filing an issue for a quick response.
If you think you might have found a bug, please can you try to replicate it with the latest beta?
https://beta.rclone.org/
If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
Thank you
The Rclone Developers
-->
#### What is the problem you are having with rclone?
#### What is your rclone version (output from `rclone version`)
#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
#### Which cloud storage system are you using? (eg Google Drive)
#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)

Feature request template (deleted file)

@@ -1,36 +0,0 @@
---
name: Feature request
about: Suggest a new feature or enhancement for rclone
---
<!--
Welcome :-)
So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
Here is a checklist of things to do:
1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum first: https://forum.rclone.org/
3. Make a feature request issue (this is the right place!).
4. Be prepared to get involved making the feature :-)
Looking forward to your great idea!
The Rclone Developers
-->
#### What is your current rclone version (output from `rclone version`)?
#### What problem are you are trying to solve?
#### How do you think rclone should be changed to solve that?

Issue template config (deleted file)

@@ -1,5 +0,0 @@
blank_issues_enabled: false
contact_links:
- name: Rclone Forum Community Support
url: https://forum.rclone.org/
about: Please ask and answer questions here.

Pull request template (deleted file)

@@ -1,29 +0,0 @@
<!--
Thank you very much for contributing code or documentation to rclone! Please
fill out the following questions to make it easier for us to review your
changes.
You do not need to check all the boxes below all at once, feel free to take
your time and add more commits. If you're done and ready for review, please
check the last box.
-->
#### What is the purpose of this change?
<!--
Describe the changes here
-->
#### Was the change discussed in an issue or in the forum before?
<!--
Link issues and relevant forum posts here.
-->
#### Checklist
- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
- [ ] I have added tests for all changes in this PR if appropriate.
- [ ] I have added documentation for the changes if appropriate.
- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).
- [ ] I'm done, this Pull Request is ready for review :-)

.github/workflows/build.yml (deleted file)

@@ -1,256 +0,0 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable build.yml" -*-
name: build
# Trigger the workflow on push or pull request
on:
  push:
    branches:
      - '*'
    tags:
      - '*'
  pull_request:
jobs:
  build:
    timeout-minutes: 60
    strategy:
      fail-fast: false
      matrix:
        job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'go1.11', 'go1.12', 'go1.13', 'go1.14']
        include:
          - job_name: linux
            os: ubuntu-latest
            go: '1.15.x'
            gotags: cmount
            build_flags: '-include "^linux/"'
            check: true
            quicktest: true
            racequicktest: true
            deploy: true
          - job_name: mac
            os: macOS-latest
            go: '1.15.x'
            gotags: 'cmount'
            build_flags: '-include "^darwin/amd64" -cgo'
            quicktest: true
            racequicktest: true
            deploy: true
          - job_name: windows_amd64
            os: windows-latest
            go: '1.15.x'
            gotags: cmount
            build_flags: '-include "^windows/amd64" -cgo'
            quicktest: true
            racequicktest: true
            deploy: true
          - job_name: windows_386
            os: windows-latest
            go: '1.15.x'
            gotags: cmount
            goarch: '386'
            cgo: '1'
            build_flags: '-include "^windows/386" -cgo'
            quicktest: true
            deploy: true
          - job_name: other_os
            os: ubuntu-latest
            go: '1.15.x'
            build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
            compile_all: true
            deploy: true
          - job_name: go1.11
            os: ubuntu-latest
            go: '1.11.x'
            quicktest: true
          - job_name: go1.12
            os: ubuntu-latest
            go: '1.12.x'
            quicktest: true
          - job_name: go1.13
            os: ubuntu-latest
            go: '1.13.x'
            quicktest: true
          - job_name: go1.14
            os: ubuntu-latest
            go: '1.14.x'
            quicktest: true
            racequicktest: true
    name: ${{ matrix.job_name }}
    runs-on: ${{ matrix.os }}
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          stable: 'false'
          go-version: ${{ matrix.go }}
      - name: Set environment variables
        shell: bash
        run: |
          echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
          echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
          if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
          if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
      - name: Install Libraries on Linux
        shell: bash
        run: |
          sudo modprobe fuse
          sudo chmod 666 /dev/fuse
          sudo chown root:$USER /etc/fuse.conf
          sudo apt-get install fuse libfuse-dev rpm pkg-config
        if: matrix.os == 'ubuntu-latest'
      - name: Install Libraries on macOS
        shell: bash
        run: |
          brew update
          brew cask install osxfuse
        if: matrix.os == 'macOS-latest'
      - name: Install Libraries on Windows
        shell: powershell
        run: |
          $ProgressPreference = 'SilentlyContinue'
          choco install -y winfsp zip
          Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
          if ($env:GOARCH -eq "386") {
            choco install -y mingw --forcex86 --force
            Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
          }
          # Copy mingw32-make.exe to make.exe so the same command line
          # can be used on Windows as on macOS and Linux
          $path = (get-command mingw32-make.exe).Path
          Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
        if: matrix.os == 'windows-latest'
      - name: Print Go version and environment
        shell: bash
        run: |
          printf "Using go at: $(which go)\n"
          printf "Go version: $(go version)\n"
          printf "\n\nGo environment:\n\n"
          go env
          printf "\n\nRclone environment:\n\n"
          make vars
          printf "\n\nSystem environment:\n\n"
          env
      - name: Go module cache
        uses: actions/cache@v2
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-
      - name: Build rclone
        shell: bash
        run: |
          make
      - name: Run tests
        shell: bash
        run: |
          make quicktest
        if: matrix.quicktest
      - name: Race test
        shell: bash
        run: |
          make racequicktest
        if: matrix.racequicktest
      - name: Code quality test
        shell: bash
        run: |
          make build_dep
          make check
        if: matrix.check
      - name: Compile all architectures test
        shell: bash
        run: |
          make
          make compile_all
        if: matrix.compile_all
      - name: Deploy built binaries
        shell: bash
        run: |
          if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
          if [[ "${{ matrix.os }}" == "windows-latest" ]]; then make release_dep_windows ; fi
          make ci_beta
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
        # working-directory: '$(modulePath)'
        # Deploy binaries if enabled in config && not a PR && not a fork
        if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
  xgo:
    timeout-minutes: 60
    name: "xgo cross compile"
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v1
        with:
          # Checkout into a fixed path to avoid import path problems on go < 1.11
          path: ./src/github.com/rclone/rclone
      - name: Set environment variables
        shell: bash
        run: |
          echo '::set-env name=GOPATH::${{ runner.workspace }}'
          echo '::add-path::${{ runner.workspace }}/bin'
      - name: Cross-compile rclone
        run: |
          docker pull billziss/xgo-cgofuse
          GO111MODULE=off go get -v github.com/karalabe/xgo # don't add to go.mod
          # xgo \
          #     -image=billziss/xgo-cgofuse \
          #     -targets=darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
          #     -tags cmount \
          #     -dest build \
          #     .
          xgo \
              -image=billziss/xgo-cgofuse \
              -targets=android/*,ios/* \
              -dest build \
              .
      - name: Build rclone
        shell: bash
        run: |
          make
      - name: Upload artifacts
        run: |
          make ci_upload
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
        # Upload artifacts if not a PR && not a fork
        if: github.head_ref == '' && github.repository == 'rclone/rclone'

Docker beta build workflow (deleted file)

@@ -1,25 +0,0 @@
name: Docker beta build
on:
  push:
    branches:
      - master
jobs:
  build:
    runs-on: ubuntu-latest
    name: Build image job
    steps:
      - name: Checkout master
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Build and publish image
        uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
        with:
          tag: beta
          imageName: rclone/rclone
          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
          publish: true
          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

Docker release build workflow (deleted file)

@@ -1,33 +0,0 @@
name: Docker release build
on:
  release:
    types: [published]
jobs:
  build:
    runs-on: ubuntu-latest
    name: Build image job
    steps:
      - name: Checkout master
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Get actual patch version
        id: actual_patch_version
        run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
      - name: Get actual minor version
        id: actual_minor_version
        run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
      - name: Get actual major version
        id: actual_major_version
        run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
      - name: Build and publish image
        uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
        with:
          tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
          imageName: rclone/rclone
          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
          publish: true
          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
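The three version steps above slice a release ref such as `refs/tags/v1.53.2` into patch, minor and major Docker tags (`1.53.2`, `1.53`, `1`) plus `latest`. A small standalone illustration of the same derivation (hypothetical helper, not part of the repo):

```
// Illustration of the tag derivation done by the workflow's cut/sed
// pipeline; dockerTags is a made-up helper, not rclone code.
package main

import (
	"fmt"
	"strings"
)

func dockerTags(githubRef string) []string {
	// refs/tags/v1.53.2 -> v1.53.2 (third field), then strip the "v"
	parts := strings.Split(githubRef, "/")
	patch := strings.TrimPrefix(parts[len(parts)-1], "v")
	fields := strings.Split(patch, ".")
	minor := strings.Join(fields[:2], ".") // 1.53
	major := fields[0]                     // 1
	return []string{"latest", patch, minor, major}
}

func main() {
	fmt.Println(dockerTags("refs/tags/v1.53.2"))
	// Output: [latest 1.53.2 1.53 1]
}
```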

.gitignore (modified, 5 lines)

@@ -5,8 +5,3 @@ build
 docs/public
 rclone.iml
 .idea
-.history
-*.test
-*.log
-*.iml
-fuzz-build.zip

golangci-lint configuration (deleted file)

@@ -1,26 +0,0 @@
# golangci-lint configuration options
linters:
  enable:
    - deadcode
    - errcheck
    - goimports
    - golint
    - ineffassign
    - structcheck
    - varcheck
    - govet
    - unconvert
    #- prealloc
    #- maligned
  disable-all: true
issues:
  # Enable some lints excluded by default
  exclude-use-default: false
  # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
  max-per-linter: 0
  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
  max-same-issues: 0

.gometalinter.json (new file, 14 lines)

@@ -0,0 +1,14 @@
{
    "Enable": [
        "deadcode",
        "errcheck",
        "goimports",
        "golint",
        "ineffassign",
        "structcheck",
        "varcheck",
        "vet"
    ],
    "EnableGC": true,
    "Vendor": true
}

.pkgr.yml (new file, 2 lines)

@@ -0,0 +1,2 @@
default_dependencies: false
cli: rclone

.travis.yml (new file, 50 lines)

@@ -0,0 +1,50 @@
language: go
sudo: required
dist: trusty
os:
  - linux
go:
  - 1.7.6
  - 1.8.7
  - 1.9.3
  - "1.10.1"
  - tip
before_install:
  - if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
  - if [[ $TRAVIS_OS_NAME == osx ]]; then brew update && brew tap caskroom/cask && brew cask install osxfuse ; fi
install:
  - git fetch --unshallow --tags
  - make vars
  - make build_dep
script:
  - make check
  - make quicktest
  - make compile_all
env:
  global:
    - GOTAGS=cmount
    - secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
    - secure: AMjrMAksDy3QwqGqnvtUg8FL/GNVgNqTqhntLF9HSU0njHhX6YurGGnfKdD9vNHlajPQOewvmBjwNLcDWGn2WObdvmh9Ohep0EmOjZ63kliaRaSSQueSd8y0idfqMQAxep0SObOYbEDVmQh0RCAE9wOVKRaPgw98XvgqWGDq5Tw=
    - secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
addons:
  apt:
    packages:
      - fuse
      - libfuse-dev
      - rpm
      - pkg-config
matrix:
  allow_failures:
    - go: tip
  include:
    - os: osx
      go: "1.10.1"
      env: GOTAGS=""
deploy:
  provider: script
  script: make travis_beta
  skip_cleanup: true
  on:
    all_branches: true
    go: "1.10.1"
    condition: $TRAVIS_PULL_REQUEST == false

CONTRIBUTING.md (modified)

@@ -21,20 +21,20 @@ with the [latest beta of rclone](https://beta.rclone.org/):
 ## Submitting a pull request ##
 If you find a bug that you'd like to fix, or a new feature that you'd
-like to implement then please submit a pull request via GitHub.
+like to implement then please submit a pull request via Github.
 If it is a big feature then make an issue first so it can be discussed.
 You'll need a Go environment set up with GOPATH set. See [the Go
 getting started docs](https://golang.org/doc/install) for more info.
-First in your web browser press the fork button on [rclone's GitHub
-page](https://github.com/rclone/rclone).
+First in your web browser press the fork button on [rclone's Github
+page](https://github.com/ncw/rclone).
 Now in your terminal
-    go get -u github.com/rclone/rclone
-    cd $GOPATH/src/github.com/rclone/rclone
+    go get github.com/ncw/rclone
+    cd $GOPATH/src/github.com/ncw/rclone
     git remote rename origin upstream
     git remote add origin git@github.com:YOURUSER/rclone.git
@@ -64,34 +64,22 @@ packages which you can install with
 Make sure you
-* Add [documentation](#writing-documentation) for a new feature.
-* Follow the [commit message guidelines](#commit-messages).
-* Add [unit tests](#testing) for a new feature
+* Add documentation for a new feature (see below for where)
+* Add unit tests for a new feature
 * squash commits down to one per feature
-* rebase to master with `git rebase master`
+* rebase to master `git rebase master`
 When you are done with that
     git push origin my-new-feature
-Go to the GitHub website and click [Create pull
+Go to the Github website and click [Create pull
 request](https://help.github.com/articles/creating-a-pull-request/).
 You patch will get reviewed and you might get asked to fix some stuff.
-If so, then make the changes in the same branch, squash the commits (make multiple commits one commit) by running:
-```
-git log # See how many commits you want to squash
-git reset --soft HEAD~2 # This squashes the 2 latest commits together.
-git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
-git commit # Add a new commit message.
-git push --force # Push the squashed commit to your GitHub repo.
-# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also reccommends wizardzines.com
-```
-## CI for your fork ##
-rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
+If so, then make the changes in the same branch, squash the commits,
+rebase it to master then push it to Github with `--force`.
 ## Testing ##
@@ -121,24 +109,17 @@ but they can be run against any of the remotes.
     cd fs/sync
     go test -v -remote TestDrive:
-    go test -v -remote TestDrive: -fast-list
+    go test -v -remote TestDrive: -subdir
     cd fs/operations
     go test -v -remote TestDrive:
-If you want to use the integration test framework to run these tests
-all together with an HTML report and test retries then from the
-project root:
-    go install github.com/rclone/rclone/fstest/test_all
-    test_all -backend drive
 If you want to run all the integration tests against all the remotes,
 then change into the project root and run
     make test
-This command is run daily on the integration test server. You can
+This command is run daily on the the integration test server. You can
 find the results at https://pub.rclone.org/integration-tests/
 ## Code Organisation ##
@@ -155,7 +136,6 @@ with modules beneath.
 * ...commands
 * docs - the documentation and website
 * content - adjust these docs only - everything else is autogenerated
-* command - these are auto generated - edit the corresponding .go file
 * fs - main rclone definitions - minimal amount of code
 * accounting - bandwidth limiting and statistics
 * asyncreader - an io.Reader which reads ahead
@@ -165,7 +145,7 @@ with modules beneath.
 * fserrors - rclone specific error handling
 * fshttp - http handling for rclone
 * fspath - path handling for rclone
-* hash - defines rclone's hash types and functions
+* hash - defines rclones hash types and functions
 * list - list a remote
 * log - logging facilities
 * march - iterates directories in lock step
@@ -186,20 +166,17 @@ with modules beneath.
 * pacer - retries with backoff and paces operations
 * readers - a selection of useful io.Readers
 * rest - a thin abstraction over net/http for REST
+* vendor - 3rd party code managed by the dep tool
 * vfs - Virtual FileSystem layer for implementing rclone mount and similar
 ## Writing Documentation ##
 If you are adding a new feature then please update the documentation.
-If you add a new general flag (not for a backend), then document it in
+If you add a new flag, then if it is a general flag, document it in
 `docs/content/docs.md` - the flags there are supposed to be in
-alphabetical order.
-If you add a new backend option/flag, then it should be documented in
-the source file in the `Help:` field. The first line of this is used
-for the flag help, the remainder is shown to the user in `rclone
-config` and is added to the docs with `make backenddocs`.
+alphabetical order. If it is a remote specific flag, then document it
+in `docs/content/remote.md`.
 The only documentation you need to edit are the `docs/content/*.md`
 files. The MANUAL.*, rclone.1, web site etc are all auto generated
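For reference, the `Help:` field mentioned in the removed paragraph above lives on a backend's option definitions, which are registered with the `fs` package. A minimal sketch - the `pifs` backend and `chunk_size` option are invented for illustration, only the `fs.Option` field names follow rclone's API:

```
// Sketch of documenting a backend option via Help:, as described above.
// The backend and option are hypothetical.
package pifs

import "github.com/rclone/rclone/fs"

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "pifs",
		Description: "Hypothetical example backend",
		Options: []fs.Option{{
			Name: "chunk_size",
			// The first line becomes the flag help; the rest is shown
			// in `rclone config` and pulled into the docs by
			// `make backenddocs`.
			Help: `Chunk size to use for uploads.

Larger chunks use more memory but can upload faster.`,
			Default:  fs.SizeSuffix(8 * 1024 * 1024),
			Advanced: true,
		}},
	})
}
```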
@@ -210,9 +187,6 @@ don't need to run these when adding a feature.
 Documentation for rclone sub commands is with their code, eg
 `cmd/ls/ls.go`.
-Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
-for small changes in the docs which makes it very easy.
 ## Making a release ##
 There are separate instructions for making a release in the RELEASE.md
@@ -221,20 +195,14 @@ file.
 ## Commit messages ##
 Please make the first line of your commit message a summary of the
-change that a user (not a developer) of rclone would like to read, and
-prefix it with the directory of the change followed by a colon. The
-changelog gets made by looking at just these first lines so make it
-good!
+change, and prefix it with the directory of the change followed by a
+colon. The changelog gets made by looking at just these first lines
+so make it good!
 If you have more to say about the commit, then enter a blank line and
 carry on the description. Remember to say why the change was needed -
 the commit itself shows what was changed.
-Writing more is better than less. Comparing the behaviour before the
-change to that after the change is very useful. Imagine you are
-writing to yourself in 12 months time when you've forgotten everything
-about what you just did and you need to get up to speed quickly.
 If the change fixes an issue then write `Fixes #1234` in the commit
 message. This can be on the subject line if it will fit. If you
 don't want to close the associated issue just put `#1234` and the
@@ -261,37 +229,37 @@ Fixes #1498
 ## Adding a dependency ##
-rclone uses the [go
-modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
-support in go1.11 and later to manage its dependencies.
-rclone can be built with modules outside of the GOPATH
-To add a dependency `github.com/ncw/new_dependency` see the
-instructions below. These will fetch the dependency and add it to
-`go.mod` and `go.sum`.
-    GO111MODULE=on go get github.com/ncw/new_dependency
-You can add constraints on that package when doing `go get` (see the
-go docs linked above), but don't unless you really need to.
-Please check in the changes generated by `go mod` including `go.mod`
-and `go.sum` in the same commit as your other changes.
+rclone uses the [dep](https://github.com/golang/dep) tool to manage
+its dependencies. All code that rclone needs for building is stored
+in the `vendor` directory for perfectly reproducable builds.
+The `vendor` directory is entirely managed by the `dep` tool.
+To add a new dependency, run `dep ensure` and `dep` will pull in the
+new dependency to the `vendor` directory and update the `Gopkg.lock`
+file.
+You can add constraints on that package in the `Gopkg.toml` file (see
+the `dep` documentation), but don't unless you really need to.
+Please check in the changes generated by `dep` including the `vendor`
+directory and `Godep.toml` and `Godep.lock` in a single commit
+separate from any other code changes. Watch out for new files in
+`vendor`.
 ## Updating a dependency ##
 If you need to update a dependency then run
-    GO111MODULE=on go get -u github.com/pkg/errors
-Check in a single commit as above.
+    dep ensure -update github.com/pkg/errors
+Check in in a single commit as above.
 ## Updating all the dependencies ##
 In order to update all the dependencies then run `make update`. This
-just uses the go modules to update all the modules to their latest
-stable release. Check in the changes in a single commit as above.
+just runs `dep ensure -update`. Check in the changes in a single
+commit as above.
 This should be done early in the release cycle to pick up new versions
 of packages in time for them to get some testing.
@@ -331,11 +299,6 @@ Getting going
 * Add your remote to the imports in `backend/all/all.go`
 * HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
 * Try to implement as many optional methods as possible as it makes the remote more usable.
-* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
-  * `rclone purge -v TestRemote:rclone-info`
-  * `rclone info --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
-  * `go run cmd/info/internal/build_csv/main.go -o remote.csv remote.json`
-  * open `remote.csv` in a spreadsheet and examine
 Unit tests
@@ -345,71 +308,26 @@ Unit tests
 Integration tests
-* Add your backend to `fstest/test_all/config.yaml`
-* Once you've done that then you can use the integration test framework from the project root:
-  * go install ./...
-  * test_all -backends remote
-Or if you want to run the integration tests manually:
+* Add your fs to `fstest/test_all/test_all.go`
 * Make sure integration tests pass with
   * `cd fs/operations`
   * `go test -v -remote TestRemote:`
   * `cd fs/sync`
   * `go test -v -remote TestRemote:`
-* If your remote defines `ListR` check with this also
+* If you are making a bucket based remote, then check with this also
+  * `go test -v -remote TestRemote: -subdir`
+* And if your remote defines `ListR` this also
   * `go test -v -remote TestRemote: -fast-list`
 See the [testing](#testing) section for more information on integration tests.
-Add your fs to the docs - you'll need to pick an icon for it from
-[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
-alphabetical order of full name of remote (eg `drive` is ordered as
-`Google Drive`) but with the local file system last.
+Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.
-* `README.md` - main GitHub page
-* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
-  * make sure this has the `autogenerated options` comments in (see your reference backend docs)
-  * update them with `make backenddocs` - revert any changes in other backends
+* `README.md` - main Github page
+* `docs/content/remote.md` - main docs page
 * `docs/content/overview.md` - overview docs
 * `docs/content/docs.md` - list of remotes in config section
-* `docs/content/_index.md` - front page of rclone.org
+* `docs/content/about.md` - front page of rclone.org
 * `docs/layouts/chrome/navbar.html` - add it to the website navigation
 * `bin/make_manual.py` - add the page to the `docs` constant
+* `cmd/cmd.go` - the main help for rclone
-Once you've written the docs, run `make serve` and check they look OK
-in the web browser and the links (internal and external) all work.
-## Writing a plugin ##
-New features (backends, commands) can also be added "out-of-tree", through Go plugins.
-Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
-This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
-Usage
-- Naming
-  - Plugins names must have the pattern `librcloneplugin_KIND_NAME.so`.
-  - `KIND` should be one of `backend`, `command` or `bundle`.
-  - Example: A plugin with backend support for PiFS would be called
-    `librcloneplugin_backend_pifs.so`.
-- Loading
-  - Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
-  - Supported on rclone v1.50 or greater.
-  - All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
-  - If this variable doesn't exist, plugin support is disabled.
-  - Plugins must be compiled against the exact version of rclone to work.
-    (The rclone used during building the plugin must be the same as the source of rclone)
-Building
-To turn your existing additions into a Go plugin, move them to an external repository
-and change the top-level package name to `main`.
-Check `rclone --version` and make sure that the plugin's rclone dependency and host Go version match.
-Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
-[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
-[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)
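To make the removed plugin instructions concrete, a skeleton for `librcloneplugin_backend_pifs.so` might look like the sketch below (untested; `pifs` reuses the naming example above, and the `NewFs` signature shown is the v1.50-era one - it must match the exact rclone version the plugin is built against):

```
// Hypothetical plugin skeleton, built with:
//   go build -buildmode=plugin -o librcloneplugin_backend_pifs.so .
package main

import (
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

func init() {
	// Plugins register themselves exactly like in-tree backends;
	// rclone loads the .so from $RCLONE_PLUGIN_PATH and init() runs.
	fs.Register(&fs.RegInfo{
		Name:        "pifs",
		Description: "Hypothetical example backend",
		NewFs:       newFs,
	})
}

// newFs would construct the backend from its config; stubbed here.
func newFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	return nil, errors.New("pifs: not implemented")
}

// main is ignored when built as a plugin.
func main() {}
```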

Dockerfile (deleted file)

@@ -1,22 +0,0 @@
FROM golang AS builder
COPY . /go/src/github.com/rclone/rclone/
WORKDIR /go/src/github.com/rclone/rclone/
RUN \
CGO_ENABLED=0 \
make
RUN ./rclone version
# Begin final image
FROM alpine:latest
RUN apk --no-cache add ca-certificates fuse tzdata && \
echo "user_allow_other" >> /etc/fuse.conf
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
ENTRYPOINT [ "rclone" ]
WORKDIR /data
ENV XDG_CONFIG_HOME=/config

Gopkg.lock (generated; new file, 490 lines)

@@ -0,0 +1,490 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
branch = "master"
name = "bazil.org/fuse"
packages = [
".",
"fs",
"fuseutil"
]
revision = "65cc252bf6691cb3c7014bcb2c8dc29de91e3a7e"
[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata"]
revision = "0fd7230b2a7505833d5f69b75cbd6c9582401479"
version = "v0.23.0"
[[projects]]
name = "github.com/Azure/azure-sdk-for-go"
packages = [
"storage",
"version"
]
revision = "fbe7db0e3f9793ba3e5704efbab84f51436c136e"
version = "v18.0.0"
[[projects]]
name = "github.com/Azure/go-autorest"
packages = [
"autorest",
"autorest/adal",
"autorest/azure",
"autorest/date"
]
revision = "1f7cd6cfe0adea687ad44a512dfe76140f804318"
version = "v10.12.0"
[[projects]]
branch = "master"
name = "github.com/Unknwon/goconfig"
packages = ["."]
revision = "ef1e4c783f8f0478bd8bff0edb3dd0bade552599"
[[projects]]
name = "github.com/VividCortex/ewma"
packages = ["."]
revision = "b24eb346a94c3ba12c1da1e564dbac1b498a77ce"
version = "v1.1.1"
[[projects]]
branch = "master"
name = "github.com/a8m/tree"
packages = ["."]
revision = "3cf936ce15d6100c49d9c75f79c220ae7e579599"
[[projects]]
name = "github.com/abbot/go-http-auth"
packages = ["."]
revision = "0ddd408d5d60ea76e320503cc7dd091992dee608"
version = "v0.4.0"
[[projects]]
name = "github.com/aws/aws-sdk-go"
packages = [
"aws",
"aws/awserr",
"aws/awsutil",
"aws/client",
"aws/client/metadata",
"aws/corehandlers",
"aws/credentials",
"aws/credentials/ec2rolecreds",
"aws/credentials/endpointcreds",
"aws/credentials/stscreds",
"aws/csm",
"aws/defaults",
"aws/ec2metadata",
"aws/endpoints",
"aws/request",
"aws/session",
"aws/signer/v4",
"internal/sdkio",
"internal/sdkrand",
"internal/shareddefaults",
"private/protocol",
"private/protocol/eventstream",
"private/protocol/eventstream/eventstreamapi",
"private/protocol/query",
"private/protocol/query/queryutil",
"private/protocol/rest",
"private/protocol/restxml",
"private/protocol/xml/xmlutil",
"service/s3",
"service/s3/s3iface",
"service/s3/s3manager",
"service/sts"
]
revision = "bfc1a07cf158c30c41a3eefba8aae043d0bb5bff"
version = "v1.14.8"
[[projects]]
name = "github.com/billziss-gh/cgofuse"
packages = ["fuse"]
revision = "ea66f9809c71af94522d494d3d617545662ea59d"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/coreos/bbolt"
packages = ["."]
revision = "af9db2027c98c61ecd8e17caa5bd265792b9b9a2"
[[projects]]
name = "github.com/cpuguy83/go-md2man"
packages = ["md2man"]
revision = "20f5889cbdc3c73dbd2862796665e7c465ade7d1"
version = "v1.0.8"
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
version = "v3.2.0"
[[projects]]
name = "github.com/djherbis/times"
packages = ["."]
revision = "95292e44976d1217cf3611dc7c8d9466877d3ed5"
version = "v1.0.1"
[[projects]]
name = "github.com/dropbox/dropbox-sdk-go-unofficial"
packages = [
"dropbox",
"dropbox/async",
"dropbox/common",
"dropbox/file_properties",
"dropbox/files",
"dropbox/seen_state",
"dropbox/sharing",
"dropbox/team_common",
"dropbox/team_policies",
"dropbox/users",
"dropbox/users_common"
]
revision = "7afa861bfde5a348d765522b303b6fbd9d250155"
version = "v4.1.0"
[[projects]]
name = "github.com/go-ini/ini"
packages = ["."]
revision = "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5"
version = "v1.37.0"
[[projects]]
name = "github.com/golang/protobuf"
packages = ["proto"]
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/google/go-querystring"
packages = ["query"]
revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
[[projects]]
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
branch = "master"
name = "github.com/jlaffaye/ftp"
packages = ["."]
revision = "2403248fa8cc9f7909862627aa7337f13f8e0bf1"
[[projects]]
name = "github.com/jmespath/go-jmespath"
packages = ["."]
revision = "0b12d6b5"
[[projects]]
branch = "master"
name = "github.com/kardianos/osext"
packages = ["."]
revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
[[projects]]
name = "github.com/kr/fs"
packages = ["."]
revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
version = "v0.1.0"
[[projects]]
name = "github.com/marstr/guid"
packages = ["."]
revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f"
version = "v1.1.0"
[[projects]]
name = "github.com/mattn/go-runewidth"
packages = ["."]
revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
version = "v0.0.2"
[[projects]]
branch = "master"
name = "github.com/ncw/go-acd"
packages = ["."]
revision = "887eb06ab6a255fbf5744b5812788e884078620a"
[[projects]]
name = "github.com/ncw/swift"
packages = ["."]
revision = "b2a7479cf26fa841ff90dd932d0221cb5c50782d"
version = "v1.0.39"
[[projects]]
branch = "master"
name = "github.com/nsf/termbox-go"
packages = ["."]
revision = "5c94acc5e6eb520f1bcd183974e01171cc4c23b3"
[[projects]]
branch = "master"
name = "github.com/okzk/sdnotify"
packages = ["."]
revision = "ed8ca104421a21947710335006107540e3ecb335"
[[projects]]
name = "github.com/patrickmn/go-cache"
packages = ["."]
revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0"
version = "v2.1.0"
[[projects]]
name = "github.com/pengsrc/go-shared"
packages = [
"buffer",
"check",
"convert",
"log",
"reopen"
]
revision = "807ee759d82c84982a89fb3dc875ef884942f1e5"
version = "v0.2.0"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
name = "github.com/pkg/sftp"
packages = ["."]
revision = "57673e38ea946592a59c26592b7e6fbda646975b"
version = "1.8.0"
[[projects]]
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
name = "github.com/rfjakob/eme"
packages = ["."]
revision = "01668ae55fe0b79a483095689043cce3e80260db"
version = "v1.1"
[[projects]]
name = "github.com/russross/blackfriday"
packages = ["."]
revision = "55d61fa8aa702f59229e6cff85793c22e580eaf5"
version = "v1.5.1"
[[projects]]
name = "github.com/satori/go.uuid"
packages = ["."]
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
version = "v1.2.0"
[[projects]]
branch = "master"
name = "github.com/sevlyar/go-daemon"
packages = ["."]
revision = "f9261e73885de99b1647d68bedadf2b9a99ad11f"
[[projects]]
branch = "master"
name = "github.com/skratchdot/open-golang"
packages = ["open"]
revision = "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"
[[projects]]
name = "github.com/spf13/cobra"
packages = [
".",
"doc"
]
revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
version = "v0.0.3"
[[projects]]
name = "github.com/spf13/pflag"
packages = ["."]
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
version = "v1.0.1"
[[projects]]
name = "github.com/stretchr/testify"
packages = [
"assert",
"require"
]
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
version = "v1.2.2"
[[projects]]
branch = "master"
name = "github.com/t3rm1n4l/go-mega"
packages = ["."]
revision = "57978a63bd3f91fa7e188b751a7e7e6dd4e33813"
[[projects]]
branch = "master"
name = "github.com/xanzy/ssh-agent"
packages = ["."]
revision = "ba9c9e33906f58169366275e3450db66139a31a9"
[[projects]]
name = "github.com/yunify/qingstor-sdk-go"
packages = [
".",
"config",
"logger",
"request",
"request/builder",
"request/data",
"request/errors",
"request/signer",
"request/unpacker",
"service",
"utils"
]
revision = "4f9ac88c5fec7350e960aabd0de1f1ede0ad2895"
version = "v2.2.14"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = [
"bcrypt",
"blowfish",
"curve25519",
"ed25519",
"ed25519/internal/edwards25519",
"internal/chacha20",
"internal/subtle",
"nacl/secretbox",
"pbkdf2",
"poly1305",
"salsa20/salsa",
"scrypt",
"ssh",
"ssh/agent",
"ssh/terminal"
]
revision = "027cca12c2d63e3d62b670d901e8a2c95854feec"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = [
"context",
"context/ctxhttp",
"html",
"html/atom",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"publicsuffix",
"webdav",
"webdav/internal/xml",
"websocket"
]
revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196"
[[projects]]
branch = "master"
name = "golang.org/x/oauth2"
packages = [
".",
"google",
"internal",
"jws",
"jwt"
]
revision = "1e0a3fa8ba9a5c9eb35c271780101fdaf1b205d7"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = [
"unix",
"windows"
]
revision = "6c888cc515d3ed83fc103cf1d84468aad274b0a7"
[[projects]]
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable"
]
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
name = "golang.org/x/time"
packages = ["rate"]
revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
[[projects]]
branch = "master"
name = "google.golang.org/api"
packages = [
"drive/v3",
"gensupport",
"googleapi",
"googleapi/internal/uritemplates",
"storage/v1"
]
revision = "2eea9ba0a3d94f6ab46508083e299a00bbbc65f6"
[[projects]]
name = "google.golang.org/appengine"
packages = [
".",
"internal",
"internal/app_identity",
"internal/base",
"internal/datastore",
"internal/log",
"internal/modules",
"internal/remote_api",
"internal/urlfetch",
"log",
"urlfetch"
]
revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
version = "v1.1.0"
[[projects]]
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "c1378c5fc821e27711155958ff64b3c74b56818ba4733dbfe0c86d518c32880e"
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml (new file, 11 lines)

@@ -0,0 +1,11 @@
# pin this to master to pull in the macOS changes
# can likely remove for 1.43
[[override]]
branch = "master"
name = "github.com/sevlyar/go-daemon"
# pin this to master to pull in the fix for linux/mips
# can likely remove for 1.43
[[override]]
branch = "master"
name = "github.com/coreos/bbolt"

ISSUE_TEMPLATE.md (new file, 43 lines)

@@ -0,0 +1,43 @@
<!--
Hi!
We understand you are having a problem with rclone or have an idea for an improvement - we want to help you with that!
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum
https://forum.rclone.org/
instead of filing an issue. We'll reply quickly and it won't increase our massive issue backlog.
If you think you might have found a bug, please can you try to replicate it with the latest beta?
https://beta.rclone.org/
If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
If you have an idea for an improvement, then please search the old issues first and if you don't find your idea, make a new issue.
Thanks
The Rclone Developers
-->
#### What is the problem you are having with rclone?
#### What is your rclone version (eg output from `rclone -V`)
#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
#### Which cloud storage system are you using? (eg Google Drive)
#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)

MAINTAINERS.md (modified)

@@ -1,21 +1,12 @@
 # Maintainers guide for rclone #
-Current active maintainers of rclone are:
-| Name | GitHub ID | Specific Responsibilities |
-| :--------------- | :---------------- | :-------------------------- |
-| Nick Craig-Wood | @ncw | overall project health |
-| Stefan Breunig | @breunigs | |
-| Ishuah Kariuki | @ishuah | |
-| Remus Bunduc | @remusb | cache backend |
-| Fabian Möller | @B4dM4n | |
-| Alex Chen | @Cnly | onedrive backend |
-| Sandeep Ummadi | @sandeepkru | azureblob backend |
-| Sebastian Bünger | @buengese | jottacloud & yandex backends |
-| Ivan Andreev | @ivandeex | chunker & mailru backends |
-| Max Sum | @Max-Sum | union backend |
-| Fred | @creativeprojects | seafile backend |
-| Caleb Case | @calebcase | tardigrade backend |
+Current active maintainers of rclone are
+* Nick Craig-Wood @ncw
+* Stefan Breunig @breunigs
+* Ishuah Kariuki @ishuah
+* Remus Bunduc @remusb - cache subsystem maintainer
+* Fabian Möller @B4dM4n
 **This is a work in progress Draft**
@@ -33,7 +24,7 @@ Rclone uses the labels like this:
 * `duplicate` - normally close these and ask the user to subscribe to the original
 * `enhancement: new remote` - a new rclone backend
 * `enhancement` - a new feature
-* `FUSE` - to do with `rclone mount` command
+* `FUSE` - do do with `rclone mount` command
 * `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
 * `help` wanted - mark these if you find a self contained issue - these get shown to new visitors to the project
 * `IMPORTANT` - note to maintainers not to forget to fix this for the release
@@ -55,7 +46,7 @@ The milestones have these meanings:
 * Help wanted - blue sky stuff that might get moved up, or someone could help with
 * Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
-Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
+Tickets [with no milestone](https://github.com/ncw/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
## Closing Tickets ## ## Closing Tickets ##
@@ -65,7 +56,7 @@ Close tickets as soon as you can - make sure they are tagged with a release. Po
 Try to process pull requests promptly!
-Merging pull requests on GitHub itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
+Merging pull requests on Github itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
 After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.

MANUAL.html (20279 lines changed; diff suppressed because it is too large)

MANUAL.md (22863 lines changed; diff suppressed because it is too large)

MANUAL.txt (22106 lines changed; diff suppressed because it is too large)

Makefile (modified, 252 lines)

@@ -1,139 +1,117 @@
 SHELL = bash
-# Branch we are working on
-BRANCH := $(or $(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
-# Tag of the current commit, if any. If this is not "" then we are building a release
-RELEASE_TAG := $(shell git tag -l --points-at HEAD)
-# Version of last release (may not be on this branch)
-VERSION := $(shell cat VERSION)
-# Last tag on this branch
-LAST_TAG := $(shell git describe --tags --abbrev=0)
-# Next version
-NEXT_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2+1,0}')
-NEXT_PATCH_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2,$$3+1}')
-# If we are working on a release, override branch to master
-ifdef RELEASE_TAG
-BRANCH := master
-LAST_TAG := $(shell git describe --abbrev=0 --tags $(VERSION)^)
-endif
-TAG_BRANCH := .$(BRANCH)
-BRANCH_PATH := branch/$(BRANCH)/
+BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
+TAG_BRANCH := -$(BRANCH)
+BRANCH_PATH := branch/
 # If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH
 ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
 TAG_BRANCH :=
 BRANCH_PATH :=
 endif
-# Make version suffix -beta.NNNN.CCCCCCCC (N=Commit number, C=Commit)
-VERSION_SUFFIX := -beta.$(shell git rev-list --count HEAD).$(shell git show --no-patch --no-notes --pretty='%h' HEAD)
-# TAG is current version + commit number + commit + branch
-TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)
-ifdef RELEASE_TAG
-TAG := $(RELEASE_TAG)
-endif
+TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
+LAST_TAG := $(shell git describe --tags --abbrev=0)
+NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
 GO_VERSION := $(shell go version)
-ifdef BETA_SUBDIR
-BETA_SUBDIR := /$(BETA_SUBDIR)
-endif
-BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
+GO_FILES := $(shell go list ./... | grep -v /vendor/ )
+# Run full tests if go >= go1.9
+FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 9)')
+BETA_PATH := $(BRANCH_PATH)$(TAG)
 BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
 BETA_UPLOAD_ROOT := memstore:beta-rclone-org
 BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
 # Pass in GOTAGS=xyz on the make command line to set build tags
 ifdef GOTAGS
 BUILDTAGS=-tags "$(GOTAGS)"
-LINTTAGS=--build-tags "$(GOTAGS)"
 endif
-.PHONY: rclone test_all vars version
+.PHONY: rclone vars version
 rclone:
-	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
-	mkdir -p `go env GOPATH`/bin/
-	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
-	mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
-test_all:
-	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
+	touch fs/version.go
+	go install -v --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
+	cp -av `go env GOPATH`/bin/rclone .
 vars:
 	@echo SHELL="'$(SHELL)'"
 	@echo BRANCH="'$(BRANCH)'"
 	@echo TAG="'$(TAG)'"
-	@echo VERSION="'$(VERSION)'"
+	@echo LAST_TAG="'$(LAST_TAG)'"
+	@echo NEW_TAG="'$(NEW_TAG)'"
 	@echo GO_VERSION="'$(GO_VERSION)'"
+	@echo FULL_TESTS="'$(FULL_TESTS)'"
 	@echo BETA_URL="'$(BETA_URL)'"
-btest:
-	@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
-	@echo "Copied markdown of beta release to clip board"
 version:
 	@echo '$(TAG)'
 # Full suite of integration tests
-test: rclone test_all
-	-test_all 2>&1 | tee test_all.log
-	@echo "Written logs in test_all.log"
+test: rclone
+	go install github.com/ncw/rclone/fstest/test_all
+	-go test -v -count 1 $(BUILDTAGS) $(GO_FILES) 2>&1 | tee test.log
+	-test_all github.com/ncw/rclone/fs/operations github.com/ncw/rclone/fs/sync 2>&1 | tee fs/test_all.log
+	@echo "Written logs in test.log and fs/test_all.log"
 # Quick test
 quicktest:
-	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...
-racequicktest:
-	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
+	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
+ifdef FULL_TESTS
+	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
+endif
 # Do source code quality checks
 check: rclone
-	@echo "-- START CODE QUALITY REPORT -------------------------------"
-	@golangci-lint run $(LINTTAGS) ./...
-	@echo "-- END CODE QUALITY REPORT ---------------------------------"
+ifdef FULL_TESTS
+	go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
+	errcheck $(BUILDTAGS) ./...
+	find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
+	go list ./... | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
+else
+	@echo Skipping source quality tests as version of go too old
+endif
+gometalinter_install:
+	go get -u github.com/alecthomas/gometalinter
+	gometalinter --install --update
+# We aren't using gometalinter as the default linter yet because
+# 1. it doesn't support build tags: https://github.com/alecthomas/gometalinter/issues/275
+# 2. can't get -printfuncs working with the vet linter
+gometalinter:
+	gometalinter ./...
 # Get the build dependencies
 build_dep:
-	go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
+ifdef FULL_TESTS
+	go get -u github.com/kisielk/errcheck
+	go get -u golang.org/x/tools/cmd/goimports
+	go get -u github.com/golang/lint/golint
+	go get -u github.com/tools/godep
+endif
-# Get the release dependencies we only install on linux
-release_dep_linux:
-	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
-	go run bin/get-github-release.go -extract github-release aktau/github-release 'linux-amd64-github-release.tar.bz2'
-# Get the release dependencies we only install on Windows
-release_dep_windows:
-	GO111MODULE=off GOOS="" GOARCH="" go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo
+# Get the release dependencies
+release_dep:
+	go get -u github.com/goreleaser/nfpm/...
+	go get -u github.com/aktau/github-release
 # Update dependencies
-showupdates:
-	@echo "*** Direct dependencies that could be updated ***"
-	@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
-# Update direct and indirect dependencies and test dependencies
 update:
-	GO111MODULE=on go get -u -t ./...
-	-#GO111MODULE=on go get -d $(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
-	GO111MODULE=on go mod tidy
-# Tidy the module dependencies
-tidy:
-	GO111MODULE=on go mod tidy
+	go get -u github.com/golang/dep/cmd/dep
+	dep ensure -update -v
-doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
+doc: rclone.1 MANUAL.html MANUAL.txt
 rclone.1: MANUAL.md
-	pandoc -s --from markdown-smart --to man MANUAL.md -o rclone.1
+	pandoc -s --from markdown --to man MANUAL.md -o rclone.1
-MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs
+MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs
 	./bin/make_manual.py
 MANUAL.html: MANUAL.md
-	pandoc -s --from markdown-smart --to html MANUAL.md -o MANUAL.html
+	pandoc -s --from markdown --to html MANUAL.md -o MANUAL.html
 MANUAL.txt: MANUAL.md
-	pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
+	pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
 commanddocs: rclone
-	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
-backenddocs: rclone bin/make_backend_docs.py
-	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
+	rclone gendocs docs/content/commands/
 rcdocs: rclone
 	bin/make_rc_docs.sh
@@ -149,27 +127,14 @@ clean:
rm -f rclone fs/operations/operations.test fs/sync/sync.test fs/test_all.log test.log rm -f rclone fs/operations/operations.test fs/sync/sync.test fs/test_all.log test.log
website: website:
rm -rf docs/public
cd docs && hugo cd docs && hugo
@if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi
upload_website: website upload_website: website
rclone -v sync docs/public memstore:www-rclone-org rclone -v sync docs/public memstore:www-rclone-org
upload_test_website: website
rclone -P sync docs/public test-rclone-org:
validate_website: website
find docs/public -type f -name "*.html" | xargs tidy --mute-id yes -errors --gnu-emacs yes --drop-empty-elements no --warn-proprietary-attributes no --mute MISMATCHED_ATTRIBUTE_WARN
tarball: tarball:
git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG) git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)
vendorball:
go mod vendor
tar -zcf build/rclone-$(TAG)-vendor.tar.gz vendor
rm -rf vendor
sign_upload: sign_upload:
cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS
@@ -181,8 +146,8 @@ check_sign:
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
upload: upload:
rclone -P copy build/ memstore:downloads-rclone-org/$(TAG) rclone -v copy --exclude '*current*' build/ memstore:downloads-rclone-org/$(TAG)
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"' rclone -v copy --include '*current*' --include version.txt build/ memstore:downloads-rclone-org
upload_github: upload_github:
./bin/upload-github $(TAG) ./bin/upload-github $(TAG)
@@ -191,69 +156,74 @@ cross: doc
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG) go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
beta: beta:
go run bin/cross-compile.go $(BUILDTAGS) $(TAG) go run bin/cross-compile.go $(BUILDTAGS) $(TAG)β
rclone -v copy build/ memstore:pub-rclone-org/$(TAG) rclone -v copy build/ memstore:pub-rclone-org/$(TAG)β
@echo Beta release ready at https://pub.rclone.org/$(TAG)/ @echo Beta release ready at https://pub.rclone.org/$(TAG)%CE%B2/
log_since_last_release: log_since_last_release:
git log $(LAST_TAG).. git log $(LAST_TAG)..
compile_all: compile_all:
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG) ifdef FULL_TESTS
go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)β
ci_upload: else
sudo chown -R $$USER build @echo Skipping compile all as version of go too old
find build -type l -delete
gzip -r9v build
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifndef BRANCH_PATH
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif endif
@echo Beta release ready at $(BETA_URL)/testbuilds
ci_beta: appveyor_upload:
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD) rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR) rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif endif
@echo Beta release ready at $(BETA_URL) @echo Beta release ready at $(BETA_URL)
# Fetch the binary builds from GitHub actions BUILD_FLAGS := -exclude "^(windows|darwin)/"
fetch_binaries: ifeq ($(TRAVIS_OS_NAME),osx)
rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/ BUILD_FLAGS := -include "^darwin/" -cgo
endif
travis_beta:
ifeq ($(TRAVIS_OS_NAME),linux)
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
endif
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)β
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)
# Fetch the windows builds from appveyor
fetch_windows:
rclone -v copy --include 'rclone-v*-windows-*.zip' $(BETA_UPLOAD) build/
-#cp -av build/rclone-v*-windows-386.zip build/rclone-current-windows-386.zip
-#cp -av build/rclone-v*-windows-amd64.zip build/rclone-current-windows-amd64.zip
md5sum build/rclone-*-windows-*.zip | sort
serve: website serve: website
cd docs && hugo server -v -w --disableFastRender cd docs && hugo server -v -w
tag: retag doc tag: doc
bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new @echo "Old tag is $(LAST_TAG)"
mv docs/content/changelog.md.new docs/content/changelog.md @echo "New tag is $(NEW_TAG)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html
git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG)
@echo "Edit the new changelog in docs/content/changelog.md" @echo "Edit the new changelog in docs/content/changelog.md"
@echo "Then commit all the changes" @echo " * $(NEW_TAG) -" `date -I` >> docs/content/changelog.md
@echo git commit -m \"Version $(VERSION)\" -a -v @git log $(LAST_TAG)..$(NEW_TAG) --oneline >> docs/content/changelog.md
@echo "Then commit the changes"
@echo git commit -m \"Version $(NEW_TAG)\" -a -v
@echo "And finally run make retag before make cross etc" @echo "And finally run make retag before make cross etc"
retag: retag:
@echo "Version is $(VERSION)" git tag -f -s -m "Version $(LAST_TAG)" $(LAST_TAG)
git tag -f -s -m "Version $(VERSION)" $(VERSION)
startdev: startdev:
@echo "Version is $(VERSION)" echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(LAST_TAG)-DEV\"\n" | gofmt > fs/version.go
@echo "Next version is $(NEXT_VERSION)" git commit -m "Start $(LAST_TAG)-DEV development" fs/version.go
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_VERSION)" > VERSION
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
startstable:
@echo "Version is $(VERSION)"
@echo "Next stable version is $(NEXT_PATCH_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_PATCH_VERSION)" > VERSION
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
winzip: winzip:
zip -9 rclone-$(TAG).zip rclone.exe zip -9 rclone-$(TAG).zip rclone.exe
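For orientation in this comparison, the `-` lines are the v1.53.1-era master Makefile and the `+` lines are the older branch. On both sides the `tag`/`startdev` targets generate `fs/version.go` by piping an `echo` through `gofmt`; the whole generated file is tiny. A minimal sketch of what it contains (the version string is whatever tag the target was run with, shown here with an illustrative value):

```go
package fs

// Version of rclone
var Version = "v1.53.1-DEV" // written by `make startdev` / `make tag`, baked into the binary
```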

README.md
@@ -1,111 +1,61 @@
-[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
+[![Logo](https://rclone.org/img/rclone-120x120.png)](https://rclone.org/)

 [Website](https://rclone.org) |
 [Documentation](https://rclone.org/docs/) |
-[Download](https://rclone.org/downloads/) |
 [Contributing](CONTRIBUTING.md) |
 [Changelog](https://rclone.org/changelog/) |
 [Installation](https://rclone.org/install/) |
 [Forum](https://forum.rclone.org/)
+[G+](https://google.com/+RcloneOrg)

-[![Build Status](https://github.com/rclone/rclone/workflows/build/badge.svg)](https://github.com/rclone/rclone/actions?query=workflow%3Abuild)
-[![Go Report Card](https://goreportcard.com/badge/github.com/rclone/rclone)](https://goreportcard.com/report/github.com/rclone/rclone)
-[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)
-[![Docker Pulls](https://img.shields.io/docker/pulls/rclone/rclone)](https://hub.docker.com/r/rclone/rclone)
+[![Build Status](https://travis-ci.org/ncw/rclone.svg?branch=master)](https://travis-ci.org/ncw/rclone)
+[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/ncw/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/ncw/rclone)
+[![CircleCI](https://circleci.com/gh/ncw/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/ncw/rclone/tree/master)
+[![GoDoc](https://godoc.org/github.com/ncw/rclone?status.svg)](https://godoc.org/github.com/ncw/rclone)

-# Rclone
-
-Rclone *("rsync for cloud storage")* is a command line program to sync files and directories to and from different cloud storage providers.
-
-## Storage providers
-
-* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
-* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
-* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
-* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
-* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
-* Box [:page_facing_up:](https://rclone.org/box/)
-* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
-* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
-* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
-* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
-* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
-* FTP [:page_facing_up:](https://rclone.org/ftp/)
-* GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
-* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
-* Google Drive [:page_facing_up:](https://rclone.org/drive/)
-* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
-* HTTP [:page_facing_up:](https://rclone.org/http/)
-* Hubic [:page_facing_up:](https://rclone.org/hubic/)
-* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
-* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
-* Koofr [:page_facing_up:](https://rclone.org/koofr/)
-* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
-* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
-* Mega [:page_facing_up:](https://rclone.org/mega/)
-* Memory [:page_facing_up:](https://rclone.org/memory/)
-* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
-* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
-* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
-* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
-* OVH [:page_facing_up:](https://rclone.org/swift/)
-* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
-* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
-* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
-* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
-* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
-* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
-* put.io [:page_facing_up:](https://rclone.org/putio/)
-* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
-* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
-* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
-* Seafile [:page_facing_up:](https://rclone.org/seafile/)
-* SFTP [:page_facing_up:](https://rclone.org/sftp/)
-* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
-* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
-* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
-* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
-* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
-* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
-* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
-* The local filesystem [:page_facing_up:](https://rclone.org/local/)
-
-Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
-
-## Features
+Rclone is a command line program to sync files and directories to and from
+
+* Amazon Drive
+* Amazon S3 / Dreamhost / Ceph / Minio / Wasabi
+* Backblaze B2
+* Box
+* Dropbox
+* FTP
+* Google Cloud Storage
+* Google Drive
+* HTTP
+* Hubic
+* Mega
+* Microsoft Azure Blob Storage
+* Microsoft OneDrive
+* OpenDrive
+* Openstack Swift / Rackspace cloud files / Memset Memstore / OVH / Oracle Cloud Storage
+* pCloud
+* QingStor
+* SFTP
+* Webdav / Owncloud / Nextcloud
+* Yandex Disk
+* The local filesystem
+
+Features

-* MD5/SHA-1 hashes checked at all times for file integrity
+* MD5/SHA1 hashes checked at all times for file integrity
 * Timestamps preserved on files
 * Partial syncs supported on a whole file basis
-* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
-* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
-* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
-* Can sync to and from network, e.g. two different cloud accounts
-* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
-* Optional encryption ([Crypt](https://rclone.org/crypt/))
-* Optional cache ([Cache](https://rclone.org/cache/))
-* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
-* Multi-threaded downloads to local disk
-* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
+* Copy mode to just copy new/changed files
+* Sync (one way) mode to make a directory identical
+* Check mode to check for file hash equality
+* Can sync to and from network, eg two different cloud accounts
+* Optional encryption (Crypt)
+* Optional FUSE mount

-## Installation & documentation
-
-Please see the [rclone website](https://rclone.org/) for:
-
-* [Installation](https://rclone.org/install/)
-* [Documentation & configuration](https://rclone.org/docs/)
-* [Changelog](https://rclone.org/changelog/)
-* [FAQ](https://rclone.org/faq/)
-* [Storage providers](https://rclone.org/overview/)
-* [Forum](https://forum.rclone.org/)
-* ...and more
-
-## Downloads
-
-* https://rclone.org/downloads/
+See the home page for installation, usage, documentation, changelog
+and configuration walkthroughs.
+
+* https://rclone.org/

 License
 -------

 This is free software under the terms of MIT the license (check the
-[COPYING file](/COPYING) included in this package).
+COPYING file included in this package).

RELEASE.md
@@ -1,87 +1,41 @@
-# Release
-
-This file describes how to make the various kinds of releases
-
-## Extra required software for making a release
+Extra required software for making a release

 * [github-release](https://github.com/aktau/github-release) for uploading packages
 * pandoc for making the html and man pages

-## Making a release
+Making a release

-* git checkout master # see below for stable branch
-* git pull
 * git status - make sure everything is checked in
-* Check GitHub actions build for master is Green
-* make check
+* Check travis & appveyor builds are green
 * make test # see integration test server or run locally
 * make tag
-* edit docs/content/changelog.md # make sure to remove duplicate logs from point releases
-* make tidy
+* edit docs/content/changelog.md
 * make doc
 * git status - to check for new man pages - git add them
-* git commit -a -v -m "Version v1.XX.0"
+* git commit -a -v -m "Version v1.XX"
 * make retag
+* make release_dep
+* # Set the GOPATH for a current stable go compiler
+* make cross
+* git checkout docs/content/commands # to undo date changes in commands
 * git push --tags origin master
-* # Wait for the GitHub builds to complete then...
-* make fetch_binaries
+* git push --tags origin master:stable # update the stable branch for packager.io
+* # Wait for the appveyor and travis builds to complete then fetch the windows binaries from appveyor
+* make fetch_windows
 * make tarball
-* make vendorball
 * make sign_upload
 * make check_sign
 * make upload
 * make upload_website
 * make upload_github
-* make startdev # make startstable for stable branch
-* # announce with forum post, twitter post, patreon post
+* make startdev
+* # announce with forum post, twitter post, G+ post

-Early in the next release cycle update the dependencies
+Early in the next release cycle update the vendored dependencies

-* Review any pinned packages in go.mod and remove if possible
+* Review any pinned packages in Gopkg.toml and remove if possible
 * make update
 * git status
 * git add new files
+* carry forward any patches to vendor stuff
 * git commit -a -v

-## Making a point release
-
-If rclone needs a point release due to some horrendous bug:
-
-Set vars
-
-* BASE_TAG=v1.XX # eg v1.52
-* NEW_TAG=${BASE_TAG}.Y # eg v1.52.1
-* echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
-
-First make the release branch. If this is a second point release then
-this will be done already.
-
-* git branch ${BASE_TAG} ${BASE_TAG}-stable
-* git co ${BASE_TAG}-stable
-* make startstable
-
-Now
-
-* git co ${BASE_TAG}-stable
-* git cherry-pick any fixes
-* Do the steps as above
-* make startstable
-* NB this overwrites the current beta so we need to do this - FIXME is this true any more?
-* git co master
-* # cherry pick the changes to the changelog
-* git checkout ${BASE_TAG}-stable docs/content/changelog.md
-* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
-* git push
-
-## Making a manual build of docker
-
-The rclone docker image should autobuild on via GitHub actions. If it doesn't
-or needs to be updated then rebuild like this.
-
-```
-docker pull golang
-docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .
-docker push rclone/rclone:1.52.0
-docker push rclone/rclone:1.52
-docker push rclone/rclone:1
-docker push rclone/rclone:latest
-```
+Make the version number be just in a file?

VERSION
@@ -1 +0,0 @@
-v1.53.1

backend/alias/alias.go
@@ -2,50 +2,44 @@ package alias

 import (
 	"errors"
+	"path"
+	"path/filepath"
 	"strings"

-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/cache"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/fspath"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
 )

 // Register with Fs
 func init() {
 	fsi := &fs.RegInfo{
 		Name:        "alias",
-		Description: "Alias for an existing remote",
+		Description: "Alias for a existing remote",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name:     "remote",
 			Help:     "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
-			Required: true,
 		}},
 	}
 	fs.Register(fsi)
 }

-// Options defines the configuration for this backend
-type Options struct {
-	Remote string `config:"remote"`
-}
-
-// NewFs constructs an Fs from the path.
+// NewFs contstructs an Fs from the path.
 //
 // The returned Fs is the actual Fs, referenced by remote in the config
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-	// Parse config into Options struct
-	opt := new(Options)
-	err := configstruct.Set(m, opt)
-	if err != nil {
-		return nil, err
-	}
-	if opt.Remote == "" {
-		return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting")
-	}
-	if strings.HasPrefix(opt.Remote, name+":") {
-		return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
-	}
-	return cache.Get(fspath.JoinRootPath(opt.Remote, root))
+func NewFs(name, root string) (fs.Fs, error) {
+	remote := config.FileGet(name, "remote")
+	if remote == "" {
+		return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting")
+	}
+	if strings.HasPrefix(remote, name+":") {
+		return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
+	}
+	fsInfo, configName, fsPath, err := fs.ParseRemote(remote)
+	if err != nil {
+		return nil, err
+	}
+	root = filepath.ToSlash(root)
+	return fsInfo.NewFs(configName, path.Join(fsPath, root))
 }
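The `-` side of this hunk shows the configmap/configstruct style that replaced per-key config.FileGet calls: the backend declares an Options struct whose `config:"..."` tags name the config keys, and configstruct.Set fills it from any configmap.Mapper. A minimal standalone sketch of the pattern (the `read_only` key is hypothetical, added only to show type conversion; configmap.Simple is the map-backed Mapper from fs/config/configmap):

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
)

// Options mirrors the backend pattern above: struct tags name the config keys.
type Options struct {
	Remote   string `config:"remote"`
	ReadOnly bool   `config:"read_only"` // hypothetical key, for illustration only
}

func main() {
	// configmap.Simple is a plain map[string]string implementing the Mapper interface.
	m := configmap.Simple{"remote": "/local/path", "read_only": "true"}
	opt := new(Options)
	if err := configstruct.Set(m, opt); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", *opt) // {Remote:/local/path ReadOnly:true}
}
```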

backend/alias/alias_test.go
@@ -1,16 +1,15 @@
 package alias

 import (
-	"context"
 	"fmt"
 	"path"
 	"path/filepath"
 	"sort"
 	"testing"

-	_ "github.com/rclone/rclone/backend/local" // pull in test backend
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config"
+	_ "github.com/ncw/rclone/backend/local" // pull in test backend
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
 	"github.com/stretchr/testify/require"
 )

@@ -70,7 +69,7 @@ func TestNewFS(t *testing.T) {
 			prepare(t, remoteRoot)
 			f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
 			require.NoError(t, err, what)
-			gotEntries, err := f.List(context.Background(), test.fsList)
+			gotEntries, err := f.List(test.fsList)
 			require.NoError(t, err, what)

 			sort.Sort(gotEntries)

@@ -81,7 +80,7 @@ func TestNewFS(t *testing.T) {
 				wantEntry := test.entries[i]

 				require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
-				require.Equal(t, wantEntry.size, gotEntry.Size(), what)
+				require.Equal(t, wantEntry.size, int64(gotEntry.Size()), what)
 				_, isDir := gotEntry.(fs.Directory)
 				require.Equal(t, wantEntry.isDir, isDir, what)
 			}

backend/all/all.go
@@ -2,42 +2,28 @@ package all

 import (
 	// Active file systems
-	_ "github.com/rclone/rclone/backend/alias"
-	_ "github.com/rclone/rclone/backend/amazonclouddrive"
-	_ "github.com/rclone/rclone/backend/azureblob"
-	_ "github.com/rclone/rclone/backend/b2"
-	_ "github.com/rclone/rclone/backend/box"
-	_ "github.com/rclone/rclone/backend/cache"
-	_ "github.com/rclone/rclone/backend/chunker"
-	_ "github.com/rclone/rclone/backend/crypt"
-	_ "github.com/rclone/rclone/backend/drive"
-	_ "github.com/rclone/rclone/backend/dropbox"
-	_ "github.com/rclone/rclone/backend/fichier"
-	_ "github.com/rclone/rclone/backend/ftp"
-	_ "github.com/rclone/rclone/backend/googlecloudstorage"
-	_ "github.com/rclone/rclone/backend/googlephotos"
-	_ "github.com/rclone/rclone/backend/http"
-	_ "github.com/rclone/rclone/backend/hubic"
-	_ "github.com/rclone/rclone/backend/jottacloud"
-	_ "github.com/rclone/rclone/backend/koofr"
-	_ "github.com/rclone/rclone/backend/local"
-	_ "github.com/rclone/rclone/backend/mailru"
-	_ "github.com/rclone/rclone/backend/mega"
-	_ "github.com/rclone/rclone/backend/memory"
-	_ "github.com/rclone/rclone/backend/onedrive"
-	_ "github.com/rclone/rclone/backend/opendrive"
-	_ "github.com/rclone/rclone/backend/pcloud"
-	_ "github.com/rclone/rclone/backend/premiumizeme"
-	_ "github.com/rclone/rclone/backend/putio"
-	_ "github.com/rclone/rclone/backend/qingstor"
-	_ "github.com/rclone/rclone/backend/s3"
-	_ "github.com/rclone/rclone/backend/seafile"
-	_ "github.com/rclone/rclone/backend/sftp"
-	_ "github.com/rclone/rclone/backend/sharefile"
-	_ "github.com/rclone/rclone/backend/sugarsync"
-	_ "github.com/rclone/rclone/backend/swift"
-	_ "github.com/rclone/rclone/backend/tardigrade"
-	_ "github.com/rclone/rclone/backend/union"
-	_ "github.com/rclone/rclone/backend/webdav"
-	_ "github.com/rclone/rclone/backend/yandex"
+	_ "github.com/ncw/rclone/backend/alias"
+	_ "github.com/ncw/rclone/backend/amazonclouddrive"
+	_ "github.com/ncw/rclone/backend/azureblob"
+	_ "github.com/ncw/rclone/backend/b2"
+	_ "github.com/ncw/rclone/backend/box"
+	_ "github.com/ncw/rclone/backend/cache"
+	_ "github.com/ncw/rclone/backend/crypt"
+	_ "github.com/ncw/rclone/backend/drive"
+	_ "github.com/ncw/rclone/backend/dropbox"
+	_ "github.com/ncw/rclone/backend/ftp"
+	_ "github.com/ncw/rclone/backend/googlecloudstorage"
+	_ "github.com/ncw/rclone/backend/http"
+	_ "github.com/ncw/rclone/backend/hubic"
+	_ "github.com/ncw/rclone/backend/local"
+	_ "github.com/ncw/rclone/backend/mega"
+	_ "github.com/ncw/rclone/backend/onedrive"
+	_ "github.com/ncw/rclone/backend/opendrive"
+	_ "github.com/ncw/rclone/backend/pcloud"
+	_ "github.com/ncw/rclone/backend/qingstor"
+	_ "github.com/ncw/rclone/backend/s3"
+	_ "github.com/ncw/rclone/backend/sftp"
+	_ "github.com/ncw/rclone/backend/swift"
+	_ "github.com/ncw/rclone/backend/webdav"
+	_ "github.com/ncw/rclone/backend/yandex"
 )
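Only the module path changes in this file; the mechanism is the same on both sides. Each backend registers itself from an init function (exactly as alias.go does with fs.Register above), so one blank import per backend is all it takes to make it reachable through fs.NewFs. A sketch of the pattern with a hypothetical backend, using the new-style NewFs signature seen in this diff:

```go
package mybackend // hypothetical backend, for illustration only

import (
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

// init runs as soon as any binary imports this package, even blank-imported
// as _ "github.com/rclone/rclone/backend/mybackend" from backend/all.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "mybackend",
		Description: "Sketch of backend self-registration",
		NewFs:       NewFs,
	})
}

// NewFs would construct the backend; stubbed out here.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	return nil, errors.New("mybackend: not implemented")
}
```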

backend/amazonclouddrive/amazonclouddrive.go
@@ -12,7 +12,6 @@ we ignore assets completely!
 */

 import (
-	"context"
 	"encoding/json"
 	"fmt"
 	"io"

@@ -22,34 +21,35 @@ import (
 	"strings"
 	"time"

-	acd "github.com/ncw/go-acd"
+	"github.com/ncw/go-acd"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/fshttp"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/lib/dircache"
+	"github.com/ncw/rclone/lib/oauthutil"
+	"github.com/ncw/rclone/lib/pacer"
+	"github.com/ncw/rclone/lib/rest"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/fserrors"
-	"github.com/rclone/rclone/fs/fshttp"
-	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/lib/dircache"
-	"github.com/rclone/rclone/lib/encoder"
-	"github.com/rclone/rclone/lib/oauthutil"
-	"github.com/rclone/rclone/lib/pacer"
 	"golang.org/x/oauth2"
 )

 const (
 	folderKind = "FOLDER"
 	fileKind   = "FILE"
 	statusAvailable = "AVAILABLE"
 	timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
 	minSleep = 20 * time.Millisecond
 	warnFileSize = 50000 << 20 // Display warning for files larger than this size
-	defaultTempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
 )

 // Globals
 var (
+	// Flags
+	tempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
+	uploadWaitPerGB   = flags.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
 	// Description of how to auth for this app
 	acdConfig = &oauth2.Config{
 		Scopes: []string{"clouddrive:read_all", "clouddrive:write"},

@@ -67,93 +67,45 @@ var (
 func init() {
 	fs.Register(&fs.RegInfo{
 		Name:        "amazon cloud drive",
-		Prefix:      "acd",
 		Description: "Amazon Drive",
 		NewFs:       NewFs,
-		Config: func(name string, m configmap.Mapper) {
-			err := oauthutil.Config("amazon cloud drive", name, m, acdConfig, nil)
+		Config: func(name string) {
+			err := oauthutil.Config("amazon cloud drive", name, acdConfig)
 			if err != nil {
 				log.Fatalf("Failed to configure token: %v", err)
 			}
 		},
-		Options: append(oauthutil.SharedOptions, []fs.Option{{
-			Name:     "checkpoint",
-			Help:     "Checkpoint for internal polling (debug).",
-			Hide:     fs.OptionHideBoth,
-			Advanced: true,
-		}, {
-			Name: "upload_wait_per_gb",
-			Help: `Additional time per GB to wait after a failed complete upload to see if it appears.
-Sometimes Amazon Drive gives an error when a file has been fully
-uploaded but the file appears anyway after a little while.  This
-happens sometimes for files over 1GB in size and nearly every time for
-files bigger than 10GB. This parameter controls the time rclone waits
-for the file to appear.
-The default value for this parameter is 3 minutes per GB, so by
-default it will wait 3 minutes for every GB uploaded to see if the
-file appears.
-You can disable this feature by setting it to 0. This may cause
-conflict errors as rclone retries the failed upload but the file will
-most likely appear correctly eventually.
-These values were determined empirically by observing lots of uploads
-of big files for a range of file sizes.
-Upload with the "-v" flag to see more info about what rclone is doing
-in this situation.`,
-			Default:  fs.Duration(180 * time.Second),
-			Advanced: true,
-		}, {
-			Name: "templink_threshold",
-			Help: `Files >= this size will be downloaded via their tempLink.
-Files this size or more will be downloaded via their "tempLink". This
-is to work around a problem with Amazon Drive which blocks downloads
-of files bigger than about 10GB. The default for this is 9GB which
-shouldn't need to be changed.
-To download files above this threshold, rclone requests a "tempLink"
-which downloads the file through a temporary URL directly from the
-underlying S3 storage.`,
-			Default:  defaultTempLinkThreshold,
-			Advanced: true,
-		}, {
-			Name:     config.ConfigEncoding,
-			Help:     config.ConfigEncodingHelp,
-			Advanced: true,
-			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
-			Default: (encoder.Base |
-				encoder.EncodeInvalidUtf8),
-		}}...),
+		Options: []fs.Option{{
+			Name: config.ConfigClientID,
+			Help: "Amazon Application Client Id - required.",
+		}, {
+			Name: config.ConfigClientSecret,
+			Help: "Amazon Application Client Secret - required.",
+		}, {
+			Name: config.ConfigAuthURL,
+			Help: "Auth server URL - leave blank to use Amazon's.",
+		}, {
+			Name: config.ConfigTokenURL,
+			Help: "Token server url - leave blank to use Amazon's.",
+		}},
 	})
+	flags.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
 }

-// Options defines the configuration for this backend
-type Options struct {
-	Checkpoint        string               `config:"checkpoint"`
-	UploadWaitPerGB   fs.Duration          `config:"upload_wait_per_gb"`
-	TempLinkThreshold fs.SizeSuffix        `config:"templink_threshold"`
-	Enc               encoder.MultiEncoder `config:"encoding"`
-}
-
 // Fs represents a remote acd server
 type Fs struct {
 	name         string             // name of this remote
 	features     *fs.Features       // optional features
-	opt          Options            // options for this Fs
 	c            *acd.Client        // the connection to the acd server
 	noAuthClient *http.Client       // unauthenticated http client
 	root         string             // the path we are working on
 	dirCache     *dircache.DirCache // Map of directory path to directory id
-	pacer        *fs.Pacer          // pacer for API calls
+	pacer        *pacer.Pacer       // pacer for API calls
 	trueRootID   string             // ID of true root directory
 	tokenRenewer *oauthutil.Renew   // renew the token on expiry
 }

-// Object describes an acd object
+// Object describes a acd object
 //
 // Will definitely have info but maybe not meta
 type Object struct {

@@ -213,7 +165,7 @@ func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
 	}
 	// Work around receiving this error sporadically on authentication
 	//
-	// HTTP code 403: "403 Forbidden", response body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
+	// HTTP code 403: "403 Forbidden", reponse body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
 	if resp.StatusCode == 403 && strings.Contains(err.Error(), "Authorization header requires") {
 		fs.Debugf(f, "403 \"Authorization header requires...\" error received - retry")
 		return true, err
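shouldRetry feeds the pacer that throttles and retries every API call in this backend. The two sides construct the pacer differently (fs.NewPacer(pacer.NewAmazonCloudDrive(...)) on master versus the older pacer.New().SetMinSleep(...).SetPacer(pacer.AmazonCloudDrivePacer) chain), but the calling idiom, visible throughout the hunks below, is unchanged. A runnable sketch against the master-side constructor as it appears in this diff, with a deliberately flaky closure standing in for an acd call:

```go
package main

import (
	"fmt"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/pacer"
)

func main() {
	// Same construction as the `-` side of NewFs below.
	p := fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(20 * time.Millisecond)))
	tries := 0
	// Call re-invokes the closure while it returns (true, err), sleeping
	// between attempts - this is what wraps folder.GetFolder etc. below.
	err := p.Call(func() (bool, error) {
		tries++
		if tries < 3 {
			return true, fmt.Errorf("transient error on attempt %d", tries)
		}
		return false, nil
	})
	fmt.Println(tries, err) // 3 <nil>
}
```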
@@ -239,14 +191,7 @@ func filterRequest(req *http.Request) {
 }

 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-	ctx := context.Background()
-	// Parse config into Options struct
-	opt := new(Options)
-	err := configstruct.Set(m, opt)
-	if err != nil {
-		return nil, err
-	}
+func NewFs(name, root string) (fs.Fs, error) {
 	root = parsePath(root)
 	baseClient := fshttp.NewClient(fs.Config)
 	if do, ok := baseClient.Transport.(interface {

@@ -256,18 +201,17 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	} else {
 		fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
 	}
-	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
+	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, acdConfig, baseClient)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to configure Amazon Drive")
+		log.Fatalf("Failed to configure Amazon Drive: %v", err)
 	}

 	c := acd.NewClient(oAuthClient)
 	f := &Fs{
 		name:         name,
 		root:         root,
-		opt:          *opt,
 		c:            c,
-		pacer:        fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
+		pacer:        pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
 		noAuthClient: fshttp.NewClient(fs.Config),
 	}
 	f.features = (&fs.Features{

@@ -302,20 +246,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	f.dirCache = dircache.New(root, f.trueRootID, f)

 	// Find the current root
-	err = f.dirCache.FindRoot(ctx, false)
+	err = f.dirCache.FindRoot(false)
 	if err != nil {
 		// Assume it is a file
 		newRoot, remote := dircache.SplitPath(root)
-		tempF := *f
-		tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
-		tempF.root = newRoot
+		newF := *f
+		newF.dirCache = dircache.New(newRoot, f.trueRootID, &newF)
+		newF.root = newRoot
 		// Make new Fs which is the parent
-		err = tempF.dirCache.FindRoot(ctx, false)
+		err = newF.dirCache.FindRoot(false)
 		if err != nil {
 			// No root so return old f
 			return f, nil
 		}
-		_, err := tempF.newObjectWithInfo(ctx, remote, nil)
+		_, err := newF.newObjectWithInfo(remote, nil)
 		if err != nil {
 			if err == fs.ErrorObjectNotFound {
 				// File doesn't exist so return old f

@@ -323,13 +267,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 			}
 			return nil, err
 		}
-		// XXX: update the old f here instead of returning tempF, since
-		// `features` were already filled with functions having *f as a receiver.
-		// See https://github.com/rclone/rclone/issues/2182
-		f.dirCache = tempF.dirCache
-		f.root = tempF.root
 		// return an error with an fs which points to the parent
-		return f, fs.ErrorIsFile
+		return &newF, fs.ErrorIsFile
 	}
 	return f, nil
 }

@@ -347,7 +286,7 @@ func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Node) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error) {
 	o := &Object{
 		fs:     f,
 		remote: remote,

@@ -356,7 +295,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Nod
 		// Set info but not meta
 		o.info = info
 	} else {
-		err := o.readMetaData(ctx) // reads info and meta, returning an error
+		err := o.readMetaData() // reads info and meta, returning an error
 		if err != nil {
 			return nil, err
 		}

@@ -366,18 +305,18 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Nod
 // NewObject finds the Object at remote.  If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-	return f.newObjectWithInfo(ctx, remote, nil)
+func (f *Fs) NewObject(remote string) (fs.Object, error) {
+	return f.newObjectWithInfo(remote, nil)
 }

 // FindLeaf finds a directory of name leaf in the folder with ID pathID
-func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
+func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
 	//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
 	folder := acd.FolderFromId(pathID, f.c.Nodes)
 	var resp *http.Response
 	var subFolder *acd.Folder
 	err = f.pacer.Call(func() (bool, error) {
-		subFolder, resp, err = folder.GetFolder(f.opt.Enc.FromStandardName(leaf))
+		subFolder, resp, err = folder.GetFolder(leaf)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {

@@ -398,13 +337,13 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
 }

 // CreateDir makes a directory with pathID as parent and name leaf
-func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
+func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
 	//fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
 	folder := acd.FolderFromId(pathID, f.c.Nodes)
 	var resp *http.Response
 	var info *acd.Folder
 	err = f.pacer.Call(func() (bool, error) {
-		info, resp, err = folder.CreateFolder(f.opt.Enc.FromStandardName(leaf))
+		info, resp, err = folder.CreateFolder(leaf)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {

@@ -472,7 +411,6 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
 			if !hasValidParent {
 				continue
 			}
-			*node.Name = f.opt.Enc.ToStandardName(*node.Name)
 			// Store the nodes up in case we have to retry the listing
 			out = append(out, node)
 		}

@@ -497,8 +435,12 @@
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
+func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+	err = f.dirCache.FindRoot(false)
+	if err != nil {
+		return nil, err
+	}
+	directoryID, err := f.dirCache.FindDir(dir, false)
 	if err != nil {
 		return nil, err
 	}

@@ -516,7 +458,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			d := fs.NewDir(remote, when).SetID(*node.Id)
 			entries = append(entries, d)
 		case fileKind:
-			o, err := f.newObjectWithInfo(ctx, remote, node)
+			o, err := f.newObjectWithInfo(remote, node)
 			if err != nil {
 				iErr = err
 				return true

@@ -560,7 +502,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // At the end of large uploads.  The speculation is that the timeout
 // is waiting for the sha1 hashing to complete and the file may well
 // be properly uploaded.
-func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
+func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
 	// Return if no error - all is well
 	if inErr == nil {
 		return false, inInfo, inErr

@@ -585,13 +527,13 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader,
 	}
 	// Don't wait for uploads - assume they will appear later
-	if f.opt.UploadWaitPerGB <= 0 {
+	if *uploadWaitPerGB <= 0 {
 		fs.Debugf(src, "Upload error detected but waiting disabled: %v (%q)", inErr, httpStatus)
 		return false, inInfo, inErr
 	}
 	// Time we should wait for the upload
-	uploadWaitPerByte := float64(f.opt.UploadWaitPerGB) / 1024 / 1024 / 1024
+	uploadWaitPerByte := float64(*uploadWaitPerGB) / 1024 / 1024 / 1024
 	timeToWait := time.Duration(uploadWaitPerByte * float64(src.Size()))
 	const sleepTime = 5 * time.Second // sleep between tries
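The wait computed above scales linearly with the upload size: the per-GB grace period is spread over every byte, so timeToWait = size × wait-per-GB / 2^30. A worked example of just that arithmetic with the default of 180 seconds per GB:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	uploadWaitPerGB := 180 * time.Second // the default from the option/flag above
	uploadWaitPerByte := float64(uploadWaitPerGB) / 1024 / 1024 / 1024
	size := int64(10 << 30) // a 10 GiB upload
	timeToWait := time.Duration(uploadWaitPerByte * float64(size))
	fmt.Println(timeToWait) // 30m0s
}
```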
@@ -600,7 +542,7 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader,
 	fs.Debugf(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
 	remote := src.Remote()
 	for i := 1; i <= retries; i++ {
-		o, err := f.NewObject(ctx, remote)
+		o, err := f.NewObject(remote)
 		if err == fs.ErrorObjectNotFound {
 			fs.Debugf(src, "Object not found - waiting (%d/%d)", i, retries)
 		} else if err != nil {

@@ -626,7 +568,7 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader,
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	remote := src.Remote()
 	size := src.Size()
 	// Temporary Object under construction

@@ -635,17 +577,17 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 		remote: remote,
 	}
 	// Check if object already exists
-	err := o.readMetaData(ctx)
+	err := o.readMetaData()
 	switch err {
 	case nil:
-		return o, o.Update(ctx, in, src, options...)
+		return o, o.Update(in, src, options...)
 	case fs.ErrorObjectNotFound:
 		// Not found so create it
 	default:
 		return nil, err
 	}
 	// If not create it
-	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
+	leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, true)
 	if err != nil {
 		return nil, err
 	}

@@ -658,10 +600,10 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 	err = f.pacer.CallNoRetry(func() (bool, error) {
 		start := time.Now()
 		f.tokenRenewer.Start()
-		info, resp, err = folder.Put(in, f.opt.Enc.FromStandardName(leaf))
+		info, resp, err = folder.Put(in, leaf)
 		f.tokenRenewer.Stop()
 		var ok bool
-		ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
+		ok, info, err = f.checkUpload(resp, in, src, info, err, time.Since(start))
 		if ok {
 			return false, nil
 		}

@@ -675,8 +617,14 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 }

 // Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-	_, err := f.dirCache.FindDir(ctx, dir, true)
+func (f *Fs) Mkdir(dir string) error {
+	err := f.dirCache.FindRoot(true)
+	if err != nil {
+		return err
+	}
+	if dir != "" {
+		_, err = f.dirCache.FindDir(dir, true)
+	}
 	return err
 }

@@ -689,7 +637,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 	// go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$'
 	srcObj, ok := src.(*Object)
 	if !ok {

@@ -698,11 +646,15 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}

 	// create the destination directory if necessary
-	srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
+	err := f.dirCache.FindRoot(true)
 	if err != nil {
 		return nil, err
 	}
-	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, remote, true)
+	srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(srcObj.remote, false)
+	if err != nil {
+		return nil, err
+	}
+	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(remote, true)
 	if err != nil {
 		return nil, err
 	}

@@ -718,12 +670,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		srcErr, dstErr error
 	)
 	for i := 1; i <= fs.Config.LowLevelRetries; i++ {
-		_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
+		_, srcErr = srcObj.fs.NewObject(srcObj.remote) // try reading the object
 		if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
 			// exit if error on source
 			return nil, srcErr
 		}
-		dstObj, dstErr = f.NewObject(ctx, remote)
+		dstObj, dstErr = f.NewObject(remote)
 		if dstErr != nil && dstErr != fs.ErrorObjectNotFound {
 			// exit if error on dst
 			return nil, dstErr

@@ -752,7 +704,7 @@ func (f *Fs) DirCacheFlush() {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
+func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
 	srcFs, ok := src.(*Fs)
 	if !ok {
 		fs.Debugf(src, "DirMove error: not same remote type")

@@ -767,31 +719,61 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 		return errors.New("can't move root directory")
 	}

+	// find the root src directory
+	err = srcFs.dirCache.FindRoot(false)
+	if err != nil {
+		return err
+	}
+	// find the root dst directory
+	if dstRemote != "" {
+		err = f.dirCache.FindRoot(true)
+		if err != nil {
+			return err
+		}
+	} else {
+		if f.dirCache.FoundRoot() {
+			return fs.ErrorDirExists
+		}
+	}
+
 	// Find ID of dst parent, creating subdirs if necessary
-	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, dstRemote, true)
+	findPath := dstRemote
+	if dstRemote == "" {
+		findPath = f.root
+	}
+	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(findPath, true)
 	if err != nil {
 		return err
 	}

 	// Check destination does not exist
-	_, err = f.dirCache.FindDir(ctx, dstRemote, false)
-	if err == fs.ErrorDirNotFound {
-		// OK
-	} else if err != nil {
-		return err
-	} else {
-		return fs.ErrorDirExists
+	if dstRemote != "" {
+		_, err = f.dirCache.FindDir(dstRemote, false)
+		if err == fs.ErrorDirNotFound {
+			// OK
+		} else if err != nil {
+			return err
+		} else {
+			return fs.ErrorDirExists
+		}
 	}

 	// Find ID of src parent
-	_, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcRemote, false)
+	findPath = srcRemote
+	var srcDirectoryID string
+	if srcRemote == "" {
+		srcDirectoryID, err = srcFs.dirCache.RootParentID()
+	} else {
+		_, srcDirectoryID, err = srcFs.dirCache.FindPath(findPath, false)
+	}
 	if err != nil {
 		return err
 	}
 	srcLeaf, _ := dircache.SplitPath(srcPath)

 	// Find ID of src
-	srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
+	srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
 	if err != nil {
 		return err
 	}

@@ -824,13 +806,17 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 // purgeCheck remotes the root directory, if check is set then it
 // refuses to do so if it has anything in
-func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
+func (f *Fs) purgeCheck(dir string, check bool) error {
 	root := path.Join(f.root, dir)
 	if root == "" {
 		return errors.New("can't purge root directory")
 	}
 	dc := f.dirCache
-	rootID, err := dc.FindDir(ctx, dir, false)
+	err := dc.FindRoot(false)
+	if err != nil {
+		return err
+	}
+	rootID, err := dc.FindDir(dir, false)
 	if err != nil {
 		return err
 	}

@@ -879,8 +865,8 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 // Rmdir deletes the root folder
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-	return f.purgeCheck(ctx, dir, true)
+func (f *Fs) Rmdir(dir string) error {
+	return f.purgeCheck(dir, true)
 }

 // Precision return the precision of this Fs

@@ -902,7 +888,7 @@ func (f *Fs) Hashes() hash.Set {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-//func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+//func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 //	srcObj, ok := src.(*Object)
 //	if !ok {
 //		fs.Debugf(src, "Can't copy - not same remote type")

@@ -913,7 +899,7 @@ func (f *Fs) Hashes() hash.Set {
 //	if err != nil {
 //		return nil, err
 //	}
-//	return f.NewObject(ctx, remote), nil
+//	return f.NewObject(remote), nil
 //}

 // Purge deletes all the files and the container

@@ -921,8 +907,8 @@ func (f *Fs) Hashes() hash.Set {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge(ctx context.Context, dir string) error {
-	return f.purgeCheck(ctx, dir, false)
+func (f *Fs) Purge() error {
+	return f.purgeCheck("", false)
 }

 // ------------------------------------------------------------

@@ -946,7 +932,7 @@ func (o *Object) Remote() string {
 }

 // Hash returns the Md5sum of an object returning a lowercase hex string
-func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
+func (o *Object) Hash(t hash.Type) (string, error) {
 	if t != hash.MD5 {
 		return "", hash.ErrUnsupported
 	}

@@ -969,11 +955,11 @@ func (o *Object) Size() int64 {
 // it also sets the info
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (o *Object) readMetaData(ctx context.Context) (err error) {
+func (o *Object) readMetaData() (err error) {
 	if o.info != nil {
 		return nil
 	}
-	leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, o.remote, false)
+	leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(o.remote, false)
 	if err != nil {
 		if err == fs.ErrorDirNotFound {
 			return fs.ErrorObjectNotFound

@@ -984,7 +970,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 	var resp *http.Response
 	var info *acd.File
 	err = o.fs.pacer.Call(func() (bool, error) {
-		info, resp, err = folder.GetFile(o.fs.opt.Enc.FromStandardName(leaf))
+		info, resp, err = folder.GetFile(leaf)
 		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {

@@ -1002,8 +988,8 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime(ctx context.Context) time.Time {
-	err := o.readMetaData(ctx)
+func (o *Object) ModTime() time.Time {
+	err := o.readMetaData()
 	if err != nil {
 		fs.Debugf(o, "Failed to read metadata: %v", err)
 		return time.Now()

@@ -1017,7 +1003,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
 }

 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+func (o *Object) SetModTime(modTime time.Time) error {
 	// FIXME not implemented
 	return fs.ErrorCantSetModTime
 }

@@ -1028,8 +1014,8 @@ func (o *Object) Storable() bool {
 }

 // Open an object for read
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
-	bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
+func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+	bigObject := o.Size() >= int64(tempLinkThreshold)
if bigObject { if bigObject {
fs.Debugf(o, "Downloading large object via tempLink") fs.Debugf(o, "Downloading large object via tempLink")
} }
@@ -1040,7 +1026,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if !bigObject { if !bigObject {
in, resp, err = file.OpenHeaders(headers) in, resp, err = file.OpenHeaders(headers)
} else { } else {
in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers) in, resp, err = file.OpenTempURLHeaders(rest.ClientWithHeaderReset(o.fs.noAuthClient, headers), headers)
} }
return o.fs.shouldRetry(resp, err) return o.fs.shouldRetry(resp, err)
}) })
@@ -1050,7 +1036,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size // Update the object with the contents of the io.Reader, modTime and size
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
file := acd.File{Node: o.info} file := acd.File{Node: o.info}
var info *acd.File var info *acd.File
var resp *http.Response var resp *http.Response
@@ -1061,7 +1047,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
info, resp, err = file.Overwrite(in) info, resp, err = file.Overwrite(in)
o.fs.tokenRenewer.Stop() o.fs.tokenRenewer.Stop()
var ok bool var ok bool
ok, info, err = o.fs.checkUpload(ctx, resp, in, src, info, err, time.Since(start)) ok, info, err = o.fs.checkUpload(resp, in, src, info, err, time.Since(start))
if ok { if ok {
return false, nil return false, nil
} }
@@ -1086,7 +1072,7 @@ func (f *Fs) removeNode(info *acd.Node) error {
} }
// Remove an object // Remove an object
func (o *Object) Remove(ctx context.Context) error { func (o *Object) Remove() error {
return o.fs.removeNode(o.info) return o.fs.removeNode(o.info)
} }
@@ -1104,7 +1090,7 @@ func (f *Fs) restoreNode(info *acd.Node) (newInfo *acd.Node, err error) {
func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) { func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) {
var resp *http.Response var resp *http.Response
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
newInfo, resp, err = info.Rename(f.opt.Enc.FromStandardName(newName)) newInfo, resp, err = info.Rename(newName)
return f.shouldRetry(resp, err) return f.shouldRetry(resp, err)
}) })
return newInfo, err return newInfo, err
@@ -1208,7 +1194,7 @@ OnConflict:
} }
// MimeType of an Object if known, "" otherwise // MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string { func (o *Object) MimeType() string {
if o.info.ContentProperties != nil && o.info.ContentProperties.ContentType != nil { if o.info.ContentProperties != nil && o.info.ContentProperties.ContentType != nil {
return *o.info.ContentProperties.ContentType return *o.info.ContentProperties.ContentType
} }
@@ -1221,38 +1207,24 @@ func (o *Object) MimeType(ctx context.Context) string {
// Automatically restarts itself in case of unexpected behaviour of the remote. // Automatically restarts itself in case of unexpected behaviour of the remote.
// //
// Close the returned channel to stop being notified. // Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) { func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
checkpoint := f.opt.Checkpoint checkpoint := config.FileGet(f.name, "checkpoint")
quit := make(chan bool)
go func() { go func() {
var ticker *time.Ticker
var tickerC <-chan time.Time
for { for {
checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
fs.Debugf(f, "Unable to save checkpoint: %v", err)
}
select { select {
case pollInterval, ok := <-pollIntervalChan: case <-quit:
if !ok { return
if ticker != nil { case <-time.After(pollInterval):
ticker.Stop()
}
return
}
if pollInterval == 0 {
if ticker != nil {
ticker.Stop()
ticker, tickerC = nil, nil
}
} else {
ticker = time.NewTicker(pollInterval)
tickerC = ticker.C
}
case <-tickerC:
checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
fs.Debugf(f, "Unable to save checkpoint: %v", err)
}
} }
} }
}() }()
return quit
} }
func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string { func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string {
@@ -1300,11 +1272,10 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoin
if len(node.Parents) > 0 { if len(node.Parents) > 0 {
if path, ok := f.dirCache.GetInv(node.Parents[0]); ok { if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
// and append the drive file name to compute the full file name // and append the drive file name to compute the full file name
name := f.opt.Enc.ToStandardName(*node.Name)
if len(path) > 0 { if len(path) > 0 {
path = path + "/" + name path = path + "/" + *node.Name
} else { } else {
path = name path = *node.Name
} }
// this will now clear the actual file too // this will now clear the actual file too
pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject}) pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
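The ChangeNotify hunk above is the most substantial behavioural difference in this file: the master side drives polling through a pollIntervalChan so callers can retune or stop the ticker at runtime, while the old branch polls on a fixed interval and hands back a quit channel. A minimal, self-contained sketch of the channel-controlled ticker pattern (the poll/work names are illustrative, not rclone's):

package main

import (
	"fmt"
	"time"
)

// poll calls work on a ticker whose interval is controlled over ch.
// Sending 0 pauses polling; closing ch stops the goroutine.
func poll(work func(), ch <-chan time.Duration) {
	go func() {
		var ticker *time.Ticker
		var tickerC <-chan time.Time
		for {
			select {
			case d, ok := <-ch:
				if !ok {
					if ticker != nil {
						ticker.Stop()
					}
					return
				}
				if d == 0 {
					if ticker != nil {
						ticker.Stop()
						ticker, tickerC = nil, nil
					}
				} else {
					ticker = time.NewTicker(d)
					tickerC = ticker.C
				}
			case <-tickerC:
				work()
			}
		}
	}()
}

func main() {
	ch := make(chan time.Duration)
	poll(func() { fmt.Println("tick") }, ch)
	ch <- 100 * time.Millisecond
	time.Sleep(350 * time.Millisecond)
	close(ch)
}

Receiving from a nil channel blocks forever, which is what makes the paused state (tickerC set to nil) safe inside the select.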

backend/amazonclouddrive/amazonclouddrive_test.go

@@ -7,9 +7,9 @@ package amazonclouddrive_test
 import (
 	"testing"

-	"github.com/rclone/rclone/backend/amazonclouddrive"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/amazonclouddrive"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote

backend/azureblob/azureblob.go: diff not shown (file too large)

backend/azureblob/azureblob_internal_test.go (deleted)

@@ -1,35 +0,0 @@
-// +build !plan9,!solaris,!js,go1.13
-
-package azureblob
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-)
-
-func (f *Fs) InternalTest(t *testing.T) {
-	// Check first feature flags are set on this
-	// remote
-	enabled := f.Features().SetTier
-	assert.True(t, enabled)
-	enabled = f.Features().GetTier
-	assert.True(t, enabled)
-}
-
-func TestIncrement(t *testing.T) {
-	for _, test := range []struct {
-		in   []byte
-		want []byte
-	}{
-		{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
-		{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
-		{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
-		{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
-		{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
-		{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
-	} {
-		increment(test.in)
-		assert.Equal(t, test.want, test.in)
-	}
-}
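The deleted TestIncrement above pins down the behaviour of an increment helper: the slice is treated as a little-endian counter, adding one with carry and wrapping silently at the top. A sketch consistent with those test vectors (not copied from the rclone source):

// increment adds 1 to the byte slice, interpreted as a
// little-endian big integer; it wraps around silently on overflow.
func increment(xs []byte) {
	for i, digit := range xs {
		newDigit := digit + 1
		xs[i] = newDigit
		if newDigit >= digit {
			// no carry needed, stop here
			break
		}
	}
}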

backend/azureblob/azureblob_test.go

@@ -1,37 +1,17 @@
 // Test AzureBlob filesystem interface
-
-// +build !plan9,!solaris,!js,go1.13
-
-package azureblob
+package azureblob_test

 import (
 	"testing"

-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/azureblob"
+	"github.com/ncw/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName:  "TestAzureBlob:",
-		NilObject:   (*Object)(nil),
-		TiersToTest: []string{"Hot", "Cool"},
-		ChunkedUpload: fstests.ChunkedUploadConfig{
-			MaxChunkSize: maxChunkSize,
-		},
+		NilObject:   (*azureblob.Object)(nil),
 	})
 }
-
-func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setUploadChunkSize(cs)
-}
-
-func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setUploadCutoff(cs)
-}
-
-var (
-	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
-	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
-)

backend/azureblob/azureblob_unsupported.go (deleted)

@@ -1,6 +0,0 @@
-// Build for azureblob for unsupported platforms to stop go complaining
-// about "no buildable Go source files "
-
-// +build plan9 solaris js !go1.13
-
-package azureblob

backend/b2/api/types.go

@@ -7,7 +7,7 @@ import (
 	"strings"
 	"time"

-	"github.com/rclone/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/fserrors"
 )

 // Error describes a B2 error response
@@ -17,12 +17,12 @@ type Error struct {
 	Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
 }

-// Error satisfies the error interface
+// Error statisfies the error interface
 func (e *Error) Error() string {
 	return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
 }

-// Fatal satisfies the Fatal interface
+// Fatal statisfies the Fatal interface
 //
 // It indicates which errors should be treated as fatal
 func (e *Error) Fatal() bool {
@@ -31,6 +31,11 @@ func (e *Error) Fatal() bool {

 var _ fserrors.Fataler = (*Error)(nil)

+// Account describes a B2 account
+type Account struct {
+	ID string `json:"accountId"` // The identifier for the account.
+}
+
 // Bucket describes a B2 bucket
 type Bucket struct {
 	ID string `json:"bucketId"`
@@ -50,7 +55,7 @@ type Timestamp time.Time
 // MarshalJSON turns a Timestamp into JSON (in UTC)
 func (t *Timestamp) MarshalJSON() (out []byte, err error) {
 	timestamp := (*time.Time)(t).UTC().UnixNano()
-	return []byte(strconv.FormatInt(timestamp/1e6, 10)), nil
+	return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
 }

 // UnmarshalJSON turns JSON into a Timestamp
@@ -59,7 +64,7 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
 	if err != nil {
 		return err
 	}
-	*t = Timestamp(time.Unix(timestamp/1e3, (timestamp%1e3)*1e6).UTC())
+	*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
 	return nil
 }
@@ -69,7 +74,7 @@ const versionFormat = "-v2006-01-02-150405.000"
 func (t Timestamp) AddVersion(remote string) string {
 	ext := path.Ext(remote)
 	base := remote[:len(remote)-len(ext)]
-	s := time.Time(t).Format(versionFormat)
+	s := (time.Time)(t).Format(versionFormat)
 	// Replace the '.' with a '-'
 	s = strings.Replace(s, ".", "-", -1)
 	return base + s + ext
@@ -100,22 +105,22 @@ func RemoveVersion(remote string) (t Timestamp, newRemote string) {
 	return Timestamp(newT), base[:versionStart] + ext
 }

-// IsZero returns true if the timestamp is uninitialized
+// IsZero returns true if the timestamp is unitialised
 func (t Timestamp) IsZero() bool {
-	return time.Time(t).IsZero()
+	return (time.Time)(t).IsZero()
 }

 // Equal compares two timestamps
 //
 // If either are !IsZero then it returns false
 func (t Timestamp) Equal(s Timestamp) bool {
-	if time.Time(t).IsZero() {
+	if (time.Time)(t).IsZero() {
 		return false
 	}
-	if time.Time(s).IsZero() {
+	if (time.Time)(s).IsZero() {
 		return false
 	}
-	return time.Time(t).Equal(time.Time(s))
+	return (time.Time)(t).Equal((time.Time)(s))
 }

 // File is info about a file
@@ -132,27 +137,10 @@ type File struct {
 // AuthorizeAccountResponse is as returned from the b2_authorize_account call
 type AuthorizeAccountResponse struct {
-	AbsoluteMinimumPartSize int    `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
-	AccountID               string `json:"accountId"`               // The identifier for the account.
-	Allowed                 struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
-		BucketID     string      `json:"bucketId"`     // When present, access is restricted to one bucket.
-		BucketName   string      `json:"bucketName"`   // When present, name of bucket - may be empty
-		Capabilities []string    `json:"capabilities"` // A list of strings, each one naming a capability the key has.
-		NamePrefix   interface{} `json:"namePrefix"`   // When present, access is restricted to files whose names start with the prefix
-	} `json:"allowed"`
-	APIURL              string `json:"apiUrl"`              // The base URL to use for all API calls except for uploading and downloading files.
-	AuthorizationToken  string `json:"authorizationToken"`  // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
-	DownloadURL         string `json:"downloadUrl"`         // The base URL to use for downloading files.
-	MinimumPartSize     int    `json:"minimumPartSize"`     // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead.
-	RecommendedPartSize int    `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance.
-}
-
-// ListBucketsRequest is parameters for b2_list_buckets call
-type ListBucketsRequest struct {
-	AccountID   string   `json:"accountId"`             // The identifier for the account.
-	BucketID    string   `json:"bucketId,omitempty"`    // When specified, the result will be a list containing just this bucket.
-	BucketName  string   `json:"bucketName,omitempty"`  // When specified, the result will be a list containing just this bucket.
-	BucketTypes []string `json:"bucketTypes,omitempty"` // If present, B2 will use it as a filter for bucket types returned in the list buckets response.
+	AccountID          string `json:"accountId"`          // The identifier for the account.
+	AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
+	APIURL             string `json:"apiUrl"`             // The base URL to use for all API calls except for uploading and downloading files.
+	DownloadURL        string `json:"downloadUrl"`        // The base URL to use for downloading files.
 }

 // ListBucketsResponse is as returned from the b2_list_buckets call
@@ -189,21 +177,6 @@ type GetUploadURLResponse struct {
 	AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
 }

-// GetDownloadAuthorizationRequest is passed to b2_get_download_authorization
-type GetDownloadAuthorizationRequest struct {
-	BucketID               string `json:"bucketId"`                       // The ID of the bucket that you want to upload to.
-	FileNamePrefix         string `json:"fileNamePrefix"`                 // The file name prefix of files the download authorization token will allow access to.
-	ValidDurationInSeconds int64  `json:"validDurationInSeconds"`         // The number of seconds before the authorization token will expire. The minimum value is 1 second. The maximum value is 604800 which is one week in seconds.
-	B2ContentDisposition   string `json:"b2ContentDisposition,omitempty"` // optional - If this is present, download requests using the returned authorization must include the same value for b2ContentDisposition.
-}
-
-// GetDownloadAuthorizationResponse is received from b2_get_download_authorization
-type GetDownloadAuthorizationResponse struct {
-	BucketID           string `json:"bucketId"`           // The unique ID of the bucket.
-	FileNamePrefix     string `json:"fileNamePrefix"`     // The file name prefix of files the download authorization token will allow access to.
-	AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when downloading files, see b2_download_file_by_name.
-}
-
 // FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
 type FileInfo struct {
 	ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
@@ -326,22 +299,3 @@ type CancelLargeFileResponse struct {
 	AccountID string `json:"accountId"` // The identifier for the account.
 	BucketID  string `json:"bucketId"`  // The unique ID of the bucket.
 }
-
-// CopyFileRequest is as passed to b2_copy_file
-type CopyFileRequest struct {
-	SourceID          string            `json:"sourceFileId"`                  // The ID of the source file being copied.
-	Name              string            `json:"fileName"`                      // The name of the new file being created.
-	Range             string            `json:"range,omitempty"`               // The range of bytes to copy. If not provided, the whole source file will be copied.
-	MetadataDirective string            `json:"metadataDirective,omitempty"`   // The strategy for how to populate metadata for the new file: COPY or REPLACE
-	ContentType       string            `json:"contentType,omitempty"`         // The MIME type of the content of the file (REPLACE only)
-	Info              map[string]string `json:"fileInfo,omitempty"`            // This field stores the metadata that will be stored with the file. (REPLACE only)
-	DestBucketID      string            `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
-}
-
-// CopyPartRequest is the request for b2_copy_part - the response is UploadPartResponse
-type CopyPartRequest struct {
-	SourceID    string `json:"sourceFileId"`    // The ID of the source file being copied.
-	LargeFileID string `json:"largeFileId"`     // The ID of the large file the part will belong to, as returned by b2_start_large_file.
-	PartNumber  int64  `json:"partNumber"`      // Which part this is (starting from 1)
-	Range       string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
-}
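On both sides of the Timestamp hunks, B2 times travel as integer milliseconds since the Unix epoch; the only change is the 1E6 vs 1e6 literal style. A self-contained sketch of that round trip:

package main

import (
	"fmt"
	"strconv"
	"time"
)

// toMillis renders a time the way the B2 API expects: UTC
// nanoseconds truncated to milliseconds.
func toMillis(t time.Time) string {
	return strconv.FormatInt(t.UTC().UnixNano()/1e6, 10)
}

// fromMillis parses the millisecond count back into a UTC time.
func fromMillis(s string) (time.Time, error) {
	ms, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return time.Time{}, err
	}
	return time.Unix(ms/1e3, (ms%1e3)*1e6).UTC(), nil
}

func main() {
	now := time.Now()
	s := toMillis(now)
	back, _ := fromMillis(s)
	fmt.Println(s, back, now.Sub(back) < time.Millisecond)
}

Dividing nanoseconds by 1e6 truncates towards zero, so the decoded time can be up to a millisecond earlier than the original.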

backend/b2/api/types_test.go

@@ -4,8 +4,8 @@ import (
 	"testing"
 	"time"

-	"github.com/rclone/rclone/backend/b2/api"
-	"github.com/rclone/rclone/fstest"
+	"github.com/ncw/rclone/backend/b2/api"
+	"github.com/ncw/rclone/fstest"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )

backend/b2/b2.go: diff not shown (file too large)

backend/b2/b2_internal_test.go

@@ -4,7 +4,7 @@ import (
 	"testing"
 	"time"

-	"github.com/rclone/rclone/fstest"
+	"github.com/ncw/rclone/fstest"
 )

 // Test b2 string encoding

backend/b2/b2_test.go

@@ -1,34 +1,17 @@
 // Test B2 filesystem interface
-package b2
+package b2_test

 import (
 	"testing"

-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/b2"
+	"github.com/ncw/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestB2:",
-		NilObject:  (*Object)(nil),
-		ChunkedUpload: fstests.ChunkedUploadConfig{
-			MinChunkSize:       minChunkSize,
-			NeedMultipleChunks: true,
-		},
+		NilObject:  (*b2.Object)(nil),
 	})
 }
-
-func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setUploadChunkSize(cs)
-}
-
-func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setUploadCutoff(cs)
-}
-
-var (
-	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
-	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
-)
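The master side of this test wires the shared integration-test framework to the backend's chunk-size validation through small exported hooks declared in the _test file. Reduced to its essentials, the pattern looks like this (the types and the 5 MiB floor are stand-ins, not rclone's actual values):

package b2

import "fmt"

// SizeSuffix stands in for fs.SizeSuffix in this sketch.
type SizeSuffix int64

type Fs struct {
	chunkSize SizeSuffix
}

// setUploadChunkSize is the internal, validating setter.
func (f *Fs) setUploadChunkSize(cs SizeSuffix) (old SizeSuffix, err error) {
	if cs < 5<<20 {
		return 0, fmt.Errorf("chunk size %d too small", cs)
	}
	old, f.chunkSize = f.chunkSize, cs
	return old, nil
}

// SetUploadChunkSize is exported from the test build so the shared
// integration-test framework can drive the validator.
func (f *Fs) SetUploadChunkSize(cs SizeSuffix) (SizeSuffix, error) {
	return f.setUploadChunkSize(cs)
}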

backend/b2/upload.go

@@ -6,7 +6,6 @@ package b2
 import (
 	"bytes"
-	"context"
 	"crypto/sha1"
 	"encoding/hex"
 	"fmt"
@@ -15,14 +14,12 @@ import (
 	"strings"
 	"sync"

+	"github.com/ncw/rclone/backend/b2/api"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/accounting"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/lib/rest"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/backend/b2/api"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/accounting"
-	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/lib/atexit"
-	"github.com/rclone/rclone/lib/rest"
-	"golang.org/x/sync/errgroup"
 )

 type hashAppendingReader struct {
@@ -70,32 +67,26 @@ func newHashAppendingReader(in io.Reader, h gohash.Hash) *hashAppendingReader {
 // largeUpload is used to control the upload of large files which need chunking
 type largeUpload struct {
 	f        *Fs                             // parent Fs
 	o        *Object                         // object being uploaded
-	doCopy   bool                            // doing copy rather than upload
-	what     string                          // text name of operation for logs
 	in       io.Reader                       // read the data from here
 	wrap     accounting.WrapFn               // account parts being transferred
 	id       string                          // ID of the file being uploaded
 	size     int64                           // total size
 	parts    int64                           // calculated number of parts, if known
 	sha1s    []string                        // slice of SHA1s for each part
 	uploadMu sync.Mutex                      // lock for upload variable
 	uploads  []*api.GetUploadPartURLResponse // result of get upload URL calls
-	chunkSize int64                          // chunk size to use
-	src       *Object                        // if copying, object we are reading from
 }

 // newLargeUpload starts an upload of object o from in with metadata in src
-//
-// If newInfo is set then metadata from that will be used instead of reading it from src
-func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, chunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
+func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
 	remote := o.remote
 	size := src.Size()
 	parts := int64(0)
 	sha1SliceSize := int64(maxParts)
 	if size == -1 {
-		fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
+		fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", fs.SizeSuffix(chunkSize), fs.SizeSuffix(maxParts*chunkSize))
 	} else {
 		parts = size / int64(chunkSize)
 		if size%int64(chunkSize) != 0 {
@@ -107,61 +98,47 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 		sha1SliceSize = parts
 	}

+	modTime := src.ModTime()
 	opts := rest.Opts{
 		Method: "POST",
 		Path:   "/b2_start_large_file",
 	}
-	bucket, bucketPath := o.split()
-	bucketID, err := f.getBucketID(ctx, bucket)
+	bucketID, err := f.getBucketID()
 	if err != nil {
 		return nil, err
 	}
 	var request = api.StartLargeFileRequest{
-		BucketID: bucketID,
-		Name:     f.opt.Enc.FromStandardPath(bucketPath),
-	}
-	if newInfo == nil {
-		modTime := src.ModTime(ctx)
-		request.ContentType = fs.MimeType(ctx, src)
-		request.Info = map[string]string{
-			timeKey: timeString(modTime),
-		}
-		// Set the SHA1 if known
-		if !o.fs.opt.DisableCheckSum || doCopy {
-			if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
-				request.Info[sha1Key] = calculatedSha1
-			}
-		}
-	} else {
-		request.ContentType = newInfo.ContentType
-		request.Info = newInfo.Info
+		BucketID:    bucketID,
+		Name:        o.fs.root + remote,
+		ContentType: fs.MimeType(src),
+		Info: map[string]string{
+			timeKey: timeString(modTime),
+		},
+	}
+	// Set the SHA1 if known
+	if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
+		request.Info[sha1Key] = calculatedSha1
 	}
 	var response api.StartLargeFileResponse
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
-		return f.shouldRetry(ctx, resp, err)
+		resp, err := f.srv.CallJSON(&opts, &request, &response)
+		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
 		return nil, err
 	}
-	up = &largeUpload{
-		f:         f,
-		o:         o,
-		doCopy:    doCopy,
-		what:      "upload",
-		id:        response.ID,
-		size:      size,
-		parts:     parts,
-		sha1s:     make([]string, sha1SliceSize),
-		chunkSize: int64(chunkSize),
-	}
 	// unwrap the accounting from the input, we use wrap to put it
 	// back on after the buffering
-	if doCopy {
-		up.what = "copy"
-		up.src = src.(*Object)
-	} else {
-		up.in, up.wrap = accounting.UnWrap(in)
+	in, wrap := accounting.UnWrap(in)
+	up = &largeUpload{
+		f:     f,
+		o:     o,
+		in:    in,
+		wrap:  wrap,
+		id:    response.ID,
+		size:  size,
+		parts: parts,
+		sha1s: make([]string, sha1SliceSize),
 	}
 	return up, nil
 }
@@ -169,7 +146,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 // getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
 //
 // This should be returned with returnUploadURL when finished
-func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
+func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
 	up.uploadMu.Lock()
 	defer up.uploadMu.Unlock()
 	if len(up.uploads) == 0 {
@@ -181,8 +158,8 @@ func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadP
 			ID: up.id,
 		}
 		err := up.f.pacer.Call(func() (bool, error) {
-			resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
-			return up.f.shouldRetry(ctx, resp, err)
+			resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
+			return up.f.shouldRetry(resp, err)
 		})
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to get upload URL")
@@ -203,13 +180,20 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
 	up.uploadMu.Unlock()
 }

+// clearUploadURL clears the current UploadURL and the AuthorizationToken
+func (up *largeUpload) clearUploadURL() {
+	up.uploadMu.Lock()
+	up.uploads = nil
+	up.uploadMu.Unlock()
+}
+
 // Transfer a chunk
-func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
+func (up *largeUpload) transferChunk(part int64, body []byte) error {
 	err := up.f.pacer.Call(func() (bool, error) {
 		fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))

 		// Get upload URL
-		upload, err := up.getUploadURL(ctx)
+		upload, err := up.getUploadURL()
 		if err != nil {
 			return false, err
 		}
@@ -253,8 +237,8 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
 		var response api.UploadPartResponse

-		resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
-		retry, err := up.f.shouldRetry(ctx, resp, err)
+		resp, err := up.f.srv.CallJSON(&opts, nil, &response)
+		retry, err := up.f.shouldRetry(resp, err)
 		if err != nil {
 			fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
 		}
@@ -275,41 +259,9 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
 	return err
 }

-// Copy a chunk
-func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
-	err := up.f.pacer.Call(func() (bool, error) {
-		fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
-		opts := rest.Opts{
-			Method: "POST",
-			Path:   "/b2_copy_part",
-		}
-		offset := (part - 1) * up.chunkSize // where we are in the source file
-		var request = api.CopyPartRequest{
-			SourceID:    up.src.id,
-			LargeFileID: up.id,
-			PartNumber:  part,
-			Range:       fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
-		}
-		var response api.UploadPartResponse
-		resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
-		retry, err := up.f.shouldRetry(ctx, resp, err)
-		if err != nil {
-			fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
-		}
-		up.sha1s[part-1] = response.SHA1
-		return retry, err
-	})
-	if err != nil {
-		fs.Debugf(up.o, "Error copying chunk %d: %v", part, err)
-	} else {
-		fs.Debugf(up.o, "Done copying chunk %d", part)
-	}
-	return err
-}
-
 // finish closes off the large upload
-func (up *largeUpload) finish(ctx context.Context) error {
-	fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
+func (up *largeUpload) finish() error {
+	fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
 	opts := rest.Opts{
 		Method: "POST",
 		Path:   "/b2_finish_large_file",
@@ -320,8 +272,8 @@ func (up *largeUpload) finish(ctx context.Context) error {
 	}
 	var response api.FileInfo
 	err := up.f.pacer.Call(func() (bool, error) {
-		resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
-		return up.f.shouldRetry(ctx, resp, err)
+		resp, err := up.f.srv.CallJSON(&opts, &request, &response)
+		return up.f.shouldRetry(resp, err)
 	})
 	if err != nil {
 		return err
@@ -330,8 +282,7 @@ func (up *largeUpload) finish(ctx context.Context) error {
 }

 // cancel aborts the large upload
-func (up *largeUpload) cancel(ctx context.Context) error {
-	fs.Debugf(up.o, "Cancelling large file %s", up.what)
+func (up *largeUpload) cancel() error {
 	opts := rest.Opts{
 		Method: "POST",
 		Path:   "/b2_cancel_large_file",
@@ -341,142 +292,142 @@ func (up *largeUpload) cancel(ctx context.Context) error {
 	}
 	var response api.CancelLargeFileResponse
 	err := up.f.pacer.Call(func() (bool, error) {
-		resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
-		return up.f.shouldRetry(ctx, resp, err)
+		resp, err := up.f.srv.CallJSON(&opts, &request, &response)
+		return up.f.shouldRetry(resp, err)
 	})
-	if err != nil {
-		fs.Errorf(up.o, "Failed to cancel large file %s: %v", up.what, err)
-	}
 	return err
 }

+func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
+	wg.Add(1)
+	go func(part int64, buf []byte) {
+		defer wg.Done()
+		defer up.f.putUploadBlock(buf)
+		err := up.transferChunk(part, buf)
+		if err != nil {
+			select {
+			case errs <- err:
+			default:
+			}
+		}
+	}(part, buf)
+}
+
+func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
+	if err == nil {
+		select {
+		case err = <-errs:
+		default:
+		}
+	}
+	if err != nil {
+		fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
+		cancelErr := up.cancel()
+		if cancelErr != nil {
+			fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
+		}
+		return err
+	}
+	return up.finish()
+}
+
 // Stream uploads the chunks from the input, starting with a required initial
 // chunk. Assumes the file size is unknown and will upload until the input
 // reaches EOF.
-//
-// Note that initialUploadBlock must be returned to f.putBuf()
-func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
-	defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
+func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
 	fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
-	var (
-		g, gCtx      = errgroup.WithContext(ctx)
-		hasMoreParts = true
-	)
+	errs := make(chan error, 1)
+	hasMoreParts := true
+	var wg sync.WaitGroup
+
+	// Transfer initial chunk
 	up.size = int64(len(initialUploadBlock))
-	g.Go(func() error {
-		for part := int64(1); hasMoreParts; part++ {
-			// Get a block of memory from the pool and token which limits concurrency.
-			var buf []byte
-			if part == 1 {
-				buf = initialUploadBlock
-			} else {
-				buf = up.f.getBuf(false)
-			}
-			// Fail fast, in case an errgroup managed function returns an error
-			// gCtx is cancelled. There is no point in uploading all the other parts.
-			if gCtx.Err() != nil {
-				up.f.putBuf(buf, false)
-				return nil
-			}
-			// Read the chunk
-			var n int
-			if part == 1 {
-				n = len(buf)
-			} else {
-				n, err = io.ReadFull(up.in, buf)
-				if err == io.ErrUnexpectedEOF {
-					fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
-					buf = buf[:n]
-					hasMoreParts = false
-				} else if err == io.EOF {
-					fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
-					up.f.putBuf(buf, false)
-					return nil
-				} else if err != nil {
-					// other kinds of errors indicate failure
-					up.f.putBuf(buf, false)
-					return err
-				}
-			}
-			// Keep stats up to date
-			up.parts = part
-			up.size += int64(n)
-			if part > maxParts {
-				up.f.putBuf(buf, false)
-				return errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
-			}
-			part := part // for the closure
-			g.Go(func() (err error) {
-				defer up.f.putBuf(buf, false)
-				return up.transferChunk(gCtx, part, buf)
-			})
-		}
-		return nil
-	})
-	err = g.Wait()
-	if err != nil {
-		return err
-	}
+	up.managedTransferChunk(&wg, errs, 1, initialUploadBlock)
+
+outer:
+	for part := int64(2); hasMoreParts; part++ {
+		// Check any errors
+		select {
+		case err = <-errs:
+			break outer
+		default:
+		}
+
+		// Get a block of memory
+		buf := up.f.getUploadBlock()
+
+		// Read the chunk
+		var n int
+		n, err = io.ReadFull(up.in, buf)
+		if err == io.ErrUnexpectedEOF {
+			fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
+			buf = buf[:n]
+			hasMoreParts = false
+			err = nil
+		} else if err == io.EOF {
+			fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
+			up.f.putUploadBlock(buf)
+			err = nil
+			break outer
+		} else if err != nil {
+			// other kinds of errors indicate failure
+			up.f.putUploadBlock(buf)
+			break outer
+		}
+
+		// Keep stats up to date
+		up.parts = part
+		up.size += int64(n)
+		if part > maxParts {
+			err = errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
+			break outer
+		}
+
+		// Transfer the chunk
+		up.managedTransferChunk(&wg, errs, part, buf)
+	}
+	wg.Wait()
 	up.sha1s = up.sha1s[:up.parts]
-	return up.finish(ctx)
+
+	return up.finishOrCancelOnError(err, errs)
 }

 // Upload uploads the chunks from the input
-func (up *largeUpload) Upload(ctx context.Context) (err error) {
-	defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
-	fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
-	var (
-		g, gCtx   = errgroup.WithContext(ctx)
-		remaining = up.size
-	)
-	g.Go(func() error {
-		for part := int64(1); part <= up.parts; part++ {
-			// Get a block of memory from the pool and token which limits concurrency.
-			buf := up.f.getBuf(up.doCopy)
-
-			// Fail fast, in case an errgroup managed function returns an error
-			// gCtx is cancelled. There is no point in uploading all the other parts.
-			if gCtx.Err() != nil {
-				up.f.putBuf(buf, up.doCopy)
-				return nil
-			}
-
-			reqSize := remaining
-			if reqSize >= up.chunkSize {
-				reqSize = up.chunkSize
-			}
-			if !up.doCopy {
-				// Read the chunk
-				buf = buf[:reqSize]
-				_, err = io.ReadFull(up.in, buf)
-				if err != nil {
-					up.f.putBuf(buf, up.doCopy)
-					return err
-				}
-			}
-
-			part := part // for the closure
-			g.Go(func() (err error) {
-				defer up.f.putBuf(buf, up.doCopy)
-				if !up.doCopy {
-					err = up.transferChunk(gCtx, part, buf)
-				} else {
-					err = up.copyChunk(gCtx, part, reqSize)
-				}
-				return err
-			})
-			remaining -= reqSize
-		}
-		return nil
-	})
-	err = g.Wait()
-	if err != nil {
-		return err
-	}
-	return up.finish(ctx)
+func (up *largeUpload) Upload() error {
+	fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
+	remaining := up.size
+	errs := make(chan error, 1)
+	var wg sync.WaitGroup
+	var err error
+outer:
+	for part := int64(1); part <= up.parts; part++ {
+		// Check any errors
+		select {
+		case err = <-errs:
+			break outer
+		default:
+		}
+
+		reqSize := remaining
+		if reqSize >= int64(chunkSize) {
+			reqSize = int64(chunkSize)
+		}
+
+		// Get a block of memory
+		buf := up.f.getUploadBlock()[:reqSize]
+
+		// Read the chunk
+		_, err = io.ReadFull(up.in, buf)
+		if err != nil {
+			up.f.putUploadBlock(buf)
+			break outer
+		}
+
+		// Transfer the chunk
+		up.managedTransferChunk(&wg, errs, part, buf)
+		remaining -= reqSize
+	}
+	wg.Wait()
+	return up.finishOrCancelOnError(err, errs)
 }
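The master side of Stream and Upload swaps the hand-rolled WaitGroup-plus-error-channel machinery for errgroup: the first failing part cancels the group's context, so the producer loop stops scheduling further parts. A reduced sketch of that shape (uploadParts and transfer are placeholders, not rclone's API):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// transfer simulates sending one part; part 3 fails on purpose.
func transfer(ctx context.Context, part int) error {
	if part == 3 {
		return fmt.Errorf("part %d failed", part)
	}
	fmt.Println("sent part", part)
	return nil
}

func uploadParts(ctx context.Context, parts int) error {
	g, gCtx := errgroup.WithContext(ctx)
	for part := 1; part <= parts; part++ {
		// Fail fast: once any part errors, gCtx is cancelled and
		// we stop scheduling further parts.
		if gCtx.Err() != nil {
			break
		}
		part := part // capture for the closure
		g.Go(func() error {
			return transfer(gCtx, part)
		})
	}
	return g.Wait() // first non-nil error, after all goroutines finish
}

func main() {
	fmt.Println(uploadParts(context.Background(), 6))
}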

backend/box/api/types.go

@@ -45,7 +45,7 @@ type Error struct {
 	RequestID string `json:"request_id"`
 }

-// Error returns a string for the error and satisfies the error interface
+// Error returns a string for the error and statistifes the error interface
 func (e *Error) Error() string {
 	out := fmt.Sprintf("Error %q (%d)", e.Code, e.Status)
 	if e.Message != "" {
@@ -57,11 +57,11 @@ func (e *Error) Error() string {
 	return out
 }

-// Check Error satisfies the error interface
+// Check Error statisfies the error interface
 var _ error = (*Error)(nil)

 // ItemFields are the fields needed for FileInfo
-var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"
+var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status"

 // Types of things in Item
 const (
@@ -86,10 +86,6 @@ type Item struct {
 	ContentCreatedAt  Time   `json:"content_created_at"`
 	ContentModifiedAt Time   `json:"content_modified_at"`
 	ItemStatus        string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
-	SharedLink        struct {
-		URL    string `json:"url,omitempty"`
-		Access string `json:"access,omitempty"`
-	} `json:"shared_link"`
 }

 // ModTime returns the modification time of the item
@@ -149,14 +145,6 @@ type CopyFile struct {
 	Parent Parent `json:"parent"`
 }

-// CreateSharedLink is the request for Public Link
-type CreateSharedLink struct {
-	SharedLink struct {
-		URL    string `json:"url,omitempty"`
-		Access string `json:"access,omitempty"`
-	} `json:"shared_link"`
-}
-
 // UploadSessionRequest is uses in Create Upload Session
 type UploadSessionRequest struct {
 	FolderID string `json:"folder_id,omitempty"` // don't pass for update
@@ -184,8 +172,8 @@ type UploadSessionResponse struct {
 // Part defines the return from upload part call which are passed to commit upload also
 type Part struct {
 	PartID string `json:"part_id"`
-	Offset int64  `json:"offset"`
-	Size   int64  `json:"size"`
+	Offset int    `json:"offset"`
+	Size   int    `json:"size"`
 	Sha1   string `json:"sha1"`
 }
@@ -202,43 +190,3 @@ type CommitUpload struct {
 		ContentModifiedAt Time `json:"content_modified_at"`
 	} `json:"attributes"`
 }
-
-// ConfigJSON defines the shape of a box config.json
-type ConfigJSON struct {
-	BoxAppSettings AppSettings `json:"boxAppSettings"`
-	EnterpriseID   string      `json:"enterpriseID"`
-}
-
-// AppSettings defines the shape of the boxAppSettings within box config.json
-type AppSettings struct {
-	ClientID     string  `json:"clientID"`
-	ClientSecret string  `json:"clientSecret"`
-	AppAuth      AppAuth `json:"appAuth"`
-}
-
-// AppAuth defines the shape of the appAuth within boxAppSettings in config.json
-type AppAuth struct {
-	PublicKeyID string `json:"publicKeyID"`
-	PrivateKey  string `json:"privateKey"`
-	Passphrase  string `json:"passphrase"`
-}
-
-// User is returned from /users/me
-type User struct {
-	Type          string    `json:"type"`
-	ID            string    `json:"id"`
-	Name          string    `json:"name"`
-	Login         string    `json:"login"`
-	CreatedAt     time.Time `json:"created_at"`
-	ModifiedAt    time.Time `json:"modified_at"`
-	Language      string    `json:"language"`
-	Timezone      string    `json:"timezone"`
-	SpaceAmount   int64     `json:"space_amount"`
-	SpaceUsed     int64     `json:"space_used"`
-	MaxUploadSize int64     `json:"max_upload_size"`
-	Status        string    `json:"status"`
-	JobTitle      string    `json:"job_title"`
-	Phone         string    `json:"phone"`
-	Address       string    `json:"address"`
-	AvatarURL     string    `json:"avatar_url"`
-}
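The removed ConfigJSON/AppSettings/AppAuth types mirror the config.json that Box issues for JWT applications. A short sketch of loading that shape, with the struct fields copied from the hunk above (the file path is illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type AppAuth struct {
	PublicKeyID string `json:"publicKeyID"`
	PrivateKey  string `json:"privateKey"`
	Passphrase  string `json:"passphrase"`
}

type AppSettings struct {
	ClientID     string  `json:"clientID"`
	ClientSecret string  `json:"clientSecret"`
	AppAuth      AppAuth `json:"appAuth"`
}

type ConfigJSON struct {
	BoxAppSettings AppSettings `json:"boxAppSettings"`
	EnterpriseID   string      `json:"enterpriseID"`
}

func main() {
	// Read and decode the downloaded Box app config.
	data, err := os.ReadFile("box-config.json") // illustrative path
	if err != nil {
		panic(err)
	}
	var cfg ConfigJSON
	if err := json.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.BoxAppSettings.ClientID, cfg.EnterpriseID)
}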

backend/box/box.go: diff not shown (file too large)

backend/box/box_test.go

@@ -4,8 +4,8 @@ package box_test
 import (
 	"testing"

-	"github.com/rclone/rclone/backend/box"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/box"
+	"github.com/ncw/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote

backend/box/upload.go

@@ -4,7 +4,6 @@ package box
 import (
 	"bytes"
-	"context"
 	"crypto/sha1"
 	"encoding/base64"
 	"encoding/json"
@@ -15,16 +14,15 @@ import (
 	"sync"
 	"time"

+	"github.com/ncw/rclone/backend/box/api"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/accounting"
+	"github.com/ncw/rclone/lib/rest"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/backend/box/api"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/accounting"
-	"github.com/rclone/rclone/lib/atexit"
-	"github.com/rclone/rclone/lib/rest"
 )

 // createUploadSession creates an upload session for the object
-func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
+func (o *Object) createUploadSession(leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
 	opts := rest.Opts{
 		Method: "POST",
 		Path:   "/files/upload_sessions",
@@ -39,11 +37,11 @@ func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID stri
 	} else {
 		opts.Path = "/files/upload_sessions"
 		request.FolderID = directoryID
-		request.FileName = o.fs.opt.Enc.FromStandardName(leaf)
+		request.FileName = replaceReservedChars(leaf)
 	}
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response)
+		resp, err = o.fs.srv.CallJSON(&opts, &request, &response)
 		return shouldRetry(resp, err)
 	})
 	return
@@ -55,7 +53,7 @@ func sha1Digest(digest []byte) string {
 }

 // uploadPart uploads a part in an upload session
-func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn, options ...fs.OpenOption) (response *api.UploadPartResponse, err error) {
+func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
 	chunkSize := int64(len(chunk))
 	sha1sum := sha1.Sum(chunk)
 	opts := rest.Opts{
@@ -65,7 +63,6 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
 		ContentType:   "application/octet-stream",
 		ContentLength: &chunkSize,
 		ContentRange:  fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, totalSize),
-		Options:       options,
 		ExtraHeaders: map[string]string{
 			"Digest": sha1Digest(sha1sum[:]),
 		},
@@ -73,7 +70,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
 		opts.Body = wrap(bytes.NewReader(chunk))
-		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response)
+		resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -83,7 +80,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
 }

 // commitUpload finishes an upload session
-func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
+func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
 	opts := rest.Opts{
 		Method: "POST",
 		Path:   "/files/upload_sessions/" + SessionID + "/commit",
@@ -99,15 +96,13 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
 	request.Attributes.ContentCreatedAt = api.Time(modTime)
 	var body []byte
 	var resp *http.Response
-	// For discussion of this value see:
-	// https://github.com/rclone/rclone/issues/2054
-	maxTries := o.fs.opt.CommitRetries
+	maxTries := fs.Config.LowLevelRetries
 	const defaultDelay = 10
 	var tries int
 outer:
 	for tries = 0; tries < maxTries; tries++ {
 		err = o.fs.pacer.Call(func() (bool, error) {
-			resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
+			resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
 			if err != nil {
 				return shouldRetry(resp, err)
 			}
@@ -115,7 +110,7 @@ outer:
 			return shouldRetry(resp, err)
 		})
 		delay := defaultDelay
-		var why string
+		why := "unknown"
 		if err != nil {
 			// Sometimes we get 400 Error with
 			// parts_mismatch immediately after uploading
@@ -157,7 +152,7 @@ outer:
 }

 // abortUpload cancels an upload session
-func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error) {
+func (o *Object) abortUpload(SessionID string) (err error) {
 	opts := rest.Opts{
 		Method: "DELETE",
 		Path:   "/files/upload_sessions/" + SessionID,
@@ -166,16 +161,16 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
 	}
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(ctx, &opts)
+		resp, err = o.fs.srv.Call(&opts)
 		return shouldRetry(resp, err)
 	})
 	return err
 }

 // uploadMultipart uploads a file using multipart upload
-func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time, options ...fs.OpenOption) (err error) {
+func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
 	// Create upload session
-	session, err := o.createUploadSession(ctx, leaf, directoryID, size)
+	session, err := o.createUploadSession(leaf, directoryID, size)
 	if err != nil {
 		return errors.Wrap(err, "multipart upload create session failed")
 	}
@@ -183,13 +178,15 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
 	fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))

 	// Cancel the session if something went wrong
-	defer atexit.OnError(&err, func() {
-		fs.Debugf(o, "Cancelling multipart upload: %v", err)
-		cancelErr := o.abortUpload(ctx, session.ID)
-		if cancelErr != nil {
-			fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
+	defer func() {
+		if err != nil {
+			fs.Debugf(o, "Cancelling multipart upload: %v", err)
+			cancelErr := o.abortUpload(session.ID)
+			if cancelErr != nil {
+				fs.Logf(o, "Failed to cancel multipart upload: %v", err)
+			}
 		}
-	})()
+	}()

 	// unwrap the accounting from the input, we use wrap to put it
 	// back on after the buffering
@@ -212,8 +209,8 @@ outer:
 		}

 		reqSize := remaining
-		if reqSize >= chunkSize {
-			reqSize = chunkSize
+		if reqSize >= int64(chunkSize) {
+			reqSize = int64(chunkSize)
 		}

 		// Make a block of memory
@@ -236,7 +233,7 @@ outer:
 			defer wg.Done()
 			defer o.fs.uploadToken.Put()
 			fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
-			partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...)
+			partResponse, err := o.uploadPart(session.ID, position, size, buf, wrap)
 			if err != nil {
 				err = errors.Wrap(err, "multipart upload failed to upload part")
 				select {
@@ -264,7 +261,7 @@ outer:
 	}

 	// Finalise the upload session
-	result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
+	result, err := o.commitUpload(session.ID, parts, modTime, hash.Sum(nil))
 	if err != nil {
 		return errors.Wrap(err, "multipart upload failed to finalize")
 	}
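Both versions of uploadPart frame each chunk with a Content-Range header and a SHA-1 Digest header computed by sha1Digest. A sketch of deriving those two headers for one chunk; the RFC 3230 style sha= base64 form is an assumption here, not taken from the suppressed box.go diff:

package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

// headersForChunk derives the two per-chunk headers used by the
// Box upload session API: a SHA-1 digest of the chunk and a
// Content-Range covering this chunk within the whole file.
// The "sha=" base64 form is an assumption, not copied from rclone.
func headersForChunk(chunk []byte, offset, totalSize int64) (digest, contentRange string) {
	sum := sha1.Sum(chunk)
	digest = "sha=" + base64.StdEncoding.EncodeToString(sum[:])
	contentRange = fmt.Sprintf("bytes %d-%d/%d", offset, offset+int64(len(chunk))-1, totalSize)
	return digest, contentRange
}

func main() {
	d, r := headersForChunk([]byte("hello world"), 0, 11)
	fmt.Println("Digest:", d)
	fmt.Println("Content-Range:", r)
}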

backend/cache/cache.go (vendored, 1014 lines changed): diff not shown (file too large)

backend/cache/cache_internal_test.go

@@ -1,14 +1,9 @@
-// +build !plan9,!js
-// +build !race
+// +build !plan9
 package cache_test
 import (
 	"bytes"
-	"context"
-	"encoding/base64"
-	goflag "flag"
-	"fmt"
 	"io"
 	"io/ioutil"
 	"log"
@@ -16,24 +11,35 @@ import (
 	"os"
 	"path"
 	"path/filepath"
-	"runtime/debug"
-	"strconv"
+	"runtime"
 	"strings"
 	"testing"
 	"time"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/backend/cache"
-	"github.com/rclone/rclone/backend/crypt"
-	_ "github.com/rclone/rclone/backend/drive"
-	"github.com/rclone/rclone/backend/local"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/object"
-	"github.com/rclone/rclone/fstest"
-	"github.com/rclone/rclone/fstest/testy"
-	"github.com/rclone/rclone/lib/random"
-	"github.com/rclone/rclone/vfs/vfsflags"
+
+	"encoding/base64"
+	goflag "flag"
+	"fmt"
+	"runtime/debug"
+
+	"encoding/json"
+	"net/http"
+
+	"github.com/ncw/rclone/backend/cache"
+	"github.com/ncw/rclone/backend/crypt"
+	_ "github.com/ncw/rclone/backend/drive"
+	"github.com/ncw/rclone/backend/local"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/object"
+	"github.com/ncw/rclone/fs/rc"
+	"github.com/ncw/rclone/fs/rc/rcflags"
+	"github.com/ncw/rclone/fstest"
+	"github.com/ncw/rclone/vfs"
+	"github.com/ncw/rclone/vfs/vfsflags"
+	flag "github.com/spf13/pflag"
 	"github.com/stretchr/testify/require"
 )
@@ -49,7 +55,9 @@ const (
 var (
 	remoteName string
+	mountDir   string
 	uploadDir  string
+	useMount   bool
 	runInstance *run
 	errNotSupported = errors.New("not supported")
 	decryptedToEncryptedRemotes = map[string]string{
@@ -85,7 +93,9 @@ var (
 func init() {
 	goflag.StringVar(&remoteName, "remote-internal", "TestInternalCache", "Remote to test with, defaults to local filesystem")
+	goflag.StringVar(&mountDir, "mount-dir-internal", "", "")
 	goflag.StringVar(&uploadDir, "upload-dir-internal", "", "")
+	goflag.BoolVar(&useMount, "cache-use-mount", false, "Test only with mount")
 }
 // TestMain drives the tests
@@ -93,7 +103,7 @@ func TestMain(m *testing.M) {
 	goflag.Parse()
 	var rc int
-	log.Printf("Running with the following params: \n remote: %v", remoteName)
+	log.Printf("Running with the following params: \n remote: %v, \n mount: %v", remoteName, useMount)
 	runInstance = newRun()
 	rc = m.Run()
 	os.Exit(rc)
@@ -115,7 +125,7 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {
 	require.NoError(t, err)
 	listRootInner, err := runInstance.list(t, rootFs, innerFolder)
 	require.NoError(t, err)
-	listInner, err := rootFs2.List(context.Background(), "")
+	listInner, err := rootFs2.List("")
 	require.NoError(t, err)
 	require.Len(t, listRoot, 1)
@@ -130,13 +140,13 @@ func TestInternalVfsCache(t *testing.T) {
 	vfsflags.Opt.CacheMode = vfs.CacheModeWrites
 	id := "tiuufo"
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"cache-writes": "true", "cache-info-age": "1h"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
-	err := rootFs.Mkdir(context.Background(), "test")
+	err := rootFs.Mkdir("test")
 	require.NoError(t, err)
 	runInstance.writeObjectString(t, rootFs, "test/second", "content")
-	_, err = rootFs.List(context.Background(), "test")
+	_, err = rootFs.List("test")
 	require.NoError(t, err)
 	testReader := runInstance.randomReader(t, testSize)
@@ -261,13 +271,37 @@ func TestInternalObjNotFound(t *testing.T) {
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
-	obj, err := rootFs.NewObject(context.Background(), "404")
+	obj, err := rootFs.NewObject("404")
 	require.Error(t, err)
 	require.Nil(t, obj)
 }
+
+func TestInternalRemoteWrittenFileFoundInMount(t *testing.T) {
+	if !runInstance.useMount {
+		t.Skip("test needs mount mode")
+	}
+	id := fmt.Sprintf("tirwffim%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+
+	cfs, err := runInstance.getCacheFs(rootFs)
+	require.NoError(t, err)
+
+	var testData []byte
+	if runInstance.rootIsCrypt {
+		testData, err = base64.StdEncoding.DecodeString(cryptedTextBase64)
+		require.NoError(t, err)
+	} else {
+		testData = []byte("test content")
+	}
+
+	runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test"), testData)
+	data, err := runInstance.readDataFromRemote(t, rootFs, "test", 0, int64(len([]byte("test content"))), false)
+	require.NoError(t, err)
+	require.Equal(t, "test content", string(data))
+}
 func TestInternalCachedWrittenContentMatches(t *testing.T) {
-	testy.SkipUnreliable(t)
 	id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -311,7 +345,6 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
 }
 func TestInternalCachedUpdatedContentMatches(t *testing.T) {
-	testy.SkipUnreliable(t)
 	id := fmt.Sprintf("ticucm%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -326,8 +359,8 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
 		testData2, err = base64.StdEncoding.DecodeString(cryptedText2Base64)
 		require.NoError(t, err)
 	} else {
-		testData1 = []byte(random.String(100))
-		testData2 = []byte(random.String(200))
+		testData1 = []byte(fstest.RandomString(100))
+		testData2 = []byte(fstest.RandomString(200))
 	}
 	// write the object
@@ -359,10 +392,10 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 	// write the object
 	o := runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
-	require.Equal(t, o.Size(), testSize)
+	require.Equal(t, o.Size(), int64(testSize))
 	time.Sleep(time.Second * 3)
-	checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
+	checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, int64(testSize), false)
 	require.NoError(t, err)
 	require.Equal(t, int64(len(checkSample)), o.Size())
@@ -417,7 +450,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 	require.NoError(t, err)
 	log.Printf("original size: %v", originalSize)
-	o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+	o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
 	require.NoError(t, err)
 	expectedSize := int64(len([]byte("test content")))
 	var data2 []byte
@@ -429,7 +462,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 		data2 = []byte("test content")
 	}
 	objInfo := object.NewStaticObjectInfo(runInstance.encryptRemoteIfNeeded(t, "data.bin"), time.Now(), int64(len(data2)), true, nil, cfs.UnWrap())
-	err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
+	err = o.Update(bytes.NewReader(data2), objInfo)
 	require.NoError(t, err)
 	require.Equal(t, int64(len(data2)), o.Size())
 	log.Printf("updated size: %v", len(data2))
@@ -475,9 +508,9 @@ func TestInternalMoveWithNotify(t *testing.T) {
 	} else {
 		testData = []byte("test content")
 	}
-	_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test"))
-	_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/one"))
-	_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/second"))
+	_ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test"))
+	_ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/one"))
+	_ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/second"))
 	srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)
 	// list in mount
@@ -487,7 +520,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
 	require.NoError(t, err)
 	// move file
-	_, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
+	_, err = cfs.UnWrap().Features().Move(srcObj, dstName)
 	require.NoError(t, err)
 	err = runInstance.retryBlock(func() error {
@@ -561,9 +594,9 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 	} else {
 		testData = []byte("test content")
 	}
-	err = rootFs.Mkdir(context.Background(), "test")
+	err = rootFs.Mkdir("test")
 	require.NoError(t, err)
-	err = rootFs.Mkdir(context.Background(), "test/one")
+	err = rootFs.Mkdir("test/one")
 	require.NoError(t, err)
 	srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)
@@ -580,7 +613,7 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 	require.False(t, found)
 	// move file
-	_, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
+	_, err = cfs.UnWrap().Features().Move(srcObj, dstName)
 	require.NoError(t, err)
 	err = runInstance.retryBlock(func() error {
@@ -642,28 +675,106 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
 	runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
 	// update in the wrapped fs
-	o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+	o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
 	require.NoError(t, err)
 	wrappedTime := time.Now().Add(-1 * time.Hour)
-	err = o.SetModTime(context.Background(), wrappedTime)
+	err = o.SetModTime(wrappedTime)
 	require.NoError(t, err)
 	// get a new instance from the cache
-	co, err := rootFs.NewObject(context.Background(), "data.bin")
+	co, err := rootFs.NewObject("data.bin")
 	require.NoError(t, err)
-	require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
+	require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
 	cfs.DirCacheFlush() // flush the cache
 	// get a new instance from the cache
-	co, err = rootFs.NewObject(context.Background(), "data.bin")
+	co, err = rootFs.NewObject("data.bin")
 	require.NoError(t, err)
-	require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
+	require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
+}
+
+func TestInternalChangeSeenAfterRc(t *testing.T) {
+	rcflags.Opt.Enabled = true
+	rc.Start(&rcflags.Opt)
+
+	id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"rc": "true"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	if !runInstance.useMount {
+		t.Skipf("needs mount")
+	}
+	if !runInstance.wrappedIsExternal {
+		t.Skipf("needs drive")
+	}
+
+	cfs, err := runInstance.getCacheFs(rootFs)
+	require.NoError(t, err)
+	chunkSize := cfs.ChunkSize()
+
+	// create some rand test data
+	testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
+	runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
+
+	// update in the wrapped fs
+	o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+	require.NoError(t, err)
+	wrappedTime := time.Now().Add(-1 * time.Hour)
+	err = o.SetModTime(wrappedTime)
+	require.NoError(t, err)
+
+	// get a new instance from the cache
+	co, err := rootFs.NewObject("data.bin")
+	require.NoError(t, err)
+	require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
+
+	m := make(map[string]string)
+	res, err := http.Post(fmt.Sprintf("http://localhost:5572/cache/expire?remote=%s", "data.bin"), "application/json; charset=utf-8", strings.NewReader(""))
+	require.NoError(t, err)
+	defer func() {
+		_ = res.Body.Close()
+	}()
+	_ = json.NewDecoder(res.Body).Decode(&m)
+	require.Contains(t, m, "status")
+	require.Contains(t, m, "message")
+	require.Equal(t, "ok", m["status"])
+	require.Contains(t, m["message"], "cached file cleared")
+
+	// get a new instance from the cache
+	co, err = rootFs.NewObject("data.bin")
+	require.NoError(t, err)
+	require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
+	li1, err := runInstance.list(t, rootFs, "")
+
+	// create some rand test data
+	testData2 := randStringBytes(int(chunkSize))
+	runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)
+
+	// list should have 1 item only
+	li1, err = runInstance.list(t, rootFs, "")
+	require.Len(t, li1, 1)
+
+	m = make(map[string]string)
+	res2, err := http.Post("http://localhost:5572/cache/expire?remote=/", "application/json; charset=utf-8", strings.NewReader(""))
+	require.NoError(t, err)
+	defer func() {
+		_ = res2.Body.Close()
+	}()
+	_ = json.NewDecoder(res2.Body).Decode(&m)
+	require.Contains(t, m, "status")
+	require.Contains(t, m, "message")
+	require.Equal(t, "ok", m["status"])
+	require.Contains(t, m["message"], "cached directory cleared")
+
+	// list should have 2 items now
+	li2, err := runInstance.list(t, rootFs, "")
+	require.Len(t, li2, 2)
 }
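Note: TestInternalChangeSeenAfterRc above drives the cache through rclone's remote control API rather than through Go calls. A standalone sketch of the same rc request, assuming an rclone instance with the rc server listening on its default localhost:5572 as in the test:

    package main

    import (
        "encoding/json"
        "fmt"
        "log"
        "net/http"
        "strings"
    )

    func main() {
        // Ask the cache backend to drop its cached copy of a file,
        // exactly as the test does before re-reading the object.
        res, err := http.Post("http://localhost:5572/cache/expire?remote=data.bin",
            "application/json; charset=utf-8", strings.NewReader(""))
        if err != nil {
            log.Fatal(err)
        }
        defer func() { _ = res.Body.Close() }()

        m := make(map[string]string)
        if err := json.NewDecoder(res.Body).Decode(&m); err != nil {
            log.Fatal(err)
        }
        fmt.Println(m["status"], m["message"]) // the test expects "ok" and "cached file cleared"
    }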
 func TestInternalCacheWrites(t *testing.T) {
 	id := "ticw"
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"cache-writes": "true"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 	cfs, err := runInstance.getCacheFs(rootFs)
@@ -682,7 +793,7 @@ func TestInternalCacheWrites(t *testing.T) {
 func TestInternalMaxChunkSizeRespected(t *testing.T) {
 	id := fmt.Sprintf("timcsr%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"cache-workers": "1"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 	cfs, err := runInstance.getCacheFs(rootFs)
@@ -693,7 +804,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
 	// create some rand test data
 	testData := randStringBytes(int(int64(totalChunks-1)*chunkSize + chunkSize/2))
 	runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
-	o, err := cfs.NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+	o, err := cfs.NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
 	require.NoError(t, err)
 	co, ok := o.(*cache.Object)
 	require.True(t, ok)
@@ -732,7 +843,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
 	require.NoError(t, err)
 	require.Len(t, l, 1)
-	err = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/third"))
+	err = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/third"))
 	require.NoError(t, err)
 	l, err = runInstance.list(t, rootFs, "test")
@@ -757,7 +868,7 @@ func TestInternalBug2117(t *testing.T) {
 	id := fmt.Sprintf("tib2117%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
-		map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
+		map[string]string{"cache-info-age": "72h", "cache-chunk-clean-interval": "15m"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 	if runInstance.rootIsCrypt {
@@ -767,14 +878,14 @@ func TestInternalBug2117(t *testing.T) {
 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
-	err = cfs.UnWrap().Mkdir(context.Background(), "test")
+	err = cfs.UnWrap().Mkdir("test")
 	require.NoError(t, err)
 	for i := 1; i <= 4; i++ {
-		err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d", i))
+		err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d", i))
 		require.NoError(t, err)
 		for j := 1; j <= 4; j++ {
-			err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d/dir%d", i, j))
+			err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d/dir%d", i, j))
 			require.NoError(t, err)
 			runInstance.writeObjectString(t, cfs.UnWrap(), fmt.Sprintf("test/dir%d/dir%d/test.txt", i, j), "test")
@@ -807,10 +918,19 @@ func TestInternalBug2117(t *testing.T) {
 // run holds the remotes for a test run
 type run struct {
 	okDiff            time.Duration
-	runDefaultCfgMap  configmap.Simple
+	allCfgMap         map[string]string
+	allFlagMap        map[string]string
+	runDefaultCfgMap  map[string]string
+	runDefaultFlagMap map[string]string
+	mntDir            string
 	tmpUploadDir      string
+	useMount          bool
+	isMounted         bool
 	rootIsCrypt       bool
 	wrappedIsExternal bool
+	unmountFn         func() error
+	unmountRes        chan error
+	vfs               *vfs.VFS
 	tempFiles         []*os.File
 	dbPath            string
 	chunkPath         string
@@ -820,18 +940,68 @@ type run struct {
 func newRun() *run {
 	var err error
 	r := &run{
 		okDiff: time.Second * 9, // really big diff here but the build machines seem to be slow. need a different way for this
+		useMount:  useMount,
+		isMounted: false,
 	}
-	// Read in all the defaults for all the options
-	fsInfo, err := fs.Find("cache")
-	if err != nil {
-		panic(fmt.Sprintf("Couldn't find cache remote: %v", err))
-	}
-	r.runDefaultCfgMap = configmap.Simple{}
-	for _, option := range fsInfo.Options {
-		r.runDefaultCfgMap.Set(option.Name, fmt.Sprint(option.Default))
-	}
+	r.allCfgMap = map[string]string{
+		"plex_url":         "",
+		"plex_username":    "",
+		"plex_password":    "",
+		"chunk_size":       cache.DefCacheChunkSize,
+		"info_age":         cache.DefCacheInfoAge,
+		"chunk_total_size": cache.DefCacheTotalChunkSize,
+	}
+	r.allFlagMap = map[string]string{
+		"cache-db-path":              filepath.Join(config.CacheDir, "cache-backend"),
+		"cache-chunk-path":           filepath.Join(config.CacheDir, "cache-backend"),
+		"cache-db-purge":             "true",
+		"cache-chunk-size":           cache.DefCacheChunkSize,
+		"cache-total-chunk-size":     cache.DefCacheTotalChunkSize,
+		"cache-chunk-clean-interval": cache.DefCacheChunkCleanInterval,
+		"cache-info-age":             cache.DefCacheInfoAge,
+		"cache-read-retries":         strconv.Itoa(cache.DefCacheReadRetries),
+		"cache-workers":              strconv.Itoa(cache.DefCacheTotalWorkers),
+		"cache-chunk-no-memory":      "false",
+		"cache-rps":                  strconv.Itoa(cache.DefCacheRps),
+		"cache-writes":               "false",
+		"cache-tmp-upload-path":      "",
+		"cache-tmp-wait-time":        cache.DefCacheTmpWaitTime,
+	}
+	r.runDefaultCfgMap = make(map[string]string)
+	for key, value := range r.allCfgMap {
+		r.runDefaultCfgMap[key] = value
+	}
+	r.runDefaultFlagMap = make(map[string]string)
+	for key, value := range r.allFlagMap {
+		r.runDefaultFlagMap[key] = value
+	}
+	if mountDir == "" {
+		if runtime.GOOS != "windows" {
+			r.mntDir, err = ioutil.TempDir("", "rclonecache-mount")
+			if err != nil {
+				log.Fatalf("Failed to create mount dir: %v", err)
+				return nil
+			}
+		} else {
+			// Find a free drive letter
+			drive := ""
+			for letter := 'E'; letter <= 'Z'; letter++ {
+				drive = string(letter) + ":"
+				_, err := os.Stat(drive + "\\")
+				if os.IsNotExist(err) {
+					goto found
+				}
+			}
+			log.Print("Couldn't find free drive letter for test")
+		found:
+			r.mntDir = drive
+		}
+	} else {
+		r.mntDir = mountDir
+	}
+	log.Printf("Mount Dir: %v", r.mntDir)
 	if uploadDir == "" {
 		r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
@@ -872,15 +1042,6 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 		return nil, nil
 	}
-	// Config to pass to NewFs
-	m := configmap.Simple{}
-	for k, v := range r.runDefaultCfgMap {
-		m.Set(k, v)
-	}
-	for k, v := range flags {
-		m.Set(k, v)
-	}
 	// if the remote doesn't exist, create a new one with a local one for it
 	// identify which is the cache remote (it can be wrapped by a crypt too)
 	rootIsCrypt := false
@@ -889,8 +1050,8 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 		localRemote := remote + "-local"
 		config.FileSet(localRemote, "type", "local")
 		config.FileSet(localRemote, "nounc", "true")
-		m.Set("type", "cache")
-		m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
+		config.FileSet(remote, "type", "cache")
+		config.FileSet(remote, "remote", localRemote+":/var/tmp/"+localRemote)
 	} else {
 		remoteType := config.FileGet(remote, "type", "")
 		if remoteType == "" {
@@ -900,8 +1061,8 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 		if remoteType != "cache" {
 			if remoteType == "crypt" {
 				rootIsCrypt = true
-				m.Set("password", cryptPassword1)
-				m.Set("password2", cryptPassword2)
+				config.FileSet(remote, "password", cryptPassword1)
+				config.FileSet(remote, "password2", cryptPassword2)
 			}
 			remoteRemote := config.FileGet(remote, "remote", "")
 			if remoteRemote == "" {
@@ -925,6 +1086,20 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 	boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
 	require.NoError(t, err)
+	for k, v := range r.runDefaultCfgMap {
+		if c, ok := cfg[k]; ok {
+			config.FileSet(cacheRemote, k, c)
+		} else {
+			config.FileSet(cacheRemote, k, v)
+		}
+	}
+	for k, v := range r.runDefaultFlagMap {
+		if c, ok := flags[k]; ok {
+			_ = flag.Set(k, c)
+		} else {
+			_ = flag.Set(k, v)
+		}
+	}
 	fs.Config.LowLevelRetries = 1
 	// Instantiate root
@@ -932,7 +1107,7 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 		boltDb.PurgeTempUploads()
 		_ = os.RemoveAll(path.Join(runInstance.tmpUploadDir, id))
 	}
-	f, err := cache.NewFs(remote, id, m)
+	f, err := fs.NewFs(remote + ":" + id)
 	require.NoError(t, err)
 	cfs, err := r.getCacheFs(f)
 	require.NoError(t, err)
@@ -946,21 +1121,33 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 	}
 	if purge {
-		_ = f.Features().Purge(context.Background(), "")
+		_ = f.Features().Purge()
 		require.NoError(t, err)
 	}
-	err = f.Mkdir(context.Background(), "")
+	err = f.Mkdir("")
 	require.NoError(t, err)
+	if r.useMount && !r.isMounted {
+		r.mountFs(t, f)
+	}
 	return f, boltDb
 }
 func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
-	err := f.Features().Purge(context.Background(), "")
+	if r.useMount && r.isMounted {
+		r.unmountFs(t, f)
+	}
+	err := f.Features().Purge()
 	require.NoError(t, err)
 	cfs, err := r.getCacheFs(f)
 	require.NoError(t, err)
 	cfs.StopBackgroundRunners()
+	if r.useMount && runtime.GOOS != "windows" {
+		err = os.RemoveAll(r.mntDir)
+		require.NoError(t, err)
+	}
 	err = os.RemoveAll(r.tmpUploadDir)
 	require.NoError(t, err)
@@ -970,6 +1157,9 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
 	}
 	r.tempFiles = nil
 	debug.FreeOSMemory()
+	for k, v := range r.runDefaultFlagMap {
+		_ = flag.Set(k, v)
+	}
 }
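Note: because the old harness configures the backend through global spf13/pflag flags, newCacheFs overrides them per test and cleanupFs restores the saved defaults afterwards, as shown above. A minimal sketch of that save/override/restore pattern — the flag name here is an example, not one of rclone's flags:

    package main

    import (
        "fmt"

        flag "github.com/spf13/pflag"
    )

    var chunkSize = flag.String("cache-chunk-size", "5M", "example flag")

    func main() {
        flag.Parse()

        // Snapshot the defaults before any test runs.
        defaults := map[string]string{"cache-chunk-size": "5M"}

        // Override for a single test...
        _ = flag.Set("cache-chunk-size", "1M")
        fmt.Println("during test:", *chunkSize)

        // ...and put everything back afterwards, as cleanupFs does.
        for name, value := range defaults {
            _ = flag.Set(name, value)
        }
        fmt.Println("after cleanup:", *chunkSize)
    }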
 func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
@@ -991,6 +1181,23 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
 	return f
 }
+func (r *run) writeRemoteRandomBytes(t *testing.T, f fs.Fs, p string, size int64) string {
+	remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
+	// create some rand test data
+	testData := randStringBytes(int(size))
+
+	r.writeRemoteBytes(t, f, remote, testData)
+	return remote
+}
+
+func (r *run) writeObjectRandomBytes(t *testing.T, f fs.Fs, p string, size int64) fs.Object {
+	remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
+	// create some rand test data
+	testData := randStringBytes(int(size))
+
+	return r.writeObjectBytes(t, f, remote, testData)
+}
 func (r *run) writeRemoteString(t *testing.T, f fs.Fs, remote, content string) {
 	r.writeRemoteBytes(t, f, remote, []byte(content))
 }
@@ -1000,17 +1207,43 @@ func (r *run) writeObjectString(t *testing.T, f fs.Fs, remote, content string) f
 }
 func (r *run) writeRemoteBytes(t *testing.T, f fs.Fs, remote string, data []byte) {
-	r.writeObjectBytes(t, f, remote, data)
+	var err error
+	if r.useMount {
+		err = r.retryBlock(func() error {
+			return ioutil.WriteFile(path.Join(r.mntDir, remote), data, 0600)
+		}, 3, time.Second*3)
+		require.NoError(t, err)
+		r.vfs.WaitForWriters(10 * time.Second)
+	} else {
+		r.writeObjectBytes(t, f, remote, data)
+	}
 }
 func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.ReadCloser) {
-	r.writeObjectReader(t, f, remote, in)
+	defer func() {
+		_ = in.Close()
+	}()
+	if r.useMount {
+		out, err := os.Create(path.Join(r.mntDir, remote))
+		require.NoError(t, err)
+		defer func() {
+			_ = out.Close()
+		}()
+		_, err = io.Copy(out, in)
+		require.NoError(t, err)
+		r.vfs.WaitForWriters(10 * time.Second)
+	} else {
+		r.writeObjectReader(t, f, remote, in)
+	}
 }
 func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
 	in := bytes.NewReader(data)
 	_ = r.writeObjectReader(t, f, remote, in)
-	o, err := f.NewObject(context.Background(), remote)
+	o, err := f.NewObject(remote)
 	require.NoError(t, err)
 	require.Equal(t, int64(len(data)), o.Size())
 	return o
@@ -1019,8 +1252,12 @@ func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte
 func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Reader) fs.Object {
 	modTime := time.Now()
 	objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
-	obj, err := f.Put(context.Background(), in, objInfo)
+	obj, err := f.Put(in, objInfo)
 	require.NoError(t, err)
+	if r.useMount {
+		r.vfs.WaitForWriters(10 * time.Second)
+	}
 	return obj
 }
@@ -1028,16 +1265,26 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b
 	var err error
 	var obj fs.Object
-	in1 := bytes.NewReader(data1)
-	in2 := bytes.NewReader(data2)
-	objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
-	objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
+	if r.useMount {
+		err = ioutil.WriteFile(path.Join(r.mntDir, remote), data1, 0600)
+		require.NoError(t, err)
+		r.vfs.WaitForWriters(10 * time.Second)
+		err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600)
+		require.NoError(t, err)
+		r.vfs.WaitForWriters(10 * time.Second)
+		obj, err = f.NewObject(remote)
+	} else {
+		in1 := bytes.NewReader(data1)
+		in2 := bytes.NewReader(data2)
+		objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
+		objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
-	obj, err = f.Put(context.Background(), in1, objInfo1)
-	require.NoError(t, err)
-	obj, err = f.NewObject(context.Background(), remote)
-	require.NoError(t, err)
-	err = obj.Update(context.Background(), in2, objInfo2)
+		obj, err = f.Put(in1, objInfo1)
+		require.NoError(t, err)
+		obj, err = f.NewObject(remote)
+		require.NoError(t, err)
+		err = obj.Update(in2, objInfo2)
+	}
 	require.NoError(t, err)
 	return obj
@@ -1047,12 +1294,30 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
 	size := end - offset
 	checkSample := make([]byte, size)
-	co, err := f.NewObject(context.Background(), remote)
-	if err != nil {
-		return checkSample, err
+	if r.useMount {
+		f, err := os.Open(path.Join(r.mntDir, remote))
+		defer func() {
+			_ = f.Close()
+		}()
+		if err != nil {
+			return checkSample, err
+		}
+		_, _ = f.Seek(offset, io.SeekStart)
+		totalRead, err := io.ReadFull(f, checkSample)
+		checkSample = checkSample[:totalRead]
+		if err == io.EOF || err == io.ErrUnexpectedEOF {
+			err = nil
+		}
+		if err != nil {
+			return checkSample, err
+		}
+	} else {
+		co, err := f.NewObject(remote)
+		if err != nil {
+			return checkSample, err
+		}
+		checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
 	}
-	checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
 	if !noLengthCheck && size != int64(len(checkSample)) {
 		return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
 	}
@@ -1062,7 +1327,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
 func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLengthCheck bool) []byte {
 	size := end - offset
 	checkSample := make([]byte, size)
-	reader, err := o.Open(context.Background(), &fs.SeekOption{Offset: offset})
+	reader, err := o.Open(&fs.SeekOption{Offset: offset})
 	require.NoError(t, err)
 	totalRead, err := io.ReadFull(reader, checkSample)
 	if (err == io.EOF || err == io.ErrUnexpectedEOF) && noLengthCheck {
@@ -1075,19 +1340,28 @@ func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLe
 }
 func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
-	err := f.Mkdir(context.Background(), remote)
+	var err error
+	if r.useMount {
+		err = os.Mkdir(path.Join(r.mntDir, remote), 0700)
+	} else {
+		err = f.Mkdir(remote)
+	}
 	require.NoError(t, err)
 }
 func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
 	var err error
-	var obj fs.Object
-	obj, err = f.NewObject(context.Background(), remote)
-	if err != nil {
-		err = f.Rmdir(context.Background(), remote)
+	if r.useMount {
+		err = os.Remove(path.Join(r.mntDir, remote))
 	} else {
-		err = obj.Remove(context.Background())
+		var obj fs.Object
+		obj, err = f.NewObject(remote)
+		if err != nil {
+			err = f.Rmdir(remote)
+		} else {
+			err = obj.Remove()
+		}
 	}
 	return err
@@ -1096,14 +1370,42 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
 func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
 	var err error
 	var l []interface{}
-	var list fs.DirEntries
-	list, err = f.List(context.Background(), remote)
-	for _, ll := range list {
-		l = append(l, ll)
+	if r.useMount {
+		var list []os.FileInfo
+		list, err = ioutil.ReadDir(path.Join(r.mntDir, remote))
+		for _, ll := range list {
+			l = append(l, ll)
+		}
+	} else {
+		var list fs.DirEntries
+		list, err = f.List(remote)
+		for _, ll := range list {
+			l = append(l, ll)
+		}
 	}
 	return l, err
 }
+func (r *run) listPath(t *testing.T, f fs.Fs, remote string) []string {
+	var err error
+	var l []string
+	if r.useMount {
+		var list []os.FileInfo
+		list, err = ioutil.ReadDir(path.Join(r.mntDir, remote))
+		for _, ll := range list {
+			l = append(l, ll.Name())
+		}
+	} else {
+		var list fs.DirEntries
+		list, err = f.List(remote)
+		for _, ll := range list {
+			l = append(l, ll.Remote())
+		}
+	}
+	require.NoError(t, err)
+	return l
+}
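Note: from list and listPath onwards, every helper branches on r.useMount — one code path through the OS against the mounted directory, one through the fs.Fs API. A reduced sketch of that dual-path shape put behind a single interface (the names here are hypothetical, not rclone API):

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"
        "path/filepath"
    )

    // lister abstracts "list a directory" so the same test assertions
    // can run against a mounted path or against a backend API.
    type lister interface {
        list(dir string) ([]string, error)
    }

    // mountLister goes through the OS, like the useMount branches above.
    type mountLister struct{ mntDir string }

    func (m mountLister) list(dir string) ([]string, error) {
        infos, err := ioutil.ReadDir(filepath.Join(m.mntDir, dir))
        if err != nil {
            return nil, err
        }
        var names []string
        for _, fi := range infos {
            names = append(names, fi.Name())
        }
        return names, nil
    }

    func main() {
        var l lister = mountLister{mntDir: os.TempDir()}
        names, err := l.list(".")
        fmt.Println(len(names), err)
    }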
 func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
 	in, err := os.Open(src)
 	if err != nil {
@@ -1128,8 +1430,14 @@ func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
 func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
 	var err error
-	if rootFs.Features().DirMove != nil {
-		err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst)
+	if runInstance.useMount {
+		err = os.Rename(path.Join(runInstance.mntDir, src), path.Join(runInstance.mntDir, dst))
+		if err != nil {
+			return err
+		}
+		r.vfs.WaitForWriters(10 * time.Second)
+	} else if rootFs.Features().DirMove != nil {
+		err = rootFs.Features().DirMove(rootFs, src, dst)
 		if err != nil {
 			return err
 		}
@@ -1144,12 +1452,18 @@ func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
 func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
 	var err error
-	if rootFs.Features().Move != nil {
-		obj1, err := rootFs.NewObject(context.Background(), src)
+	if runInstance.useMount {
+		err = os.Rename(path.Join(runInstance.mntDir, src), path.Join(runInstance.mntDir, dst))
 		if err != nil {
 			return err
 		}
-		_, err = rootFs.Features().Move(context.Background(), obj1, dst)
+		r.vfs.WaitForWriters(10 * time.Second)
+	} else if rootFs.Features().Move != nil {
+		obj1, err := rootFs.NewObject(src)
+		if err != nil {
+			return err
+		}
+		_, err = rootFs.Features().Move(obj1, dst)
 		if err != nil {
 			return err
 		}
@@ -1164,12 +1478,18 @@ func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
 func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
 	var err error
-	if rootFs.Features().Copy != nil {
-		obj, err := rootFs.NewObject(context.Background(), src)
+	if r.useMount {
+		err = r.copyFile(t, rootFs, path.Join(r.mntDir, src), path.Join(r.mntDir, dst))
 		if err != nil {
 			return err
 		}
-		_, err = rootFs.Features().Copy(context.Background(), obj, dst)
+		r.vfs.WaitForWriters(10 * time.Second)
+	} else if rootFs.Features().Copy != nil {
+		obj, err := rootFs.NewObject(src)
+		if err != nil {
+			return err
+		}
+		_, err = rootFs.Features().Copy(obj, dst)
 		if err != nil {
 			return err
 		}
@@ -1184,17 +1504,31 @@ func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
 func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error) {
 	var err error
-	obj1, err := rootFs.NewObject(context.Background(), src)
+	if r.useMount {
+		fi, err := os.Stat(path.Join(runInstance.mntDir, src))
+		if err != nil {
+			return time.Time{}, err
+		}
+		return fi.ModTime(), nil
+	}
+	obj1, err := rootFs.NewObject(src)
 	if err != nil {
 		return time.Time{}, err
 	}
-	return obj1.ModTime(context.Background()), nil
+	return obj1.ModTime(), nil
 }
 func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
 	var err error
-	obj1, err := rootFs.NewObject(context.Background(), src)
+	if r.useMount {
+		fi, err := os.Stat(path.Join(runInstance.mntDir, src))
+		if err != nil {
+			return int64(0), err
+		}
+		return fi.Size(), nil
+	}
+	obj1, err := rootFs.NewObject(src)
 	if err != nil {
 		return int64(0), err
 	}
@@ -1204,15 +1538,26 @@ func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
 func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) error {
 	var err error
-	var obj1 fs.Object
-	obj1, err = rootFs.NewObject(context.Background(), src)
-	if err != nil {
-		return err
+	if r.useMount {
+		f, err := os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
+		if err != nil {
+			return err
+		}
+		defer func() {
+			_ = f.Close()
+			r.vfs.WaitForWriters(10 * time.Second)
+		}()
+		_, err = f.WriteString(data + append)
+	} else {
+		obj1, err := rootFs.NewObject(src)
+		if err != nil {
+			return err
+		}
+		data1 := []byte(data + append)
+		r := bytes.NewReader(data1)
+		objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
+		err = obj1.Update(r, objInfo1)
 	}
-	data1 := []byte(data + append)
-	reader := bytes.NewReader(data1)
-	objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
-	err = obj1.Update(context.Background(), reader, objInfo1)
 	return err
 }
@@ -1336,13 +1681,15 @@ func (r *run) getCacheFs(f fs.Fs) (*cache.Fs, error) {
 	cfs, ok := f.(*cache.Fs)
 	if ok {
 		return cfs, nil
-	}
-	if f.Features().UnWrap != nil {
-		cfs, ok := f.Features().UnWrap().(*cache.Fs)
-		if ok {
-			return cfs, nil
+	} else {
+		if f.Features().UnWrap != nil {
+			cfs, ok := f.Features().UnWrap().(*cache.Fs)
+			if ok {
+				return cfs, nil
+			}
 		}
 	}
 	return nil, errors.New("didn't found a cache fs")
 }
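Note: getCacheFs above type-asserts the Fs directly and, failing that, tries one level of UnWrap (covering a crypt remote wrapping the cache). A toy sketch of the same idea generalised to a loop over an arbitrary wrapping chain — an illustration with made-up types, not rclone's interfaces:

    package main

    import (
        "errors"
        "fmt"
    )

    // wrapped is a stand-in for an fs.Fs that may wrap another fs.
    type wrapped interface{ unwrap() wrapped }

    type cacheFs struct{}

    func (cacheFs) unwrap() wrapped { return nil }

    type cryptFs struct{ inner wrapped }

    func (c cryptFs) unwrap() wrapped { return c.inner }

    // findCache walks the wrapping chain looking for the cache layer,
    // as getCacheFs does with f.Features().UnWrap().
    func findCache(f wrapped) (cacheFs, error) {
        for f != nil {
            if c, ok := f.(cacheFs); ok {
                return c, nil
            }
            f = f.unwrap()
        }
        return cacheFs{}, errors.New("no cache fs found")
    }

    func main() {
        c, err := findCache(cryptFs{inner: cacheFs{}})
        fmt.Println(c, err)
    }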

backend/cache/cache_mount_unix_test.go (new file)

@@ -0,0 +1,78 @@
// +build !plan9,!windows
package cache_test
import (
"os"
"testing"
"time"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/ncw/rclone/cmd/mount"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/stretchr/testify/require"
)
func (r *run) mountFs(t *testing.T, f fs.Fs) {
device := f.Name() + ":" + f.Root()
var options = []fuse.MountOption{
fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
fuse.Subtype("rclone"),
fuse.FSName(device), fuse.VolumeName(device),
fuse.NoAppleDouble(),
fuse.NoAppleXattr(),
//fuse.AllowOther(),
}
err := os.MkdirAll(r.mntDir, os.ModePerm)
require.NoError(t, err)
c, err := fuse.Mount(r.mntDir, options...)
require.NoError(t, err)
filesys := mount.NewFS(f)
server := fusefs.New(c, nil)
// Serve the mount point in the background returning error to errChan
r.unmountRes = make(chan error, 1)
go func() {
err := server.Serve(filesys)
closeErr := c.Close()
if err == nil {
err = closeErr
}
r.unmountRes <- err
}()
// check if the mount process has an error to report
<-c.Ready
require.NoError(t, c.MountError)
r.unmountFn = func() error {
// Shutdown the VFS
filesys.VFS.Shutdown()
return fuse.Unmount(r.mntDir)
}
r.vfs = filesys.VFS
r.isMounted = true
}
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
var err error
for i := 0; i < 4; i++ {
err = r.unmountFn()
if err != nil {
//log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
continue
}
break
}
require.NoError(t, err)
err = <-r.unmountRes
require.NoError(t, err)
err = r.vfs.CleanUp()
require.NoError(t, err)
r.isMounted = false
}
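Note: mountFs above serves the FUSE filesystem in a goroutine and parks the eventual serve error on the unmountRes channel; unmountFs retries the unmount and then collects that error. The same serve/teardown shape with a stand-in server and no FUSE dependency — a sketch of the pattern, not of the bazil.org/fuse API:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    type server struct{ stop chan struct{} }

    func (s *server) serve() error { <-s.stop; return nil } // blocks until stopped

    func (s *server) shutdown() error {
        select {
        case <-s.stop:
            return errors.New("already stopped")
        default:
            close(s.stop)
            return nil
        }
    }

    func main() {
        s := &server{stop: make(chan struct{})}

        // Serve in the background, reporting the final error on a
        // channel, as mountFs does with unmountRes.
        res := make(chan error, 1)
        go func() { res <- s.serve() }()

        // Tear down with retries, as unmountFs does.
        var err error
        for i := 0; i < 4; i++ {
            if err = s.shutdown(); err == nil {
                break
            }
            time.Sleep(10 * time.Millisecond)
        }
        fmt.Println("shutdown:", err, "serve:", <-res)
    }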

backend/cache/cache_mount_windows_test.go (new file)

@@ -0,0 +1,124 @@
// +build windows
package cache_test
import (
"fmt"
"os"
"testing"
"time"
"github.com/billziss-gh/cgofuse/fuse"
"github.com/ncw/rclone/cmd/cmount"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
)
// waitFor runs fn() until it returns true or the timeout expires
func waitFor(fn func() bool) (ok bool) {
const totalWait = 10 * time.Second
const individualWait = 10 * time.Millisecond
for i := 0; i < int(totalWait/individualWait); i++ {
ok = fn()
if ok {
return ok
}
time.Sleep(individualWait)
}
return false
}
func (r *run) mountFs(t *testing.T, f fs.Fs) {
// FIXME implement cmount
t.Skip("windows not supported yet")
device := f.Name() + ":" + f.Root()
options := []string{
"-o", "fsname=" + device,
"-o", "subtype=rclone",
"-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
"-o", "uid=-1",
"-o", "gid=-1",
"-o", "allow_other",
// This causes FUSE to supply O_TRUNC with the Open
// call which is more efficient for cmount. However
// it does not work with cgofuse on Windows with
// WinFSP so cmount must work with or without it.
"-o", "atomic_o_trunc",
"--FileSystemName=rclone",
}
fsys := cmount.NewFS(f)
host := fuse.NewFileSystemHost(fsys)
// Serve the mount point in the background returning error to errChan
r.unmountRes = make(chan error, 1)
go func() {
var err error
ok := host.Mount(r.mntDir, options)
if !ok {
err = errors.New("mount failed")
}
r.unmountRes <- err
}()
// unmount
r.unmountFn = func() error {
// Shutdown the VFS
fsys.VFS.Shutdown()
if host.Unmount() {
if !waitFor(func() bool {
_, err := os.Stat(r.mntDir)
return err != nil
}) {
t.Fatalf("mountpoint %q didn't disappear after unmount - continuing anyway", r.mntDir)
}
return nil
}
return errors.New("host unmount failed")
}
// Wait for the filesystem to become ready, checking the file
// system didn't blow up before starting
select {
case err := <-r.unmountRes:
require.NoError(t, err)
case <-time.After(time.Second * 3):
}
// Wait for the mount point to be available on Windows
// On Windows the Init signal comes slightly before the mount is ready
if !waitFor(func() bool {
_, err := os.Stat(r.mntDir)
return err == nil
}) {
t.Errorf("mountpoint %q didn't became available on mount", r.mntDir)
}
r.vfs = fsys.VFS
r.isMounted = true
}
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
// FIXME implement cmount
t.Skip("windows not supported yet")
var err error
for i := 0; i < 4; i++ {
err = r.unmountFn()
if err != nil {
//log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
continue
}
break
}
require.NoError(t, err)
err = <-r.unmountRes
require.NoError(t, err)
err = r.vfs.CleanUp()
require.NoError(t, err)
r.isMounted = false
}
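Note: the Windows harness above polls with waitFor because, as its comment says, the Init signal arrives slightly before the mountpoint is actually usable. A small standalone usage of the same polling helper:

    package main

    import (
        "fmt"
        "os"
        "time"
    )

    // waitFor runs fn() until it returns true or the timeout expires,
    // mirroring the helper in the Windows mount test above.
    func waitFor(fn func() bool) bool {
        const totalWait = 10 * time.Second
        const individualWait = 10 * time.Millisecond
        for i := 0; i < int(totalWait/individualWait); i++ {
            if fn() {
                return true
            }
            time.Sleep(individualWait)
        }
        return false
    }

    func main() {
        // Wait for a path to exist, as the test does for the mountpoint.
        ok := waitFor(func() bool {
            _, err := os.Stat(os.TempDir())
            return err == nil
        })
        fmt.Println("appeared:", ok)
    }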

backend/cache/cache_test.go

@@ -1,25 +1,21 @@
 // Test Cache filesystem interface
-// +build !plan9,!js
-// +build !race
+// +build !plan9
 package cache_test
 import (
 	"testing"
-	"github.com/rclone/rclone/backend/cache"
-	_ "github.com/rclone/rclone/backend/local"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/cache"
+	_ "github.com/ncw/rclone/backend/local"
+	"github.com/ncw/rclone/fstest/fstests"
 )
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestCache:",
 		NilObject:  (*cache.Object)(nil),
-		UnimplementableFsMethods:     []string{"PublicLink", "OpenWriterAt"},
-		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
-		SkipInvalidUTF8:              true, // invalid UTF-8 confuses the cache
 	})
 }

backend/cache/cache_unsupported.go

@@ -1,6 +1,6 @@
 // Build for cache for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
-// +build plan9 js
+// +build plan9
 package cache

backend/cache/cache_upload_test.go

@@ -1,11 +1,8 @@
-// +build !plan9,!js
-// +build !race
+// +build !plan9
 package cache_test
 import (
-	"context"
-	"fmt"
 	"math/rand"
 	"os"
 	"path"
@@ -13,9 +10,11 @@ import (
 	"testing"
 	"time"
-	"github.com/rclone/rclone/backend/cache"
-	_ "github.com/rclone/rclone/backend/drive"
-	"github.com/rclone/rclone/fs"
+	"fmt"
+
+	"github.com/ncw/rclone/backend/cache"
+	_ "github.com/ncw/rclone/backend/drive"
+	"github.com/ncw/rclone/fs"
 	"github.com/stretchr/testify/require"
 )
@@ -23,7 +22,7 @@ func TestInternalUploadTempDirCreated(t *testing.T) {
 	id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
 		nil,
-		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
+		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id)})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 	_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
@@ -64,7 +63,7 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
 	id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
 		nil,
-		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
+		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "0s"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 	testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
@@ -74,7 +73,7 @@ func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
 	id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
 		nil,
-		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
+		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1m"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 	testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
@@ -84,14 +83,14 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
 	id := fmt.Sprintf("tiumef%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
 		nil,
-		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
+		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "3s"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
-	err := rootFs.Mkdir(context.Background(), "one")
+	err := rootFs.Mkdir("one")
 	require.NoError(t, err)
-	err = rootFs.Mkdir(context.Background(), "one/test")
+	err = rootFs.Mkdir("one/test")
 	require.NoError(t, err)
-	err = rootFs.Mkdir(context.Background(), "second")
+	err = rootFs.Mkdir("second")
 	require.NoError(t, err)
 	// create some rand test data
@@ -124,11 +123,11 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
 		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
-	err := rootFs.Mkdir(context.Background(), "one")
+	err := rootFs.Mkdir("one")
 	require.NoError(t, err)
-	err = rootFs.Mkdir(context.Background(), "one/test")
+	err = rootFs.Mkdir("one/test")
 	require.NoError(t, err)
-	err = rootFs.Mkdir(context.Background(), "second")
+	err = rootFs.Mkdir("second")
 	require.NoError(t, err)
 	// create some rand test data
@@ -164,10 +163,10 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
 	id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
 		nil,
-		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
+		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1s"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
-	err := rootFs.Mkdir(context.Background(), "test")
+	err := rootFs.Mkdir("test")
 	require.NoError(t, err)
 	minSize := 5242880
 	maxSize := 10485760
@@ -214,7 +213,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	id := "tiutfo"
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
 		nil,
-		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
+		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 	boltDb.PurgeTempUploads()
@@ -235,9 +234,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	err = runInstance.dirMove(t, rootFs, "test", "second")
 	if err != errNotSupported {
 		require.NoError(t, err)
-		_, err = rootFs.NewObject(context.Background(), "test/one")
+		_, err = rootFs.NewObject("test/one")
 		require.Error(t, err)
-		_, err = rootFs.NewObject(context.Background(), "second/one")
+		_, err = rootFs.NewObject("second/one")
 		require.NoError(t, err)
 		// validate that it exists in temp fs
 		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -258,7 +257,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	err = runInstance.rm(t, rootFs, "test")
 	require.Error(t, err)
 	require.Contains(t, err.Error(), "directory not empty")
-	_, err = rootFs.NewObject(context.Background(), "test/one")
+	_, err = rootFs.NewObject("test/one")
 	require.NoError(t, err)
 	// validate that it exists in temp fs
 	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -272,9 +271,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	if err != errNotSupported {
 		require.NoError(t, err)
 		// try to read from it
-		_, err = rootFs.NewObject(context.Background(), "test/one")
+		_, err = rootFs.NewObject("test/one")
 		require.Error(t, err)
-		_, err = rootFs.NewObject(context.Background(), "test/second")
+		_, err = rootFs.NewObject("test/second")
 		require.NoError(t, err)
 		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
 		require.NoError(t, err)
@@ -291,9 +290,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
 	if err != errNotSupported {
 		require.NoError(t, err)
-		_, err = rootFs.NewObject(context.Background(), "test/one")
+		_, err = rootFs.NewObject("test/one")
 		require.NoError(t, err)
-		_, err = rootFs.NewObject(context.Background(), "test/third")
+		_, err = rootFs.NewObject("test/third")
 		require.NoError(t, err)
 		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
 		require.NoError(t, err)
@@ -308,7 +307,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	// test Remove -- allowed
 	err = runInstance.rm(t, rootFs, "test/one")
 	require.NoError(t, err)
-	_, err = rootFs.NewObject(context.Background(), "test/one")
+	_, err = rootFs.NewObject("test/one")
 	require.Error(t, err)
 	// validate that it doesn't exist in temp fs
 	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -320,7 +319,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated") err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
require.NoError(t, err) require.NoError(t, err)
obj2, err := rootFs.NewObject(context.Background(), "test/one") obj2, err := rootFs.NewObject("test/one")
require.NoError(t, err) require.NoError(t, err)
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false) data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
require.Equal(t, "one content updated", string(data2)) require.Equal(t, "one content updated", string(data2))
@@ -344,7 +343,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo" id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil, nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"}) map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb) defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads() boltDb.PurgeTempUploads()
@@ -368,7 +367,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
err = runInstance.dirMove(t, rootFs, "test", "second") err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported { if err != errNotSupported {
require.Error(t, err) require.Error(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one") _, err = rootFs.NewObject("test/one")
require.NoError(t, err) require.NoError(t, err)
// validate that it exists in temp fs // validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -380,7 +379,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
// test Rmdir // test Rmdir
err = runInstance.rm(t, rootFs, "test") err = runInstance.rm(t, rootFs, "test")
require.Error(t, err) require.Error(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one") _, err = rootFs.NewObject("test/one")
require.NoError(t, err) require.NoError(t, err)
// validate that it doesn't exist in temp fs // validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -391,9 +390,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
if err != errNotSupported { if err != errNotSupported {
require.Error(t, err) require.Error(t, err)
// try to read from it // try to read from it
_, err = rootFs.NewObject(context.Background(), "test/one") _, err = rootFs.NewObject("test/one")
require.NoError(t, err) require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/second") _, err = rootFs.NewObject("test/second")
require.Error(t, err) require.Error(t, err)
// validate that it exists in temp fs // validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -406,9 +405,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third")) err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported { if err != errNotSupported {
require.NoError(t, err) require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one") _, err = rootFs.NewObject("test/one")
require.NoError(t, err) require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/third") _, err = rootFs.NewObject("test/third")
require.NoError(t, err) require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false) data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err) require.NoError(t, err)
@@ -423,7 +422,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
// test Remove // test Remove
err = runInstance.rm(t, rootFs, "test/one") err = runInstance.rm(t, rootFs, "test/one")
require.Error(t, err) require.Error(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one") _, err = rootFs.NewObject("test/one")
require.NoError(t, err) require.NoError(t, err)
// validate that it doesn't exist in temp fs // validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))


@@ -1,18 +1,18 @@
// +build !plan9,!js // +build !plan9
package cache package cache
import ( import (
"context"
"path"
"time" "time"
"github.com/rclone/rclone/fs" "path"
"github.com/ncw/rclone/fs"
) )
// Directory is a generic dir that stores basic information about it // Directory is a generic dir that stores basic information about it
type Directory struct { type Directory struct {
Directory fs.Directory `json:"-"` // can be nil fs.Directory `json:"-"`
CacheFs *Fs `json:"-"` // cache fs CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory Name string `json:"name"` // name of the directory
@@ -56,7 +56,7 @@ func ShallowDirectory(f *Fs, remote string) *Directory {
} }
// DirectoryFromOriginal builds one from a generic fs.Directory // DirectoryFromOriginal builds one from a generic fs.Directory
func DirectoryFromOriginal(ctx context.Context, f *Fs, d fs.Directory) *Directory { func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
var cd *Directory var cd *Directory
fullRemote := path.Join(f.Root(), d.Remote()) fullRemote := path.Join(f.Root(), d.Remote())
@@ -68,7 +68,7 @@ func DirectoryFromOriginal(ctx context.Context, f *Fs, d fs.Directory) *Directory {
CacheFs: f, CacheFs: f,
Name: name, Name: name,
Dir: dir, Dir: dir,
CacheModTime: d.ModTime(ctx).UnixNano(), CacheModTime: d.ModTime().UnixNano(),
CacheSize: d.Size(), CacheSize: d.Size(),
CacheItems: d.Items(), CacheItems: d.Items(),
CacheType: "Directory", CacheType: "Directory",
@@ -101,8 +101,17 @@ func (d *Directory) abs() string {
return cleanPath(path.Join(d.Dir, d.Name)) return cleanPath(path.Join(d.Dir, d.Name))
} }
// parentRemote returns the absolute path parent remote
func (d *Directory) parentRemote() string {
absPath := d.abs()
if absPath == "" {
return ""
}
return cleanPath(path.Dir(absPath))
}
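
As a worked illustration of parentRemote (values hypothetical, not from the diff): a Directory with Dir "movies" and Name "comedy" gives d.abs() == "movies/comedy" and d.parentRemote() == "movies", i.e. cleanPath(path.Dir(...)) of the absolute path; the empty string is returned only for the root, where abs() itself is empty.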
// ModTime returns the cached ModTime // ModTime returns the cached ModTime
func (d *Directory) ModTime(ctx context.Context) time.Time { func (d *Directory) ModTime() time.Time {
return time.Unix(0, d.CacheModTime) return time.Unix(0, d.CacheModTime)
} }
@@ -116,14 +125,6 @@ func (d *Directory) Items() int64 {
return d.CacheItems return d.CacheItems
} }
// ID returns the ID of the cached directory if known
func (d *Directory) ID() string {
if d.Directory == nil {
return ""
}
return d.Directory.ID()
}
var ( var (
_ fs.Directory = (*Directory)(nil) _ fs.Directory = (*Directory)(nil)
) )


@@ -1,20 +1,20 @@
// +build !plan9,!js // +build !plan9
package cache package cache
import ( import (
"context"
"fmt" "fmt"
"io" "io"
"path"
"runtime"
"strings"
"sync" "sync"
"time" "time"
"path"
"runtime"
"strings"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
) )
var uploaderMap = make(map[string]*backgroundWriter) var uploaderMap = make(map[string]*backgroundWriter)
@@ -41,7 +41,6 @@ func initBackgroundUploader(fs *Fs) (*backgroundWriter, error) {
// Handle is managing the read/write/seek operations on an open handle // Handle is managing the read/write/seek operations on an open handle
type Handle struct { type Handle struct {
ctx context.Context
cachedObject *Object cachedObject *Object
cfs *Fs cfs *Fs
memory *Memory memory *Memory
@@ -50,32 +49,30 @@ type Handle struct {
offset int64 offset int64
seenOffsets map[int64]bool seenOffsets map[int64]bool
mu sync.Mutex mu sync.Mutex
workersWg sync.WaitGroup
confirmReading chan bool confirmReading chan bool
workers int
maxWorkerID int UseMemory bool
UseMemory bool workers []*worker
closed bool closed bool
reading bool reading bool
} }
// NewObjectHandle returns a new Handle for an existing Object // NewObjectHandle returns a new Handle for an existing Object
func NewObjectHandle(ctx context.Context, o *Object, cfs *Fs) *Handle { func NewObjectHandle(o *Object, cfs *Fs) *Handle {
r := &Handle{ r := &Handle{
ctx: ctx,
cachedObject: o, cachedObject: o,
cfs: cfs, cfs: cfs,
offset: 0, offset: 0,
preloadOffset: -1, // -1 to trigger the first preload preloadOffset: -1, // -1 to trigger the first preload
UseMemory: !cfs.opt.ChunkNoMemory, UseMemory: cfs.chunkMemory,
reading: false, reading: false,
} }
r.seenOffsets = make(map[int64]bool) r.seenOffsets = make(map[int64]bool)
r.memory = NewMemory(-1) r.memory = NewMemory(-1)
// create a larger buffer to queue up requests // create a larger buffer to queue up requests
r.preloadQueue = make(chan int64, r.cfs.opt.TotalWorkers*10) r.preloadQueue = make(chan int64, r.cfs.totalWorkers*10)
r.confirmReading = make(chan bool) r.confirmReading = make(chan bool)
r.startReadWorkers() r.startReadWorkers()
return r return r
@@ -98,10 +95,10 @@ func (r *Handle) String() string {
// startReadWorkers will start the worker pool // startReadWorkers will start the worker pool
func (r *Handle) startReadWorkers() { func (r *Handle) startReadWorkers() {
if r.workers > 0 { if r.hasAtLeastOneWorker() {
return return
} }
totalWorkers := r.cacheFs().opt.TotalWorkers totalWorkers := r.cacheFs().totalWorkers
if r.cacheFs().plexConnector.isConfigured() { if r.cacheFs().plexConnector.isConfigured() {
if !r.cacheFs().plexConnector.isConnected() { if !r.cacheFs().plexConnector.isConnected() {
@@ -120,27 +117,26 @@ func (r *Handle) startReadWorkers() {
// scaleOutWorkers will increase the worker pool count by the provided amount // scaleOutWorkers will increase the worker pool count by the provided amount
func (r *Handle) scaleWorkers(desired int) { func (r *Handle) scaleWorkers(desired int) {
current := r.workers current := len(r.workers)
if current == desired { if current == desired {
return return
} }
if current > desired { if current > desired {
// scale in gracefully // scale in gracefully
for r.workers > desired { for i := 0; i < current-desired; i++ {
r.preloadQueue <- -1 r.preloadQueue <- -1
r.workers--
} }
} else { } else {
// scale out // scale out
for r.workers < desired { for i := 0; i < desired-current; i++ {
w := &worker{ w := &worker{
r: r, r: r,
id: r.maxWorkerID, ch: r.preloadQueue,
id: current + i,
} }
r.workersWg.Add(1)
r.workers++
r.maxWorkerID++
go w.run() go w.run()
r.workers = append(r.workers, w)
} }
} }
// ignore first scale out from 0 // ignore first scale out from 0
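
The newer side of the hunk above replaces the worker slice with a plain counter plus a sync.WaitGroup, and stops workers by pushing a sentinel -1 onto the shared queue. A minimal standalone sketch of that scale-in/scale-out pattern (pool, scale and the queue size are illustrative names and values, not the backend's):

package main

import (
	"fmt"
	"sync"
)

type pool struct {
	queue   chan int64 // offsets to preload; -1 tells one worker to exit
	wg      sync.WaitGroup
	workers int
}

func (p *pool) scale(desired int) {
	for p.workers > desired { // scale in: each sentinel stops exactly one worker
		p.queue <- -1
		p.workers--
	}
	for p.workers < desired { // scale out: start another goroutine
		p.wg.Add(1)
		p.workers++
		go func() {
			defer p.wg.Done()
			for off := range p.queue {
				if off < 0 {
					return
				}
				fmt.Println("preload at", off)
			}
		}()
	}
}

func main() {
	p := &pool{queue: make(chan int64, 16)}
	p.scale(4)
	p.queue <- 0
	p.scale(0) // queues four sentinels
	p.wg.Wait()
}

Waiting on the group, as the newer Close does further down, replaces the old polling loop that slept and re-checked each worker's running flag.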
@@ -152,7 +148,7 @@ func (r *Handle) scaleWorkers(desired int) {
func (r *Handle) confirmExternalReading() { func (r *Handle) confirmExternalReading() {
// if we have a max value of workers // if we have a max value of workers
// then we skip this step // then we skip this step
if r.workers > 1 || if len(r.workers) > 1 ||
!r.cacheFs().plexConnector.isConfigured() { !r.cacheFs().plexConnector.isConfigured() {
return return
} }
@@ -160,7 +156,7 @@ func (r *Handle) confirmExternalReading() {
return return
} }
fs.Infof(r, "confirmed reading by external reader") fs.Infof(r, "confirmed reading by external reader")
r.scaleWorkers(r.cacheFs().opt.TotalWorkers) r.scaleWorkers(r.cacheFs().totalMaxWorkers)
} }
// queueOffset will send an offset to the workers if it's different from the last one // queueOffset will send an offset to the workers if it's different from the last one
@@ -182,8 +178,8 @@ func (r *Handle) queueOffset(offset int64) {
} }
} }
for i := 0; i < r.workers; i++ { for i := 0; i < len(r.workers); i++ {
o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i) o := r.preloadOffset + r.cacheFs().chunkSize*int64(i)
if o < 0 || o >= r.cachedObject.Size() { if o < 0 || o >= r.cachedObject.Size() {
continue continue
} }
@@ -197,6 +193,16 @@ func (r *Handle) queueOffset(offset int64) {
} }
} }
func (r *Handle) hasAtLeastOneWorker() bool {
oneWorker := false
for i := 0; i < len(r.workers); i++ {
if r.workers[i].isRunning() {
oneWorker = true
}
}
return oneWorker
}
// getChunk is called by the FS to retrieve a specific chunk of known start and size from where it can find it // getChunk is called by the FS to retrieve a specific chunk of known start and size from where it can find it
// it can be from transient or persistent cache // it can be from transient or persistent cache
// it will also build the chunk from the cache's specific chunk boundaries and build the final desired chunk in a buffer // it will also build the chunk from the cache's specific chunk boundaries and build the final desired chunk in a buffer
@@ -205,7 +211,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
var err error var err error
// we calculate the modulus of the requested offset with the size of a chunk // we calculate the modulus of the requested offset with the size of a chunk
offset := chunkStart % int64(r.cacheFs().opt.ChunkSize) offset := chunkStart % r.cacheFs().chunkSize
// we align the start offset of the first chunk to a likely chunk in the storage // we align the start offset of the first chunk to a likely chunk in the storage
chunkStart = chunkStart - offset chunkStart = chunkStart - offset
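
The alignment above is plain modular arithmetic. Using the 5 MiB (5242880 byte) chunk size from the tests at the top of this diff, a read at byte 12582912 (12 MiB) works out as:

offset     = 12582912 % 5242880 = 2097152   // position inside the chunk
chunkStart = 12582912 - 2097152 = 10485760  // aligned chunk to fetch

and the first 2097152 bytes of the fetched chunk are later discarded via data = data[int(offset):].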
@@ -222,7 +228,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
if !found { if !found {
// we're gonna give the workers a chance to pickup the chunk // we're gonna give the workers a chance to pickup the chunk
// and retry a couple of times // and retry a couple of times
for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ { for i := 0; i < r.cacheFs().readRetries*8; i++ {
data, err = r.storage().GetChunk(r.cachedObject, chunkStart) data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
if err == nil { if err == nil {
found = true found = true
@@ -237,7 +243,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
// not found in ram or // not found in ram or
// the worker didn't managed to download the chunk in time so we abort and close the stream // the worker didn't managed to download the chunk in time so we abort and close the stream
if err != nil || len(data) == 0 || !found { if err != nil || len(data) == 0 || !found {
if r.workers == 0 { if !r.hasAtLeastOneWorker() {
fs.Errorf(r, "out of workers") fs.Errorf(r, "out of workers")
return nil, io.ErrUnexpectedEOF return nil, io.ErrUnexpectedEOF
} }
@@ -249,7 +255,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
if offset > 0 { if offset > 0 {
if offset > int64(len(data)) { if offset > int64(len(data)) {
fs.Errorf(r, "unexpected conditions during reading. current position: %v, current chunk position: %v, current chunk size: %v, offset: %v, chunk size: %v, file size: %v", fs.Errorf(r, "unexpected conditions during reading. current position: %v, current chunk position: %v, current chunk size: %v, offset: %v, chunk size: %v, file size: %v",
r.offset, chunkStart, len(data), offset, r.cacheFs().opt.ChunkSize, r.cachedObject.Size()) r.offset, chunkStart, len(data), offset, r.cacheFs().chunkSize, r.cachedObject.Size())
return nil, io.ErrUnexpectedEOF return nil, io.ErrUnexpectedEOF
} }
data = data[int(offset):] data = data[int(offset):]
@@ -298,7 +304,14 @@ func (r *Handle) Close() error {
close(r.preloadQueue) close(r.preloadQueue)
r.closed = true r.closed = true
// wait for workers to complete their jobs before returning // wait for workers to complete their jobs before returning
r.workersWg.Wait() waitCount := 3
for i := 0; i < len(r.workers); i++ {
waitIdx := 0
for r.workers[i].isRunning() && waitIdx < waitCount {
time.Sleep(time.Second)
waitIdx++
}
}
r.memory.db.Flush() r.memory.db.Flush()
fs.Debugf(r, "cache reader closed %v", r.offset) fs.Debugf(r, "cache reader closed %v", r.offset)
@@ -325,9 +338,9 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
err = errors.Errorf("cache: unimplemented seek whence %v", whence) err = errors.Errorf("cache: unimplemented seek whence %v", whence)
} }
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize)) chunkStart := r.offset - (r.offset % r.cacheFs().chunkSize)
if chunkStart >= int64(r.cacheFs().opt.ChunkSize) { if chunkStart >= r.cacheFs().chunkSize {
chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize) chunkStart = chunkStart - r.cacheFs().chunkSize
} }
r.queueOffset(chunkStart) r.queueOffset(chunkStart)
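
Continuing the same numbers: a Seek to byte 12582912 aligns to chunkStart = 10485760, and since that is at least one chunk in, the queue actually starts one chunk earlier, at 10485760 - 5242880 = 5242880, so the workers also warm the chunk just before the seek target.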
@@ -335,9 +348,12 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
} }
type worker struct { type worker struct {
r *Handle r *Handle
rc io.ReadCloser ch <-chan int64
id int rc io.ReadCloser
id int
running bool
mu sync.Mutex
} }
// String is a representation of this worker // String is a representation of this worker
@@ -354,7 +370,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error) {
r := w.rc r := w.rc
if w.rc == nil { if w.rc == nil {
r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) { r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
return w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1}) return w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
}) })
if err != nil { if err != nil {
return nil, err return nil, err
@@ -364,7 +380,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error) {
if !closeOpen { if !closeOpen {
if do, ok := r.(fs.RangeSeeker); ok { if do, ok := r.(fs.RangeSeeker); ok {
_, err = do.RangeSeek(w.r.ctx, offset, io.SeekStart, end-offset) _, err = do.RangeSeek(offset, io.SeekStart, end-offset)
return r, err return r, err
} else if do, ok := r.(io.Seeker); ok { } else if do, ok := r.(io.Seeker); ok {
_, err = do.Seek(offset, io.SeekStart) _, err = do.Seek(offset, io.SeekStart)
@@ -374,7 +390,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error) {
_ = w.rc.Close() _ = w.rc.Close()
return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) { return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
r, err = w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1}) r, err = w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -382,19 +398,33 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error) {
}) })
} }
func (w *worker) isRunning() bool {
w.mu.Lock()
defer w.mu.Unlock()
return w.running
}
func (w *worker) setRunning(f bool) {
w.mu.Lock()
defer w.mu.Unlock()
w.running = f
}
// run is the main loop for the worker which receives offsets to preload // run is the main loop for the worker which receives offsets to preload
func (w *worker) run() { func (w *worker) run() {
var err error var err error
var data []byte var data []byte
defer w.setRunning(false)
defer func() { defer func() {
if w.rc != nil { if w.rc != nil {
_ = w.rc.Close() _ = w.rc.Close()
w.setRunning(false)
} }
w.r.workersWg.Done()
}() }()
for { for {
chunkStart, open := <-w.r.preloadQueue chunkStart, open := <-w.ch
w.setRunning(true)
if chunkStart < 0 || !open { if chunkStart < 0 || !open {
break break
} }
@@ -421,7 +451,7 @@ func (w *worker) run() {
} }
} }
chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize) chunkEnd := chunkStart + w.r.cacheFs().chunkSize
// TODO: Remove this comment if it proves to be reliable for #1896 // TODO: Remove this comment if it proves to be reliable for #1896
//if chunkEnd > w.r.cachedObject.Size() { //if chunkEnd > w.r.cachedObject.Size() {
// chunkEnd = w.r.cachedObject.Size() // chunkEnd = w.r.cachedObject.Size()
@@ -436,7 +466,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
var data []byte var data []byte
// stop retries // stop retries
if retry >= w.r.cacheFs().opt.ReadRetries { if retry >= w.r.cacheFs().readRetries {
return return
} }
// back-off between retries // back-off between retries
@@ -452,7 +482,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
// we seem to be getting only errors so we abort // we seem to be getting only errors so we abort
if err != nil { if err != nil {
fs.Errorf(w, "object open failed %v: %v", chunkStart, err) fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
err = w.r.cachedObject.refreshFromSource(w.r.ctx, true) err = w.r.cachedObject.refreshFromSource(true)
if err != nil { if err != nil {
fs.Errorf(w, "%v", err) fs.Errorf(w, "%v", err)
} }
@@ -465,7 +495,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
sourceRead, err = io.ReadFull(w.rc, data) sourceRead, err = io.ReadFull(w.rc, data)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err) fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
err = w.r.cachedObject.refreshFromSource(w.r.ctx, true) err = w.r.cachedObject.refreshFromSource(true)
if err != nil { if err != nil {
fs.Errorf(w, "%v", err) fs.Errorf(w, "%v", err)
} }
@@ -582,7 +612,7 @@ func (b *backgroundWriter) run() {
return return
} }
absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), time.Duration(b.fs.opt.TempWaitTime)) absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), b.fs.tempWriteWait)
if err != nil || absPath == "" || !b.fs.isRootInPath(absPath) { if err != nil || absPath == "" || !b.fs.isRootInPath(absPath) {
time.Sleep(time.Second) time.Sleep(time.Second)
continue continue
@@ -591,7 +621,7 @@ func (b *backgroundWriter) run() {
remote := b.fs.cleanRootFromPath(absPath) remote := b.fs.cleanRootFromPath(absPath)
b.notify(remote, BackgroundUploadStarted, nil) b.notify(remote, BackgroundUploadStarted, nil)
fs.Infof(remote, "background upload: started upload") fs.Infof(remote, "background upload: started upload")
err = operations.MoveFile(context.TODO(), b.fs.UnWrap(), b.fs.tempFs, remote, remote) err = operations.MoveFile(b.fs.UnWrap(), b.fs.tempFs, remote, remote)
if err != nil { if err != nil {
b.notify(remote, BackgroundUploadError, err) b.notify(remote, BackgroundUploadError, err)
_ = b.fs.cache.rollbackPendingUpload(absPath) _ = b.fs.cache.rollbackPendingUpload(absPath)
@@ -601,14 +631,14 @@ func (b *backgroundWriter) run() {
// clean empty dirs up to root // clean empty dirs up to root
thisDir := cleanPath(path.Dir(remote)) thisDir := cleanPath(path.Dir(remote))
for thisDir != "" { for thisDir != "" {
thisList, err := b.fs.tempFs.List(context.TODO(), thisDir) thisList, err := b.fs.tempFs.List(thisDir)
if err != nil { if err != nil {
break break
} }
if len(thisList) > 0 { if len(thisList) > 0 {
break break
} }
err = b.fs.tempFs.Rmdir(context.TODO(), thisDir) err = b.fs.tempFs.Rmdir(thisDir)
fs.Debugf(thisDir, "cleaned from temp path") fs.Debugf(thisDir, "cleaned from temp path")
if err != nil { if err != nil {
break break


@@ -1,18 +1,17 @@
// +build !plan9,!js // +build !plan9
package cache package cache
import ( import (
"context"
"io" "io"
"path" "path"
"sync" "sync"
"time" "time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers"
) )
const ( const (
@@ -24,16 +23,15 @@ const (
type Object struct { type Object struct {
fs.Object `json:"-"` fs.Object `json:"-"`
ParentFs fs.Fs `json:"-"` // parent fs ParentFs fs.Fs `json:"-"` // parent fs
CacheFs *Fs `json:"-"` // cache fs CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory Name string `json:"name"` // name of the directory
Dir string `json:"dir"` // abs path of the object Dir string `json:"dir"` // abs path of the object
CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown
CacheSize int64 `json:"size"` // size of directory and contents or -1 if unknown CacheSize int64 `json:"size"` // size of directory and contents or -1 if unknown
CacheStorable bool `json:"storable"` // says whether this object can be stored CacheStorable bool `json:"storable"` // says whether this object can be stored
CacheType string `json:"cacheType"` CacheType string `json:"cacheType"`
CacheTs time.Time `json:"cacheTs"` CacheTs time.Time `json:"cacheTs"`
cacheHashesMu sync.Mutex
CacheHashes map[hash.Type]string // all supported hashes cached CacheHashes map[hash.Type]string // all supported hashes cached
refreshMutex sync.Mutex refreshMutex sync.Mutex
@@ -46,7 +44,7 @@ func NewObject(f *Fs, remote string) *Object {
cacheType := objectInCache cacheType := objectInCache
parentFs := f.UnWrap() parentFs := f.UnWrap()
if f.opt.TempWritePath != "" { if f.tempWritePath != "" {
_, err := f.cache.SearchPendingUpload(fullRemote) _, err := f.cache.SearchPendingUpload(fullRemote)
if err == nil { // queued for upload if err == nil { // queued for upload
cacheType = objectPendingUpload cacheType = objectPendingUpload
@@ -70,14 +68,14 @@ func NewObject(f *Fs, remote string) *Object {
} }
// ObjectFromOriginal builds one from a generic fs.Object // ObjectFromOriginal builds one from a generic fs.Object
func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object { func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
var co *Object var co *Object
fullRemote := cleanPath(path.Join(f.Root(), o.Remote())) fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
dir, name := path.Split(fullRemote) dir, name := path.Split(fullRemote)
cacheType := objectInCache cacheType := objectInCache
parentFs := f.UnWrap() parentFs := f.UnWrap()
if f.opt.TempWritePath != "" { if f.tempWritePath != "" {
_, err := f.cache.SearchPendingUpload(fullRemote) _, err := f.cache.SearchPendingUpload(fullRemote)
if err == nil { // queued for upload if err == nil { // queued for upload
cacheType = objectPendingUpload cacheType = objectPendingUpload
@@ -94,19 +92,17 @@ func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object {
CacheType: cacheType, CacheType: cacheType,
CacheTs: time.Now(), CacheTs: time.Now(),
} }
co.updateData(ctx, o) co.updateData(o)
return co return co
} }
func (o *Object) updateData(ctx context.Context, source fs.Object) { func (o *Object) updateData(source fs.Object) {
o.Object = source o.Object = source
o.CacheModTime = source.ModTime(ctx).UnixNano() o.CacheModTime = source.ModTime().UnixNano()
o.CacheSize = source.Size() o.CacheSize = source.Size()
o.CacheStorable = source.Storable() o.CacheStorable = source.Storable()
o.CacheTs = time.Now() o.CacheTs = time.Now()
o.cacheHashesMu.Lock()
o.CacheHashes = make(map[hash.Type]string) o.CacheHashes = make(map[hash.Type]string)
o.cacheHashesMu.Unlock()
} }
// Fs returns its FS info // Fs returns its FS info
@@ -134,20 +130,20 @@ func (o *Object) abs() string {
} }
// ModTime returns the cached ModTime // ModTime returns the cached ModTime
func (o *Object) ModTime(ctx context.Context) time.Time { func (o *Object) ModTime() time.Time {
_ = o.refresh(ctx) _ = o.refresh()
return time.Unix(0, o.CacheModTime) return time.Unix(0, o.CacheModTime)
} }
// Size returns the cached Size // Size returns the cached Size
func (o *Object) Size() int64 { func (o *Object) Size() int64 {
_ = o.refresh(context.TODO()) _ = o.refresh()
return o.CacheSize return o.CacheSize
} }
// Storable returns the cached Storable // Storable returns the cached Storable
func (o *Object) Storable() bool { func (o *Object) Storable() bool {
_ = o.refresh(context.TODO()) _ = o.refresh()
return o.CacheStorable return o.CacheStorable
} }
@@ -155,18 +151,18 @@ func (o *Object) Storable() bool {
// all these conditions must be true to ignore a refresh // all these conditions must be true to ignore a refresh
// 1. cache ts didn't expire yet // 1. cache ts didn't expire yet
// 2. is not pending a notification from the wrapped fs // 2. is not pending a notification from the wrapped fs
func (o *Object) refresh(ctx context.Context) error { func (o *Object) refresh() error {
isNotified := o.CacheFs.isNotifiedRemote(o.Remote()) isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge))) isExpired := time.Now().After(o.CacheTs.Add(o.CacheFs.fileAge))
if !isExpired && !isNotified { if !isExpired && !isNotified {
return nil return nil
} }
return o.refreshFromSource(ctx, true) return o.refreshFromSource(true)
} }
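
On the newer side the staleness test combines a TTL (the info_age option) with change notifications from the wrapped remote. A minimal standalone sketch of the same check (stale and the durations are illustrative, not the backend's):

package main

import (
	"fmt"
	"time"
)

// stale reports whether a cached entry should be re-read from the source:
// a change notification is pending, or the entry has outlived its TTL.
func stale(cacheTs time.Time, ttl time.Duration, notified bool) bool {
	return notified || time.Now().After(cacheTs.Add(ttl))
}

func main() {
	fmt.Println(stale(time.Now().Add(-10*time.Minute), 6*time.Hour, false)) // false: still fresh
	fmt.Println(stale(time.Now().Add(-7*time.Hour), 6*time.Hour, false))   // true: expired
}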
// refreshFromSource requests the original FS for the object in case it comes from a cached entry // refreshFromSource requests the original FS for the object in case it comes from a cached entry
func (o *Object) refreshFromSource(ctx context.Context, force bool) error { func (o *Object) refreshFromSource(force bool) error {
o.refreshMutex.Lock() o.refreshMutex.Lock()
defer o.refreshMutex.Unlock() defer o.refreshMutex.Unlock()
var err error var err error
@@ -176,29 +172,29 @@ func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
return nil return nil
} }
if o.isTempFile() { if o.isTempFile() {
liveObject, err = o.ParentFs.NewObject(ctx, o.Remote()) liveObject, err = o.ParentFs.NewObject(o.Remote())
err = errors.Wrapf(err, "in parent fs %v", o.ParentFs) err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
} else { } else {
liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote()) liveObject, err = o.CacheFs.Fs.NewObject(o.Remote())
err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs) err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
} }
if err != nil { if err != nil {
fs.Errorf(o, "error refreshing object in : %v", err) fs.Errorf(o, "error refreshing object in : %v", err)
return err return err
} }
o.updateData(ctx, liveObject) o.updateData(liveObject)
o.persist() o.persist()
return nil return nil
} }
// SetModTime sets the ModTime of this object // SetModTime sets the ModTime of this object
func (o *Object) SetModTime(ctx context.Context, t time.Time) error { func (o *Object) SetModTime(t time.Time) error {
if err := o.refreshFromSource(ctx, false); err != nil { if err := o.refreshFromSource(false); err != nil {
return err return err
} }
err := o.Object.SetModTime(ctx, t) err := o.Object.SetModTime(t)
if err != nil { if err != nil {
return err return err
} }
@@ -211,19 +207,13 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
} }
// Open is used to request a specific part of the file using fs.RangeOption // Open is used to request a specific part of the file using fs.RangeOption
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
var err error if err := o.refreshFromSource(true); err != nil {
if o.Object == nil {
err = o.refreshFromSource(ctx, true)
} else {
err = o.refresh(ctx)
}
if err != nil {
return nil, err return nil, err
} }
cacheReader := NewObjectHandle(ctx, o, o.CacheFs) var err error
cacheReader := NewObjectHandle(o, o.CacheFs)
var offset, limit int64 = 0, -1 var offset, limit int64 = 0, -1
for _, option := range options { for _, option := range options {
switch x := option.(type) { switch x := option.(type) {
@@ -242,12 +232,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
} }
// Update will change the object data // Update will change the object data
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if err := o.refreshFromSource(ctx, false); err != nil { if err := o.refreshFromSource(false); err != nil {
return err return err
} }
// pause background uploads if active // pause background uploads if active
if o.CacheFs.opt.TempWritePath != "" { if o.CacheFs.tempWritePath != "" {
o.CacheFs.backgroundRunner.pause() o.CacheFs.backgroundRunner.pause()
defer o.CacheFs.backgroundRunner.play() defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads // don't allow started uploads
@@ -258,7 +248,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
fs.Debugf(o, "updating object contents with size %v", src.Size()) fs.Debugf(o, "updating object contents with size %v", src.Size())
// FIXME use reliable upload // FIXME use reliable upload
err := o.Object.Update(ctx, in, src, options...) err := o.Object.Update(in, src, options...)
if err != nil { if err != nil {
fs.Errorf(o, "error updating source: %v", err) fs.Errorf(o, "error updating source: %v", err)
return err return err
@@ -269,11 +259,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// advertise to ChangeNotify if wrapped doesn't do that // advertise to ChangeNotify if wrapped doesn't do that
o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject) o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)
o.CacheModTime = src.ModTime(ctx).UnixNano() o.CacheModTime = src.ModTime().UnixNano()
o.CacheSize = src.Size() o.CacheSize = src.Size()
o.cacheHashesMu.Lock()
o.CacheHashes = make(map[hash.Type]string) o.CacheHashes = make(map[hash.Type]string)
o.cacheHashesMu.Unlock()
o.CacheTs = time.Now() o.CacheTs = time.Now()
o.persist() o.persist()
@@ -281,12 +269,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
} }
// Remove deletes the object from both the cache and the source // Remove deletes the object from both the cache and the source
func (o *Object) Remove(ctx context.Context) error { func (o *Object) Remove() error {
if err := o.refreshFromSource(ctx, false); err != nil { if err := o.refreshFromSource(false); err != nil {
return err return err
} }
// pause background uploads if active // pause background uploads if active
if o.CacheFs.opt.TempWritePath != "" { if o.CacheFs.tempWritePath != "" {
o.CacheFs.backgroundRunner.pause() o.CacheFs.backgroundRunner.pause()
defer o.CacheFs.backgroundRunner.play() defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads // don't allow started uploads
@@ -294,7 +282,7 @@ func (o *Object) Remove(ctx context.Context) error {
return errors.Errorf("%v is currently uploading, can't delete", o) return errors.Errorf("%v is currently uploading, can't delete", o)
} }
} }
err := o.Object.Remove(ctx) err := o.Object.Remove()
if err != nil { if err != nil {
return err return err
} }
@@ -312,27 +300,24 @@ func (o *Object) Remove(ctx context.Context) error {
// Hash requests a hash of the object and stores in the cache // Hash requests a hash of the object and stores in the cache
// since it might or might not be called, this is lazy loaded // since it might or might not be called, this is lazy loaded
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) { func (o *Object) Hash(ht hash.Type) (string, error) {
_ = o.refresh(ctx) _ = o.refresh()
o.cacheHashesMu.Lock()
if o.CacheHashes == nil { if o.CacheHashes == nil {
o.CacheHashes = make(map[hash.Type]string) o.CacheHashes = make(map[hash.Type]string)
} }
cachedHash, found := o.CacheHashes[ht] cachedHash, found := o.CacheHashes[ht]
o.cacheHashesMu.Unlock()
if found { if found {
return cachedHash, nil return cachedHash, nil
} }
if err := o.refreshFromSource(ctx, false); err != nil { if err := o.refreshFromSource(false); err != nil {
return "", err return "", err
} }
liveHash, err := o.Object.Hash(ctx, ht) liveHash, err := o.Object.Hash(ht)
if err != nil { if err != nil {
return "", err return "", err
} }
o.cacheHashesMu.Lock()
o.CacheHashes[ht] = liveHash o.CacheHashes[ht] = liveHash
o.cacheHashesMu.Unlock()
o.persist() o.persist()
fs.Debugf(o, "object hash cached: %v", liveHash) fs.Debugf(o, "object hash cached: %v", liveHash)
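
The cacheHashesMu added on the newer side is the usual guarded-map memoisation: read under the lock, compute the hash outside it, then store under the lock. A generic sketch of the pattern (names illustrative; concurrent callers may compute twice, and the last write wins, which is harmless for a pure hash):

package main

import (
	"fmt"
	"sync"
)

var (
	hashMu    sync.Mutex
	hashCache = map[string]string{}
)

func cachedHash(key string, compute func() (string, error)) (string, error) {
	hashMu.Lock()
	v, ok := hashCache[key]
	hashMu.Unlock()
	if ok {
		return v, nil // served from cache
	}
	v, err := compute() // slow call happens outside the lock
	if err != nil {
		return "", err
	}
	hashMu.Lock()
	hashCache[key] = v // memoise for the next caller
	hashMu.Unlock()
	return v, nil
}

func main() {
	h, _ := cachedHash("file.txt", func() (string, error) { return "deadbeef", nil })
	fmt.Println(h)
}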
@@ -368,13 +353,6 @@ func (o *Object) tempFileStartedUpload() bool {
return started return started
} }
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
return o.Object
}
var ( var (
_ fs.Object = (*Object)(nil) _ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
) )

backend/cache/plex.go

@@ -1,21 +1,23 @@
// +build !plan9,!js // +build !plan9
package cache package cache
import ( import (
"bytes"
"crypto/tls"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"strings" "strings"
"sync"
"time" "time"
cache "github.com/patrickmn/go-cache" "sync"
"github.com/rclone/rclone/fs"
"bytes"
"io/ioutil"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/patrickmn/go-cache"
"golang.org/x/net/websocket" "golang.org/x/net/websocket"
) )
@@ -53,17 +55,15 @@ type plexConnector struct {
username string username string
password string password string
token string token string
insecure bool
f *Fs f *Fs
mu sync.Mutex mu sync.Mutex
running bool running bool
runningMu sync.Mutex runningMu sync.Mutex
stateCache *cache.Cache stateCache *cache.Cache
saveToken func(string)
} }
// newPlexConnector connects to a Plex server and generates a token // newPlexConnector connects to a Plex server and generates a token
func newPlexConnector(f *Fs, plexURL, username, password string, insecure bool, saveToken func(string)) (*plexConnector, error) { func newPlexConnector(f *Fs, plexURL, username, password string) (*plexConnector, error) {
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/")) u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
if err != nil { if err != nil {
return nil, err return nil, err
@@ -75,16 +75,14 @@ func newPlexConnector(f *Fs, plexURL, username, password string, insecure bool,
username: username, username: username,
password: password, password: password,
token: "", token: "",
insecure: insecure,
stateCache: cache.New(time.Hour, time.Minute), stateCache: cache.New(time.Hour, time.Minute),
saveToken: saveToken,
} }
return pc, nil return pc, nil
} }
// newPlexConnector connects to a Plex server and generates a token // newPlexConnector connects to a Plex server and generates a token
func newPlexConnectorWithToken(f *Fs, plexURL, token string, insecure bool) (*plexConnector, error) { func newPlexConnectorWithToken(f *Fs, plexURL, token string) (*plexConnector, error) {
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/")) u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
if err != nil { if err != nil {
return nil, err return nil, err
@@ -94,7 +92,6 @@ func newPlexConnectorWithToken(f *Fs, plexURL, token string, insecure bool) (*plexConnector, error) {
f: f, f: f,
url: u, url: u,
token: token, token: token,
insecure: insecure,
stateCache: cache.New(time.Hour, time.Minute), stateCache: cache.New(time.Hour, time.Minute),
} }
pc.listenWebsocket() pc.listenWebsocket()
@@ -109,26 +106,14 @@ func (p *plexConnector) closeWebsocket() {
p.running = false p.running = false
} }
func (p *plexConnector) websocketDial() (*websocket.Conn, error) {
u := strings.TrimRight(strings.Replace(strings.Replace(
p.url.String(), "http://", "ws://", 1), "https://", "wss://", 1), "/")
url := fmt.Sprintf(defPlexNotificationURL, u, p.token)
config, err := websocket.NewConfig(url, "http://localhost")
if err != nil {
return nil, err
}
if p.insecure {
config.TlsConfig = &tls.Config{InsecureSkipVerify: true}
}
return websocket.DialConfig(config)
}
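
Worth noting in the extracted dialler: the scheme rewrite turns an illustrative https://plex.example:32400/ base into wss://plex.example:32400 before the notification path is appended, and InsecureSkipVerify is only applied when the connector was created with the insecure option, so certificate checking stays on by default.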
func (p *plexConnector) listenWebsocket() { func (p *plexConnector) listenWebsocket() {
p.runningMu.Lock() p.runningMu.Lock()
defer p.runningMu.Unlock() defer p.runningMu.Unlock()
conn, err := p.websocketDial() u := strings.Replace(p.url.String(), "http://", "ws://", 1)
u = strings.Replace(u, "https://", "wss://", 1)
conn, err := websocket.Dial(fmt.Sprintf(defPlexNotificationURL, strings.TrimRight(u, "/"), p.token),
"", "http://localhost")
if err != nil { if err != nil {
fs.Errorf("plex", "%v", err) fs.Errorf("plex", "%v", err)
return return
@@ -224,9 +209,8 @@ func (p *plexConnector) authenticate() error {
} }
p.token = token p.token = token
if p.token != "" { if p.token != "" {
if p.saveToken != nil { config.FileSet(p.f.Name(), "plex_token", p.token)
p.saveToken(p.token) config.SaveConfig()
}
fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String()) fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String())
} }
p.listenWebsocket() p.listenWebsocket()


@@ -1,4 +1,4 @@
// +build !plan9,!js // +build !plan9
package cache package cache
@@ -7,9 +7,9 @@ import (
"strings" "strings"
"time" "time"
cache "github.com/patrickmn/go-cache" "github.com/ncw/rclone/fs"
"github.com/patrickmn/go-cache"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
) )
// Memory is a wrapper of transient storage for a go-cache store // Memory is a wrapper of transient storage for a go-cache store


@@ -1,25 +1,27 @@
// +build !plan9,!js // +build !plan9
package cache package cache
import ( import (
"time"
"bytes" "bytes"
"context"
"encoding/binary" "encoding/binary"
"encoding/json" "encoding/json"
"fmt"
"io/ioutil"
"os" "os"
"path" "path"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
"time"
"io/ioutil"
"fmt"
bolt "github.com/coreos/bbolt"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/walk"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
bolt "go.etcd.io/bbolt"
) )
// Constants // Constants
@@ -32,8 +34,7 @@ const (
// Features flags for this storage type // Features flags for this storage type
type Features struct { type Features struct {
PurgeDb bool // purge the db before starting PurgeDb bool // purge the db before starting
DbWaitTime time.Duration // time to wait for DB to be available
} }
var boltMap = make(map[string]*Persistent) var boltMap = make(map[string]*Persistent)
@@ -121,7 +122,7 @@ func (b *Persistent) connect() error {
if err != nil { if err != nil {
return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath) return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
} }
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime}) b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: *cacheDbWaitTime})
if err != nil { if err != nil {
return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath) return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
} }
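
bolt.Open takes the file-lock timeout through bolt.Options, so a second process contending for the same cache DB errors out instead of blocking indefinitely. A minimal sketch (path illustrative):

package main

import (
	"log"
	"time"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Timeout bounds how long Open waits for the exclusive file lock;
	// without it a second process would block here indefinitely.
	db, err := bolt.Open("/tmp/cache-example.db", 0644, &bolt.Options{Timeout: time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = db.Close() }()
}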
@@ -341,7 +342,7 @@ func (b *Persistent) RemoveDir(fp string) error {
// ExpireDir will flush a CachedDirectory and all its objects from the objects // ExpireDir will flush a CachedDirectory and all its objects from the objects
// chunks will remain as they are // chunks will remain as they are
func (b *Persistent) ExpireDir(cd *Directory) error { func (b *Persistent) ExpireDir(cd *Directory) error {
t := time.Now().Add(time.Duration(-cd.CacheFs.opt.InfoAge)) t := time.Now().Add(cd.CacheFs.fileAge * -1)
cd.CacheTs = &t cd.CacheTs = &t
// expire all parents // expire all parents
@@ -399,7 +400,7 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
if err != nil { if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err) return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
} }
err = bucket.Put([]byte(cachedObject.Name), encoded) err = bucket.Put([]byte(cachedObject.Name), []byte(encoded))
if err != nil { if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err) return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
} }
@@ -428,7 +429,7 @@ func (b *Persistent) RemoveObject(fp string) error {
// ExpireObject will flush an Object and all its data if desired // ExpireObject will flush an Object and all its data if desired
func (b *Persistent) ExpireObject(co *Object, withData bool) error { func (b *Persistent) ExpireObject(co *Object, withData bool) error {
co.CacheTs = time.Now().Add(time.Duration(-co.CacheFs.opt.InfoAge)) co.CacheTs = time.Now().Add(co.CacheFs.fileAge * -1)
err := b.AddObject(co) err := b.AddObject(co)
if withData { if withData {
_ = os.RemoveAll(path.Join(b.dataPath, co.abs())) _ = os.RemoveAll(path.Join(b.dataPath, co.abs()))
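
Both expiry hunks use the same idiom: instead of deleting the entry, its timestamp is backdated past the TTL so the next freshness check fails and forces a refresh. In effect (infoAge standing in for the configured age on either side of the diff):

t := time.Now().Add(-infoAge) // backdated: the entry now reads as already expired
cd.CacheTs = &t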
@@ -767,6 +768,31 @@ func (b *Persistent) iterateBuckets(buk *bolt.Bucket, bucketFn func(name string)
return err return err
} }
func (b *Persistent) dumpRoot() string {
var itBuckets func(buk *bolt.Bucket) map[string]interface{}
itBuckets = func(buk *bolt.Bucket) map[string]interface{} {
m := make(map[string]interface{})
c := buk.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
if v == nil {
buk2 := buk.Bucket(k)
m[string(k)] = itBuckets(buk2)
} else {
m[string(k)] = "-"
}
}
return m
}
var mm map[string]interface{}
_ = b.db.View(func(tx *bolt.Tx) error {
mm = itBuckets(tx.Bucket([]byte(RootBucket)))
return nil
})
raw, _ := json.MarshalIndent(mm, "", " ")
return string(raw)
}
// addPendingUpload adds a new file to the pending queue of uploads // addPendingUpload adds a new file to the pending queue of uploads
func (b *Persistent) addPendingUpload(destPath string, started bool) error { func (b *Persistent) addPendingUpload(destPath string, started bool) error {
return b.db.Update(func(tx *bolt.Tx) error { return b.db.Update(func(tx *bolt.Tx) error {
@@ -785,7 +811,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
if err != nil { if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err) return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
} }
err = bucket.Put([]byte(destPath), encoded) err = bucket.Put([]byte(destPath), []byte(encoded))
if err != nil { if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err) return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
} }
@@ -980,8 +1006,17 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUploadInfo) error) error {
}) })
} }
// SetPendingUploadToStarted is a way to mark an entry as started (even if it's not already)
// TO BE USED IN TESTING ONLY
func (b *Persistent) SetPendingUploadToStarted(remote string) error {
return b.updatePendingUpload(remote, func(item *tempUploadInfo) error {
item.Started = true
return nil
})
}
// ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue // ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error { func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
return b.db.Update(func(tx *bolt.Tx) error { return b.db.Update(func(tx *bolt.Tx) error {
_ = tx.DeleteBucket([]byte(tempBucket)) _ = tx.DeleteBucket([]byte(tempBucket))
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket)) bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
@@ -990,7 +1025,7 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error {
} }
var queuedEntries []fs.Object var queuedEntries []fs.Object
err = walk.ListR(ctx, cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error { err = walk.Walk(cacheFs.tempFs, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
for _, o := range entries { for _, o := range entries {
if oo, ok := o.(fs.Object); ok { if oo, ok := o.(fs.Object); ok {
queuedEntries = append(queuedEntries, oo) queuedEntries = append(queuedEntries, oo)
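
For reference, the newer enumeration API seen above is walk.ListR: it takes a context, an include-all flag, a recursion depth, a listing type and a callback that receives entries in batches. A hedged sketch of collecting every object under a root (ctx and f assumed to be a context.Context and an fs.Fs):

var objs []fs.Object
err := walk.ListR(ctx, f, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
	for _, entry := range entries {
		if o, ok := entry.(fs.Object); ok {
			objs = append(objs, o) // collect plain objects, skip directories
		}
	}
	return nil
})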
@@ -1016,7 +1051,7 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
if err != nil { if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err) return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
} }
err = bucket.Put([]byte(destPath), encoded) err = bucket.Put([]byte(destPath), []byte(encoded))
if err != nil { if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err) return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
} }
@@ -1027,6 +1062,19 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error {
}) })
} }
// PurgeTempUploads will remove all the pending uploads from the queue
// TO BE USED IN TESTING ONLY
func (b *Persistent) PurgeTempUploads() {
b.tempQueueMux.Lock()
defer b.tempQueueMux.Unlock()
_ = b.db.Update(func(tx *bolt.Tx) error {
_ = tx.DeleteBucket([]byte(tempBucket))
_, _ = tx.CreateBucketIfNotExists([]byte(tempBucket))
return nil
})
}
// Close should be called when the program ends gracefully // Close should be called when the program ends gracefully
func (b *Persistent) Close() { func (b *Persistent) Close() {
b.cleanupMux.Lock() b.cleanupMux.Lock()


@@ -1,23 +0,0 @@
package cache
import bolt "go.etcd.io/bbolt"
// PurgeTempUploads will remove all the pending uploads from the queue
func (b *Persistent) PurgeTempUploads() {
b.tempQueueMux.Lock()
defer b.tempQueueMux.Unlock()
_ = b.db.Update(func(tx *bolt.Tx) error {
_ = tx.DeleteBucket([]byte(tempBucket))
_, _ = tx.CreateBucketIfNotExists([]byte(tempBucket))
return nil
})
}
// SetPendingUploadToStarted is a way to mark an entry as started (even if it's not already)
func (b *Persistent) SetPendingUploadToStarted(remote string) error {
return b.updatePendingUpload(remote, func(item *tempUploadInfo) error {
item.Started = true
return nil
})
}

File diff suppressed because it is too large


@@ -1,691 +0,0 @@
package chunker
import (
"bytes"
"context"
"flag"
"fmt"
"io/ioutil"
"path"
"regexp"
"strings"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Command line flags
var (
UploadKilobytes = flag.Int("upload-kilobytes", 0, "Upload size in Kilobytes, set this to test large uploads")
)
// test that chunking does not break large uploads
func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
t.Run(fmt.Sprintf("PutLarge%dk", kilobytes), func(t *testing.T) {
fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
Path: fmt.Sprintf("chunker-upload-%dk", kilobytes),
Size: int64(kilobytes) * int64(fs.KibiByte),
})
})
}
// test chunk name parser
func testChunkNameFormat(t *testing.T, f *Fs) {
saveOpt := f.opt
defer func() {
// restore original settings (f is pointer, f.opt is struct)
f.opt = saveOpt
_ = f.setChunkNameFormat(f.opt.NameFormat)
}()
assertFormat := func(pattern, wantDataFormat, wantCtrlFormat, wantNameRegexp string) {
err := f.setChunkNameFormat(pattern)
assert.NoError(t, err)
assert.Equal(t, wantDataFormat, f.dataNameFmt)
assert.Equal(t, wantCtrlFormat, f.ctrlNameFmt)
assert.Equal(t, wantNameRegexp, f.nameRegexp.String())
}
assertFormatValid := func(pattern string) {
err := f.setChunkNameFormat(pattern)
assert.NoError(t, err)
}
assertFormatInvalid := func(pattern string) {
err := f.setChunkNameFormat(pattern)
assert.Error(t, err)
}
assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType, xactID string) {
gotChunkName := ""
assert.NotPanics(t, func() {
gotChunkName = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
}, "makeChunkName(%q,%d,%q,%q) must not panic", mainName, chunkNo, ctrlType, xactID)
if gotChunkName != "" {
assert.Equal(t, wantChunkName, gotChunkName)
}
}
assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType, xactID string) {
assert.Panics(t, func() {
_ = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
}, "makeChunkName(%q,%d,%q,%q) should panic", mainName, chunkNo, ctrlType, xactID)
}
assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType, wantXactID string) {
gotMainName, gotChunkNo, gotCtrlType, gotXactID := f.parseChunkName(fileName)
assert.Equal(t, wantMainName, gotMainName)
assert.Equal(t, wantChunkNo, gotChunkNo)
assert.Equal(t, wantCtrlType, gotCtrlType)
assert.Equal(t, wantXactID, gotXactID)
}
const newFormatSupported = false // support for patterns not starting with base name (*)
// valid formats
assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
if newFormatSupported {
assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z][a-z0-9]{2,6})),(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
}
// invalid formats
assertFormatInvalid(`chunk-#`)
assertFormatInvalid(`*-chunk`)
assertFormatInvalid(`*-*-chunk-#`)
assertFormatInvalid(`*-chunk-#-#`)
assertFormatInvalid(`#-chunk-*`)
assertFormatInvalid(`*/#`)
assertFormatValid(`*#`)
assertFormatInvalid(`**#`)
assertFormatInvalid(`#*`)
assertFormatInvalid(``)
assertFormatInvalid(`-`)
// quick tests
if newFormatSupported {
assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9][0-9a-z]{3,8})\.\.tmp_([0-9]{10,13}))?$`)
f.opt.StartFrom = 1
assertMakeName(`part_fish_1`, "fish", 0, "", "")
assertParseName(`part_fish_43`, "fish", 42, "", "")
assertMakeName(`part_fish__locks`, "fish", -2, "locks", "")
assertParseName(`part_fish__locks`, "fish", -1, "locks", "")
assertMakeName(`part_fish__x2y`, "fish", -2, "x2y", "")
assertParseName(`part_fish__x2y`, "fish", -1, "x2y", "")
assertMakeName(`part_fish_3_0004`, "fish", 2, "", "4")
assertParseName(`part_fish_4_0005`, "fish", 3, "", "0005")
assertMakeName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -3, "blkinfo", "jj5fvo3wr")
assertParseName(`part_fish__blkinfo_zz9fvo3wr`, "fish", -1, "blkinfo", "zz9fvo3wr")
// old-style temporary suffix (parse only)
assertParseName(`part_fish_4..tmp_0000000011`, "fish", 3, "", "000b")
assertParseName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -1, "blkinfo", "jj5fvo3wr")
}
// prepare format for long tests
assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
f.opt.StartFrom = 2
// valid data chunks
assertMakeName(`fish.chunk.003`, "fish", 1, "", "")
assertParseName(`fish.chunk.003`, "fish", 1, "", "")
assertMakeName(`fish.chunk.021`, "fish", 19, "", "")
assertParseName(`fish.chunk.021`, "fish", 19, "", "")
// valid temporary data chunks
assertMakeName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
assertParseName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
assertMakeName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
assertParseName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
assertMakeName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
assertParseName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
assertMakeName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
assertParseName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
// valid temporary data chunks (old temporary suffix, parse only)
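// A legacy decimal suffix is converted to a base-36 transaction ID
// padded to at least 4 characters, e.g. 47 -> "001b" (judging by the
// expectations below).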
assertParseName(`fish.chunk.004..tmp_0000000047`, "fish", 2, "", "001b")
assertParseName(`fish.chunk.323..tmp_9994567890123`, "fish", 321, "", "3jjfvo3wr")
// parsing invalid data chunk names
assertParseName(`fish.chunk.3`, "", -1, "", "")
assertParseName(`fish.chunk.001`, "", -1, "", "")
assertParseName(`fish.chunk.21`, "", -1, "", "")
assertParseName(`fish.chunk.-21`, "", -1, "", "")
assertParseName(`fish.chunk.004abcd`, "", -1, "", "") // missing underscore delimiter
assertParseName(`fish.chunk.004__1234`, "", -1, "", "") // extra underscore delimiter
assertParseName(`fish.chunk.004_123`, "", -1, "", "") // too short temporary suffix
assertParseName(`fish.chunk.004_1234567890`, "", -1, "", "") // too long temporary suffix
assertParseName(`fish.chunk.004_-1234`, "", -1, "", "") // temporary suffix must be positive
assertParseName(`fish.chunk.004_123E`, "", -1, "", "") // uppercase not allowed
assertParseName(`fish.chunk.004_12.3`, "", -1, "", "") // punctuation not allowed
// parsing invalid data chunk names (old temporary suffix)
assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", "")
assertParseName(`fish.chunk.323..tmp_12345678901234`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", "")
// valid control chunks
assertMakeName(`fish.chunk._info`, "fish", -1, "info", "")
assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", "")
assertMakeName(`fish.chunk._blkinfo`, "fish", -3, "blkinfo", "")
assertMakeName(`fish.chunk._x2y`, "fish", -4, "x2y", "")
assertParseName(`fish.chunk._info`, "fish", -1, "info", "")
assertParseName(`fish.chunk._locks`, "fish", -1, "locks", "")
assertParseName(`fish.chunk._blkinfo`, "fish", -1, "blkinfo", "")
assertParseName(`fish.chunk._x2y`, "fish", -1, "x2y", "")
// valid temporary control chunks
assertMakeName(`fish.chunk._info_0001`, "fish", -1, "info", "1")
assertMakeName(`fish.chunk._locks_4321`, "fish", -2, "locks", "4321")
assertMakeName(`fish.chunk._uploads_abcd`, "fish", -3, "uploads", "abcd")
assertMakeName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -4, "blkinfo", "xyzabcdef")
assertMakeName(`fish.chunk._x2y_1aaa`, "fish", -5, "x2y", "1aaa")
assertParseName(`fish.chunk._info_0001`, "fish", -1, "info", "0001")
assertParseName(`fish.chunk._locks_4321`, "fish", -1, "locks", "4321")
assertParseName(`fish.chunk._uploads_9abc`, "fish", -1, "uploads", "9abc")
assertParseName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -1, "blkinfo", "xyzabcdef")
assertParseName(`fish.chunk._x2y_1aaa`, "fish", -1, "x2y", "1aaa")
// valid temporary control chunks (old temporary suffix, parse only)
assertParseName(`fish.chunk._info..tmp_0000000047`, "fish", -1, "info", "001b")
assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", "15wx")
assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", "0000")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123`, "fish", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._x2y..tmp_0000000000`, "fish", -1, "x2y", "0000")
// parsing invalid control chunk names
assertParseName(`fish.chunk.metadata`, "", -1, "", "") // control type must be prefixed with an underscore
assertParseName(`fish.chunk.info`, "", -1, "", "")
assertParseName(`fish.chunk.locks`, "", -1, "", "")
assertParseName(`fish.chunk.uploads`, "", -1, "", "")
assertParseName(`fish.chunk._os`, "", -1, "", "") // too short
assertParseName(`fish.chunk._metadata`, "", -1, "", "") // too long
assertParseName(`fish.chunk._blockinfo`, "", -1, "", "") // way too long
assertParseName(`fish.chunk._4me`, "", -1, "", "") // cannot start with digit
assertParseName(`fish.chunk._567`, "", -1, "", "") // cannot be all digits
assertParseName(`fish.chunk._me_ta`, "", -1, "", "") // punctuation not allowed
assertParseName(`fish.chunk._in-fo`, "", -1, "", "")
assertParseName(`fish.chunk._.bin`, "", -1, "", "")
assertParseName(`fish.chunk._.2xy`, "", -1, "", "")
// parsing invalid temporary control chunks
assertParseName(`fish.chunk._blkinfo1234`, "", -1, "", "") // missing underscore delimiter
assertParseName(`fish.chunk._info__1234`, "", -1, "", "") // extra underscore delimiter
assertParseName(`fish.chunk._info_123`, "", -1, "", "") // too short temporary suffix
assertParseName(`fish.chunk._info_1234567890`, "", -1, "", "") // too long temporary suffix
assertParseName(`fish.chunk._info_-1234`, "", -1, "", "") // temporary suffix must be positive
assertParseName(`fish.chunk._info_123E`, "", -1, "", "") // uppercase not allowed
assertParseName(`fish.chunk._info_12.3`, "", -1, "", "") // punctuation not allowed
assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", "")
assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", "")
assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", "")
// short control chunk names: 3 letters ok, 1-2 letters not allowed
assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", "")
assertParseName(`fish.chunk._int`, "fish", -1, "int", "")
assertMakeNamePanics("fish", -1, "in", "")
assertMakeNamePanics("fish", -1, "up", "4")
assertMakeNamePanics("fish", -1, "x", "")
assertMakeNamePanics("fish", -1, "c", "1z")
assertMakeName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0")
assertMakeName(`fish.chunk._ext_0026`, "fish", -1, "ext", "26")
assertMakeName(`fish.chunk._int_0abc`, "fish", -1, "int", "abc")
assertMakeName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertParseName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0000")
assertParseName(`fish.chunk._ext_0026`, "fish", -1, "ext", "0026")
assertParseName(`fish.chunk._int_0abc`, "fish", -1, "int", "0abc")
assertParseName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
// base file name can sometimes look like a valid chunk name
assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", "")
assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", "")
assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", "")
assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", "")
assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", "")
// base file name looking like a valid chunk name (old temporary suffix)
assertParseName(`fish.chunk.003.chunk.005..tmp_0000000022`, "fish.chunk.003", 3, "", "000m")
assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._info.chunk.005..tmp_0000000023`, "fish.chunk._info", 3, "", "000n")
assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk.003.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.003", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._info.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._info", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000025`, "fish.chunk.004..tmp_0000000021", 3, "", "000p")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.004..tmp_0000000021", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.004`, "fish.chunk._blkinfo..tmp_9994567890123", 2, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.005..tmp_0000000026`, "fish.chunk._blkinfo..tmp_9994567890123", 3, "", "000q")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "info", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blkinfo..tmp_1234567890123456789", 2, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.005..tmp_0000000022`, "fish.chunk._blkinfo..tmp_1234567890123456789", 3, "", "000m")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "info", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
// attempts to make invalid chunk names
assertMakeNamePanics("fish", -1, "", "") // neither data nor control
assertMakeNamePanics("fish", 0, "info", "") // both data and control
assertMakeNamePanics("fish", -1, "metadata", "") // control type too long
assertMakeNamePanics("fish", -1, "blockinfo", "") // control type way too long
assertMakeNamePanics("fish", -1, "2xy", "") // first digit not allowed
assertMakeNamePanics("fish", -1, "123", "") // all digits not allowed
assertMakeNamePanics("fish", -1, "Meta", "") // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", "") // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", "")
assertMakeNamePanics("fish", -1, "info_", "")
assertMakeNamePanics("fish", -2, ".bind", "")
assertMakeNamePanics("fish", -2, "bind.", "")
assertMakeNamePanics("fish", -1, "", "1") // neither data nor control
assertMakeNamePanics("fish", 0, "info", "23") // both data and control
assertMakeNamePanics("fish", -1, "metadata", "45") // control type too long
assertMakeNamePanics("fish", -1, "blockinfo", "7") // control type way too long
assertMakeNamePanics("fish", -1, "2xy", "abc") // first digit not allowed
assertMakeNamePanics("fish", -1, "123", "def") // all digits not allowed
assertMakeNamePanics("fish", -1, "Meta", "mnk") // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", "xyz") // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", "5678")
assertMakeNamePanics("fish", -1, "info_", "999")
assertMakeNamePanics("fish", -2, ".bind", "0")
assertMakeNamePanics("fish", -2, "bind.", "0")
assertMakeNamePanics("fish", 0, "", "1234567890") // temporary suffix too long
assertMakeNamePanics("fish", 0, "", "123F4") // uppercase not allowed
assertMakeNamePanics("fish", 0, "", "123.") // punctuation not allowed
assertMakeNamePanics("fish", 0, "", "_123")
}
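// testSmallFileInternals checks the in-memory representation of files
// smaller than a chunk: depending on the meta format and hashing mode
// they are stored either as a plain object or as a single-chunk composite.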
func testSmallFileInternals(t *testing.T, f *Fs) {
const dir = "small"
ctx := context.Background()
saveOpt := f.opt
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
f.opt.FailHard = false
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
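// checkSmallFileInternals inspects the object's private fields to
// confirm the representation expected for the current settings.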
checkSmallFileInternals := func(obj fs.Object) {
assert.NotNil(t, obj)
o, ok := obj.(*Object)
assert.True(t, ok)
assert.NotNil(t, o)
if o == nil {
return
}
switch {
case !f.useMeta:
// If the meta format is "none", a non-chunked file (even an empty
// one) is internally a single chunk without a meta object.
assert.Nil(t, o.main)
assert.True(t, o.isComposite()) // sorry, sometimes a name is misleading
assert.Equal(t, 1, len(o.chunks))
case f.hashAll:
// Consistent hashing forces a meta object even on small files
assert.NotNil(t, o.main)
assert.True(t, o.isComposite())
assert.Equal(t, 1, len(o.chunks))
default:
// normally a non-chunked file is kept in the Object's main field
assert.NotNil(t, o.main)
assert.False(t, o.isComposite())
assert.Equal(t, 0, len(o.chunks))
}
}
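// checkContents reads the object back and compares with the original data.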
checkContents := func(obj fs.Object, contents string) {
assert.NotNil(t, obj)
assert.Equal(t, int64(len(contents)), obj.Size())
r, err := obj.Open(ctx)
assert.NoError(t, err)
assert.NotNil(t, r)
if r == nil {
return
}
data, err := ioutil.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()
}
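// checkHashsum verifies that the object reports a non-empty hashsum
// when the remote runs in consistent hashing mode (hashAll).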
checkHashsum := func(obj fs.Object) {
var ht hash.Type
switch {
case !f.hashAll:
return
case f.useMD5:
ht = hash.MD5
case f.useSHA1:
ht = hash.SHA1
default:
return
}
// even empty files must have a hashsum in consistent hashing mode
sum, err := obj.Hash(ctx, ht)
assert.NoError(t, err)
assert.NotEqual(t, sum, "")
}
checkSmallFile := func(name, contents string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
assert.NotNil(t, put)
checkSmallFileInternals(put)
checkContents(put, contents)
checkHashsum(put)
// objects returned by Put and NewObject must have similar structure
obj, err := f.NewObject(ctx, filename)
assert.NoError(t, err)
assert.NotNil(t, obj)
checkSmallFileInternals(obj)
checkContents(obj, contents)
checkHashsum(obj)
_ = obj.Remove(ctx)
_ = put.Remove(ctx) // for good measure
}
checkSmallFile("emptyfile", "")
checkSmallFile("smallfile", "Ok")
}
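// testPreventCorruption verifies that in strict mode (FailHard) chunker
// refuses operations that would overwrite, move or delete chunks
// belonging to an existing composite file.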
func testPreventCorruption(t *testing.T, f *Fs) {
if f.opt.ChunkSize > 50 {
t.Skip("this test requires small chunks")
}
const dir = "corrupted"
ctx := context.Background()
saveOpt := f.opt
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
f.opt.FailHard = true
contents := random.String(250)
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
const overlapMessage = "chunk overlap"
assertOverlapError := func(err error) {
assert.Error(t, err)
if err != nil {
assert.Contains(t, err.Error(), overlapMessage)
}
}
newFile := func(name string) fs.Object {
item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj
}
billyObj := newFile("billy")
billyChunkName := func(chunkNo int) string {
return f.makeChunkName(billyObj.Remote(), chunkNo, "", "")
}
err := f.Mkdir(ctx, billyChunkName(1))
assertOverlapError(err)
_, err = f.Move(ctx, newFile("silly1"), billyChunkName(2))
assert.Error(t, err)
assert.True(t, err == fs.ErrorCantMove || (err != nil && strings.Contains(err.Error(), overlapMessage)))
_, err = f.Copy(ctx, newFile("silly2"), billyChunkName(3))
assert.Error(t, err)
assert.True(t, err == fs.ErrorCantCopy || (err != nil && strings.Contains(err.Error(), overlapMessage)))
// accessing chunks in strict mode is prohibited
f.opt.FailHard = true
billyChunk4Name := billyChunkName(4)
billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
assertOverlapError(err)
f.opt.FailHard = false
billyChunk4, err = f.NewObject(ctx, billyChunk4Name)
assert.NoError(t, err)
require.NotNil(t, billyChunk4)
f.opt.FailHard = true
_, err = f.Put(ctx, bytes.NewBufferString(contents), billyChunk4)
assertOverlapError(err)
// you can freely read chunks (if you have an object)
r, err := billyChunk4.Open(ctx)
assert.NoError(t, err)
var chunkContents []byte
assert.NotPanics(t, func() {
chunkContents, err = ioutil.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
assert.NotEqual(t, contents, string(chunkContents))
// but you can't change them
err = billyChunk4.Update(ctx, bytes.NewBufferString(contents), newFile("silly3"))
assertOverlapError(err)
// Remove isn't special, you can't corrupt files even if you have an object
err = billyChunk4.Remove(ctx)
assertOverlapError(err)
// use a fresh file in case billy was somehow corrupted by the steps above
willyObj := newFile("willy")
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", "")
f.opt.FailHard = false
willyChunk, err := f.NewObject(ctx, willyChunkName)
f.opt.FailHard = true
assert.NoError(t, err)
require.NotNil(t, willyChunk)
_, err = operations.Copy(ctx, f, willyChunk, willyChunkName, newFile("silly4"))
assertOverlapError(err)
// operations.Move returns an error when chunker's Move refuses to
// corrupt the target file, but then falls back to the copy/delete
// method, still trying to delete the target chunk. Chunker must come
// to the rescue.
_, err = operations.Move(ctx, f, willyChunk, willyChunkName, newFile("silly5"))
assertOverlapError(err)
r, err = willyChunk.Open(ctx)
assert.NoError(t, err)
assert.NotPanics(t, func() {
_, err = ioutil.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
}
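// testChunkNumberOverflow plants a chunk with an absurdly large number
// next to a normal composite file and verifies that chunker reports
// errors for the damaged file instead of accepting it.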
func testChunkNumberOverflow(t *testing.T, f *Fs) {
if f.opt.ChunkSize > 50 {
t.Skip("this test requires small chunks")
}
const dir = "wreaked"
const wreakNumber = 10200300
ctx := context.Background()
saveOpt := f.opt
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
contents := random.String(100)
newFile := func(f fs.Fs, name string) (fs.Object, string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj, filename
}
f.opt.FailHard = false
file, fileName := newFile(f, "wreaker")
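// plant a rogue chunk directly on the underlying remote, bypassing chunker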
wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", ""))
f.opt.FailHard = false
fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
_, err := f.NewObject(ctx, fileName)
assert.Error(t, err)
f.opt.FailHard = true
_, err = f.List(ctx, dir)
assert.Error(t, err)
_, err = f.NewObject(ctx, fileName)
assert.Error(t, err)
f.opt.FailHard = false
_ = wreak.Remove(ctx)
_ = file.Remove(ctx)
}
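// testMetadataInput uploads files whose content merely looks like
// chunker metadata (current, past and future format versions) and
// verifies that they still round-trip intact.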
func testMetadataInput(t *testing.T, f *Fs) {
const minChunkForTest = 50
if f.opt.ChunkSize < minChunkForTest {
t.Skip("this test requires chunks that fit metadata")
}
const dir = "usermeta"
ctx := context.Background()
saveOpt := f.opt
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
f.opt.FailHard = false
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
putFile := func(f fs.Fs, name, contents, message string, check bool) fs.Object {
item := fstest.Item{Path: name, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
assert.NotNil(t, obj, message)
return obj
}
runSubtest := func(contents, name string) {
description := fmt.Sprintf("file with %s metadata", name)
filename := path.Join(dir, name)
require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")
part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
_ = putFile(f, filename, contents, "upload "+description, false)
obj, err := f.NewObject(ctx, filename)
assert.NoError(t, err, "access "+description)
assert.NotNil(t, obj)
assert.Equal(t, int64(len(contents)), obj.Size(), "size "+description)
o, ok := obj.(*Object)
assert.True(t, ok)
if o != nil {
assert.True(t, o.isComposite() && len(o.chunks) == 1, description+" is forced composite")
o = nil
}
defer func() {
_ = obj.Remove(ctx)
_ = part.Remove(ctx)
}()
r, err := obj.Open(ctx)
assert.NoError(t, err, "open "+description)
assert.NotNil(t, r, "open stream of "+description)
if err == nil && r != nil {
data, err := ioutil.ReadAll(r)
assert.NoError(t, err, "read all of "+description)
assert.Equal(t, contents, string(data), description+" contents is ok")
_ = r.Close()
}
}
metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "")
require.NoError(t, err)
todaysMeta := string(metaData)
runSubtest(todaysMeta, "today")
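// downgrade the metadata to version 1 with zero size, as an older writer might have produced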
pastMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":1`)
pastMeta = regexp.MustCompile(`"size":[0-9]+`).ReplaceAllLiteralString(pastMeta, `"size":0`)
runSubtest(pastMeta, "past")
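// mimic a future format: bump the version and add an unknown field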
futureMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":999`)
futureMeta = regexp.MustCompile(`"nchunks":[0-9]+`).ReplaceAllLiteralString(futureMeta, `"nchunks":0,"x":"y"`)
runSubtest(futureMeta, "future")
}
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PutLarge", func(t *testing.T) {
if *UploadKilobytes <= 0 {
t.Skip("-upload-kilobytes is not set")
}
testPutLarge(t, f, *UploadKilobytes)
})
t.Run("ChunkNameFormat", func(t *testing.T) {
testChunkNameFormat(t, f)
})
t.Run("SmallFileInternals", func(t *testing.T) {
testSmallFileInternals(t, f)
})
t.Run("PreventCorruption", func(t *testing.T) {
testPreventCorruption(t, f)
})
t.Run("ChunkNumberOverflow", func(t *testing.T) {
testChunkNumberOverflow(t, f)
})
t.Run("MetadataInput", func(t *testing.T) {
testMetadataInput(t, f)
})
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -1,58 +0,0 @@
// Test the Chunker filesystem interface
package chunker_test
import (
"flag"
"os"
"path/filepath"
"testing"
_ "github.com/rclone/rclone/backend/all" // for integration tests
"github.com/rclone/rclone/backend/chunker"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
// Command line flags
var (
// Invalid characters are not supported by some remotes, eg. Mailru.
// We enable testing with invalid characters when -remote is not set, so
// chunker overlays a local directory, but invalid characters are disabled
// by default when -remote is set, eg. when test_all runs backend tests.
// You can still test with invalid characters using the below flag.
UseBadChars = flag.Bool("bad-chars", false, "Set to test bad characters in file names when -remote is set")
)
// TestIntegration runs integration tests against a concrete remote
// set by the -remote flag. If the flag is not set, it creates a
// dynamic chunker overlay wrapping a local temporary directory.
func TestIntegration(t *testing.T) {
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*chunker.Object)(nil),
SkipBadWindowsCharacters: !*UseBadChars,
UnimplementableObjectMethods: []string{
"MimeType",
"GetTier",
"SetTier",
},
UnimplementableFsMethods: []string{
"PublicLink",
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"UserInfo",
"Disconnect",
},
}
if *fstest.RemoteName == "" {
name := "TestChunker"
opt.RemoteName = name + ":"
tempDir := filepath.Join(os.TempDir(), "rclone-chunker-test-standard")
opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "chunker"},
{Name: name, Key: "remote", Value: tempDir},
}
}
fstests.Run(t, &opt)
}

View File

@@ -2,7 +2,6 @@ package crypt
import ( import (
"bytes" "bytes"
"context"
"crypto/aes" "crypto/aes"
gocipher "crypto/cipher" gocipher "crypto/cipher"
"crypto/rand" "crypto/rand"
@@ -14,13 +13,15 @@ import (
"sync" "sync"
"unicode/utf8" "unicode/utf8"
"github.com/ncw/rclone/backend/crypt/pkcs7"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rfjakob/eme"
"golang.org/x/crypto/nacl/secretbox" "golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/scrypt" "golang.org/x/crypto/scrypt"
"github.com/rfjakob/eme"
) )
// Constants // Constants
@@ -42,7 +43,6 @@ var (
ErrorBadDecryptControlChar = errors.New("bad decryption - contains control chars") ErrorBadDecryptControlChar = errors.New("bad decryption - contains control chars")
ErrorNotAMultipleOfBlocksize = errors.New("not a multiple of blocksize") ErrorNotAMultipleOfBlocksize = errors.New("not a multiple of blocksize")
ErrorTooShortAfterDecode = errors.New("too short after base32 decode") ErrorTooShortAfterDecode = errors.New("too short after base32 decode")
ErrorTooLongAfterDecode = errors.New("too long after base32 decode")
ErrorEncryptedFileTooShort = errors.New("file is too short to be encrypted") ErrorEncryptedFileTooShort = errors.New("file is too short to be encrypted")
ErrorEncryptedFileBadHeader = errors.New("file has truncated block header") ErrorEncryptedFileBadHeader = errors.New("file has truncated block header")
ErrorEncryptedBadMagic = errors.New("not an encrypted file - bad magic string") ErrorEncryptedBadMagic = errors.New("not an encrypted file - bad magic string")
@@ -69,7 +69,31 @@ type ReadSeekCloser interface {
} }
// OpenRangeSeek opens the file handle at the offset with the limit given // OpenRangeSeek opens the file handle at the offset with the limit given
type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error) type OpenRangeSeek func(offset, limit int64) (io.ReadCloser, error)
// Cipher is used to swap out the encryption implementations
type Cipher interface {
// EncryptFileName encrypts a file path
EncryptFileName(string) string
// DecryptFileName decrypts a file path, returns error if decrypt was invalid
DecryptFileName(string) (string, error)
// EncryptDirName encrypts a directory path
EncryptDirName(string) string
// DecryptDirName decrypts a directory path, returns error if decrypt was invalid
DecryptDirName(string) (string, error)
// EncryptData
EncryptData(io.Reader) (io.Reader, error)
// DecryptData
DecryptData(io.ReadCloser) (io.ReadCloser, error)
// DecryptDataSeek decrypt at a given position
DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
// EncryptedSize calculates the size of the data when encrypted
EncryptedSize(int64) int64
// DecryptedSize calculates the size of the data when decrypted
DecryptedSize(int64) (int64, error)
// NameEncryptionMode returns the used mode for name handling
NameEncryptionMode() NameEncryptionMode
}
// NameEncryptionMode is the type of file name encryption in use // NameEncryptionMode is the type of file name encryption in use
type NameEncryptionMode int type NameEncryptionMode int
@@ -112,8 +136,7 @@ func (mode NameEncryptionMode) String() (out string) {
return out return out
} }
// Cipher defines an encoding and decoding cipher for the crypt backend type cipher struct {
type Cipher struct {
dataKey [32]byte // Key for secretbox dataKey [32]byte // Key for secretbox
nameKey [32]byte // 16,24 or 32 bytes nameKey [32]byte // 16,24 or 32 bytes
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
@@ -125,8 +148,8 @@ type Cipher struct {
} }
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val // newCipher initialises the cipher. If salt is "" then it uses a built in salt val
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) { func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*cipher, error) {
c := &Cipher{ c := &cipher{
mode: mode, mode: mode,
cryptoRand: rand.Reader, cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt, dirNameEncrypt: dirNameEncrypt,
@@ -149,7 +172,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
// //
// Note that empty passsword makes all 0x00 keys which is used in the // Note that empty passsword makes all 0x00 keys which is used in the
// tests. // tests.
func (c *Cipher) Key(password, salt string) (err error) { func (c *cipher) Key(password, salt string) (err error) {
const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak) const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
var saltBytes = defaultSalt var saltBytes = defaultSalt
if salt != "" { if salt != "" {
@@ -173,18 +196,33 @@ func (c *Cipher) Key(password, salt string) (err error) {
} }
// getBlock gets a block from the pool of size blockSize // getBlock gets a block from the pool of size blockSize
func (c *Cipher) getBlock() []byte { func (c *cipher) getBlock() []byte {
return c.buffers.Get().([]byte) return c.buffers.Get().([]byte)
} }
// putBlock returns a block to the pool of size blockSize // putBlock returns a block to the pool of size blockSize
func (c *Cipher) putBlock(buf []byte) { func (c *cipher) putBlock(buf []byte) {
if len(buf) != blockSize { if len(buf) != blockSize {
panic("bad blocksize returned to pool") panic("bad blocksize returned to pool")
} }
c.buffers.Put(buf) c.buffers.Put(buf)
} }
// check to see if the byte string is valid with no control characters
// from 0x00 to 0x1F and is a valid UTF-8 string
func checkValidString(buf []byte) error {
for i := range buf {
c := buf[i]
if c >= 0x00 && c < 0x20 || c == 0x7F {
return ErrorBadDecryptControlChar
}
}
if !utf8.Valid(buf) {
return ErrorBadDecryptUTF8
}
return nil
}
// encodeFileName encodes a filename using a modified version of // encodeFileName encodes a filename using a modified version of
// standard base32 as described in RFC4648 // standard base32 as described in RFC4648
// //
@@ -217,13 +255,13 @@ func decodeFileName(in string) ([]byte, error) {
// 2003 paper "A Parallelizable Enciphering Mode" by Halevi and // 2003 paper "A Parallelizable Enciphering Mode" by Halevi and
// Rogaway. // Rogaway.
// //
// This makes for deterministic encryption which is what we want - the // This makes for determinstic encryption which is what we want - the
// same filename must encrypt to the same thing. // same filename must encrypt to the same thing.
// //
// This means that // This means that
// * filenames with the same name will encrypt the same // * filenames with the same name will encrypt the same
// * filenames which start the same won't have a common prefix // * filenames which start the same won't have a common prefix
func (c *Cipher) encryptSegment(plaintext string) string { func (c *cipher) encryptSegment(plaintext string) string {
if plaintext == "" { if plaintext == "" {
return "" return ""
} }
@@ -233,7 +271,7 @@ func (c *Cipher) encryptSegment(plaintext string) string {
} }
// decryptSegment decrypts a path segment // decryptSegment decrypts a path segment
func (c *Cipher) decryptSegment(ciphertext string) (string, error) { func (c *cipher) decryptSegment(ciphertext string) (string, error) {
if ciphertext == "" { if ciphertext == "" {
return "", nil return "", nil
} }
@@ -248,19 +286,20 @@ func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
// not possible if decodeFilename() working correctly // not possible if decodeFilename() working correctly
return "", ErrorTooShortAfterDecode return "", ErrorTooShortAfterDecode
} }
if len(rawCiphertext) > 2048 {
return "", ErrorTooLongAfterDecode
}
paddedPlaintext := eme.Transform(c.block, c.nameTweak[:], rawCiphertext, eme.DirectionDecrypt) paddedPlaintext := eme.Transform(c.block, c.nameTweak[:], rawCiphertext, eme.DirectionDecrypt)
plaintext, err := pkcs7.Unpad(nameCipherBlockSize, paddedPlaintext) plaintext, err := pkcs7.Unpad(nameCipherBlockSize, paddedPlaintext)
if err != nil { if err != nil {
return "", err return "", err
} }
err = checkValidString(plaintext)
if err != nil {
return "", err
}
return string(plaintext), err return string(plaintext), err
} }
// Simple obfuscation routines // Simple obfuscation routines
func (c *Cipher) obfuscateSegment(plaintext string) string { func (c *cipher) obfuscateSegment(plaintext string) string {
if plaintext == "" { if plaintext == "" {
return "" return ""
} }
@@ -347,7 +386,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
return result.String() return result.String()
} }
func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) { func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
if ciphertext == "" { if ciphertext == "" {
return "", nil return "", nil
} }
@@ -422,7 +461,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
if int(newRune) < base { if int(newRune) < base {
newRune += 256 newRune += 256
} }
_, _ = result.WriteRune(newRune) _, _ = result.WriteRune(rune(newRune))
default: default:
_, _ = result.WriteRune(runeValue) _, _ = result.WriteRune(runeValue)
@@ -434,7 +473,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
} }
// encryptFileName encrypts a file path // encryptFileName encrypts a file path
func (c *Cipher) encryptFileName(in string) string { func (c *cipher) encryptFileName(in string) string {
segments := strings.Split(in, "/") segments := strings.Split(in, "/")
for i := range segments { for i := range segments {
// Skip directory name encryption if the user chose to // Skip directory name encryption if the user chose to
@@ -452,7 +491,7 @@ func (c *Cipher) encryptFileName(in string) string {
} }
// EncryptFileName encrypts a file path // EncryptFileName encrypts a file path
func (c *Cipher) EncryptFileName(in string) string { func (c *cipher) EncryptFileName(in string) string {
if c.mode == NameEncryptionOff { if c.mode == NameEncryptionOff {
return in + encryptedSuffix return in + encryptedSuffix
} }
@@ -460,7 +499,7 @@ func (c *Cipher) EncryptFileName(in string) string {
} }
// EncryptDirName encrypts a directory path // EncryptDirName encrypts a directory path
func (c *Cipher) EncryptDirName(in string) string { func (c *cipher) EncryptDirName(in string) string {
if c.mode == NameEncryptionOff || !c.dirNameEncrypt { if c.mode == NameEncryptionOff || !c.dirNameEncrypt {
return in return in
} }
@@ -468,7 +507,7 @@ func (c *Cipher) EncryptDirName(in string) string {
} }
// decryptFileName decrypts a file path // decryptFileName decrypts a file path
func (c *Cipher) decryptFileName(in string) (string, error) { func (c *cipher) decryptFileName(in string) (string, error) {
segments := strings.Split(in, "/") segments := strings.Split(in, "/")
for i := range segments { for i := range segments {
var err error var err error
@@ -491,7 +530,7 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
} }
// DecryptFileName decrypts a file path // DecryptFileName decrypts a file path
func (c *Cipher) DecryptFileName(in string) (string, error) { func (c *cipher) DecryptFileName(in string) (string, error) {
if c.mode == NameEncryptionOff { if c.mode == NameEncryptionOff {
remainingLength := len(in) - len(encryptedSuffix) remainingLength := len(in) - len(encryptedSuffix)
if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) { if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) {
@@ -503,15 +542,14 @@ func (c *Cipher) DecryptFileName(in string) (string, error) {
} }
// DecryptDirName decrypts a directory path // DecryptDirName decrypts a directory path
func (c *Cipher) DecryptDirName(in string) (string, error) { func (c *cipher) DecryptDirName(in string) (string, error) {
if c.mode == NameEncryptionOff || !c.dirNameEncrypt { if c.mode == NameEncryptionOff || !c.dirNameEncrypt {
return in, nil return in, nil
} }
return c.decryptFileName(in) return c.decryptFileName(in)
} }
// NameEncryptionMode returns the encryption mode in use for names func (c *cipher) NameEncryptionMode() NameEncryptionMode {
func (c *Cipher) NameEncryptionMode() NameEncryptionMode {
return c.mode return c.mode
} }
@@ -559,7 +597,7 @@ func (n *nonce) increment() {
n.carry(0) n.carry(0)
} }
// add a uint64 to the nonce // add an uint64 to the nonce
func (n *nonce) add(x uint64) { func (n *nonce) add(x uint64) {
carry := uint16(0) carry := uint16(0)
for i := 0; i < 8; i++ { for i := 0; i < 8; i++ {
@@ -579,7 +617,7 @@ func (n *nonce) add(x uint64) {
type encrypter struct { type encrypter struct {
mu sync.Mutex mu sync.Mutex
in io.Reader in io.Reader
c *Cipher c *cipher
nonce nonce nonce nonce
buf []byte buf []byte
readBuf []byte readBuf []byte
@@ -589,7 +627,7 @@ type encrypter struct {
} }
// newEncrypter creates a new file handle encrypting on the fly // newEncrypter creates a new file handle encrypting on the fly
func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) { func (c *cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
fh := &encrypter{ fh := &encrypter{
in: in, in: in,
c: c, c: c,
@@ -661,19 +699,13 @@ func (fh *encrypter) finish(err error) (int, error) {
} }
// Encrypt data encrypts the data stream // Encrypt data encrypts the data stream
func (c *Cipher) encryptData(in io.Reader) (io.Reader, *encrypter, error) { func (c *cipher) EncryptData(in io.Reader) (io.Reader, error) {
in, wrap := accounting.UnWrap(in) // unwrap the accounting off the Reader in, wrap := accounting.UnWrap(in) // unwrap the accounting off the Reader
out, err := c.newEncrypter(in, nil) out, err := c.newEncrypter(in, nil)
if err != nil { if err != nil {
return nil, nil, err return nil, err
} }
return wrap(out), out, nil // and wrap the accounting back on return wrap(out), nil // and wrap the accounting back on
}
// EncryptData encrypts the data stream
func (c *Cipher) EncryptData(in io.Reader) (io.Reader, error) {
out, _, err := c.encryptData(in)
return out, err
} }
// decrypter decrypts an io.ReaderCloser on the fly // decrypter decrypts an io.ReaderCloser on the fly
@@ -682,7 +714,7 @@ type decrypter struct {
rc io.ReadCloser rc io.ReadCloser
nonce nonce nonce nonce
initialNonce nonce initialNonce nonce
c *Cipher c *cipher
buf []byte buf []byte
readBuf []byte readBuf []byte
bufIndex int bufIndex int
@@ -693,7 +725,7 @@ type decrypter struct {
} }
// newDecrypter creates a new file handle decrypting on the fly // newDecrypter creates a new file handle decrypting on the fly
func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) { func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
fh := &decrypter{ fh := &decrypter{
rc: rc, rc: rc,
c: c, c: c,
@@ -714,29 +746,29 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
if !bytes.Equal(readBuf[:fileMagicSize], fileMagicBytes) { if !bytes.Equal(readBuf[:fileMagicSize], fileMagicBytes) {
return nil, fh.finishAndClose(ErrorEncryptedBadMagic) return nil, fh.finishAndClose(ErrorEncryptedBadMagic)
} }
// retrieve the nonce // retreive the nonce
fh.nonce.fromBuf(readBuf[fileMagicSize:]) fh.nonce.fromBuf(readBuf[fileMagicSize:])
fh.initialNonce = fh.nonce fh.initialNonce = fh.nonce
return fh, nil return fh, nil
} }
// newDecrypterSeek creates a new file handle decrypting on the fly // newDecrypterSeek creates a new file handle decrypting on the fly
func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) { func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
var rc io.ReadCloser var rc io.ReadCloser
doRangeSeek := false doRangeSeek := false
setLimit := false setLimit := false
// Open initially with no seek // Open initially with no seek
if offset == 0 && limit < 0 { if offset == 0 && limit < 0 {
// If no offset or limit then open whole file // If no offset or limit then open whole file
rc, err = open(ctx, 0, -1) rc, err = open(0, -1)
} else if offset == 0 { } else if offset == 0 {
// If no offset open the header + limit worth of the file // If no offset open the header + limit worth of the file
_, underlyingLimit, _, _ := calculateUnderlying(offset, limit) _, underlyingLimit, _, _ := calculateUnderlying(offset, limit)
rc, err = open(ctx, 0, int64(fileHeaderSize)+underlyingLimit) rc, err = open(0, int64(fileHeaderSize)+underlyingLimit)
setLimit = true setLimit = true
} else { } else {
// Otherwise just read the header to start with // Otherwise just read the header to start with
rc, err = open(ctx, 0, int64(fileHeaderSize)) rc, err = open(0, int64(fileHeaderSize))
doRangeSeek = true doRangeSeek = true
} }
if err != nil { if err != nil {
@@ -749,7 +781,7 @@ func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
} }
fh.open = open // will be called by fh.RangeSeek fh.open = open // will be called by fh.RangeSeek
if doRangeSeek { if doRangeSeek {
_, err = fh.RangeSeek(ctx, offset, io.SeekStart, limit) _, err = fh.RangeSeek(offset, io.SeekStart, limit)
if err != nil { if err != nil {
_ = fh.Close() _ = fh.Close()
return nil, err return nil, err
@@ -869,7 +901,7 @@ func calculateUnderlying(offset, limit int64) (underlyingOffset, underlyingLimit
// limiting the total length to limit. // limiting the total length to limit.
// //
// RangeSeek with a limit of < 0 is equivalent to a regular Seek. // RangeSeek with a limit of < 0 is equivalent to a regular Seek.
func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, limit int64) (int64, error) { func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, error) {
fh.mu.Lock() fh.mu.Lock()
defer fh.mu.Unlock() defer fh.mu.Unlock()
@@ -896,7 +928,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
// Can we seek underlying stream directly? // Can we seek underlying stream directly?
if do, ok := fh.rc.(fs.RangeSeeker); ok { if do, ok := fh.rc.(fs.RangeSeeker); ok {
// Seek underlying stream directly // Seek underlying stream directly
_, err := do.RangeSeek(ctx, underlyingOffset, 0, underlyingLimit) _, err := do.RangeSeek(underlyingOffset, 0, underlyingLimit)
if err != nil { if err != nil {
return 0, fh.finish(err) return 0, fh.finish(err)
} }
@@ -906,7 +938,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
fh.rc = nil fh.rc = nil
// Re-open the underlying object with the offset given // Re-open the underlying object with the offset given
rc, err := fh.open(ctx, underlyingOffset, underlyingLimit) rc, err := fh.open(underlyingOffset, underlyingLimit)
if err != nil { if err != nil {
return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit")) return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
} }
@@ -935,7 +967,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
// Seek implements the io.Seeker interface // Seek implements the io.Seeker interface
func (fh *decrypter) Seek(offset int64, whence int) (int64, error) { func (fh *decrypter) Seek(offset int64, whence int) (int64, error) {
return fh.RangeSeek(context.TODO(), offset, whence, -1) return fh.RangeSeek(offset, whence, -1)
} }
// finish sets the final error and tidies up // finish sets the final error and tidies up
@@ -996,7 +1028,7 @@ func (fh *decrypter) finishAndClose(err error) error {
} }
// DecryptData decrypts the data stream // DecryptData decrypts the data stream
func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) { func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
out, err := c.newDecrypter(rc) out, err := c.newDecrypter(rc)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -1009,8 +1041,8 @@ func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
// The open function must return a ReadCloser opened to the offset supplied // The open function must return a ReadCloser opened to the offset supplied
// //
// You must use this form of DecryptData if you might want to Seek the file handle // You must use this form of DecryptData if you might want to Seek the file handle
func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) { func (c *cipher) DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
out, err := c.newDecrypterSeek(ctx, open, offset, limit) out, err := c.newDecrypterSeek(open, offset, limit)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -1018,7 +1050,7 @@ func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset
} }
// EncryptedSize calculates the size of the data when encrypted // EncryptedSize calculates the size of the data when encrypted
func (c *Cipher) EncryptedSize(size int64) int64 { func (c *cipher) EncryptedSize(size int64) int64 {
blocks, residue := size/blockDataSize, size%blockDataSize blocks, residue := size/blockDataSize, size%blockDataSize
encryptedSize := int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize) encryptedSize := int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize)
if residue != 0 { if residue != 0 {
@@ -1028,7 +1060,7 @@ func (c *Cipher) EncryptedSize(size int64) int64 {
} }
// DecryptedSize calculates the size of the data when decrypted // DecryptedSize calculates the size of the data when decrypted
func (c *Cipher) DecryptedSize(size int64) (int64, error) { func (c *cipher) DecryptedSize(size int64) (int64, error) {
size -= int64(fileHeaderSize) size -= int64(fileHeaderSize)
if size < 0 { if size < 0 {
return 0, ErrorEncryptedFileTooShort return 0, ErrorEncryptedFileTooShort
@@ -1047,6 +1079,7 @@ func (c *Cipher) DecryptedSize(size int64) (int64, error) {
// check interfaces // check interfaces
var ( var (
_ Cipher = (*cipher)(nil)
_ io.ReadCloser = (*decrypter)(nil) _ io.ReadCloser = (*decrypter)(nil)
_ io.Seeker = (*decrypter)(nil) _ io.Seeker = (*decrypter)(nil)
_ fs.RangeSeeker = (*decrypter)(nil) _ fs.RangeSeeker = (*decrypter)(nil)

View File

@@ -2,7 +2,6 @@ package crypt
import ( import (
"bytes" "bytes"
"context"
"encoding/base32" "encoding/base32"
"fmt" "fmt"
"io" "io"
@@ -10,9 +9,8 @@ import (
"strings" "strings"
"testing" "testing"
"github.com/ncw/rclone/backend/crypt/pkcs7"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@@ -26,7 +24,7 @@ func TestNewNameEncryptionMode(t *testing.T) {
{"off", NameEncryptionOff, ""}, {"off", NameEncryptionOff, ""},
{"standard", NameEncryptionStandard, ""}, {"standard", NameEncryptionStandard, ""},
{"obfuscate", NameEncryptionObfuscated, ""}, {"obfuscate", NameEncryptionObfuscated, ""},
{"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""}, {"potato", NameEncryptionMode(0), "Unknown file name encryption mode \"potato\""},
} { } {
actual, actualErr := NewNameEncryptionMode(test.in) actual, actualErr := NewNameEncryptionMode(test.in)
assert.Equal(t, actual, test.expected) assert.Equal(t, actual, test.expected)
@@ -45,6 +43,69 @@ func TestNewNameEncryptionModeString(t *testing.T) {
assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3") assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3")
} }
func TestValidString(t *testing.T) {
for _, test := range []struct {
in string
expected error
}{
{"", nil},
{"\x01", ErrorBadDecryptControlChar},
{"a\x02", ErrorBadDecryptControlChar},
{"abc\x03", ErrorBadDecryptControlChar},
{"abc\x04def", ErrorBadDecryptControlChar},
{"\x05d", ErrorBadDecryptControlChar},
{"\x06def", ErrorBadDecryptControlChar},
{"\x07", ErrorBadDecryptControlChar},
{"\x08", ErrorBadDecryptControlChar},
{"\x09", ErrorBadDecryptControlChar},
{"\x0A", ErrorBadDecryptControlChar},
{"\x0B", ErrorBadDecryptControlChar},
{"\x0C", ErrorBadDecryptControlChar},
{"\x0D", ErrorBadDecryptControlChar},
{"\x0E", ErrorBadDecryptControlChar},
{"\x0F", ErrorBadDecryptControlChar},
{"\x10", ErrorBadDecryptControlChar},
{"\x11", ErrorBadDecryptControlChar},
{"\x12", ErrorBadDecryptControlChar},
{"\x13", ErrorBadDecryptControlChar},
{"\x14", ErrorBadDecryptControlChar},
{"\x15", ErrorBadDecryptControlChar},
{"\x16", ErrorBadDecryptControlChar},
{"\x17", ErrorBadDecryptControlChar},
{"\x18", ErrorBadDecryptControlChar},
{"\x19", ErrorBadDecryptControlChar},
{"\x1A", ErrorBadDecryptControlChar},
{"\x1B", ErrorBadDecryptControlChar},
{"\x1C", ErrorBadDecryptControlChar},
{"\x1D", ErrorBadDecryptControlChar},
{"\x1E", ErrorBadDecryptControlChar},
{"\x1F", ErrorBadDecryptControlChar},
{"\x20", nil},
{"\x7E", nil},
{"\x7F", ErrorBadDecryptControlChar},
{"£100", nil},
{`hello? sausage/êé/Hello, 世界/ " ' @ < > & ?/z.txt`, nil},
{"£100", nil},
// Following tests from https://secure.php.net/manual/en/reference.pcre.pattern.modifiers.php#54805
{"a", nil}, // Valid ASCII
{"\xc3\xb1", nil}, // Valid 2 Octet Sequence
{"\xc3\x28", ErrorBadDecryptUTF8}, // Invalid 2 Octet Sequence
{"\xa0\xa1", ErrorBadDecryptUTF8}, // Invalid Sequence Identifier
{"\xe2\x82\xa1", nil}, // Valid 3 Octet Sequence
{"\xe2\x28\xa1", ErrorBadDecryptUTF8}, // Invalid 3 Octet Sequence (in 2nd Octet)
{"\xe2\x82\x28", ErrorBadDecryptUTF8}, // Invalid 3 Octet Sequence (in 3rd Octet)
{"\xf0\x90\x8c\xbc", nil}, // Valid 4 Octet Sequence
{"\xf0\x28\x8c\xbc", ErrorBadDecryptUTF8}, // Invalid 4 Octet Sequence (in 2nd Octet)
{"\xf0\x90\x28\xbc", ErrorBadDecryptUTF8}, // Invalid 4 Octet Sequence (in 3rd Octet)
{"\xf0\x28\x8c\x28", ErrorBadDecryptUTF8}, // Invalid 4 Octet Sequence (in 4th Octet)
{"\xf8\xa1\xa1\xa1\xa1", ErrorBadDecryptUTF8}, // Valid 5 Octet Sequence (but not Unicode!)
{"\xfc\xa1\xa1\xa1\xa1\xa1", ErrorBadDecryptUTF8}, // Valid 6 Octet Sequence (but not Unicode!)
} {
actual := checkValidString([]byte(test.in))
assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
}
}
func TestEncodeFileName(t *testing.T) { func TestEncodeFileName(t *testing.T) {
for _, test := range []struct { for _, test := range []struct {
in string in string
@@ -133,10 +194,6 @@ func TestEncryptSegment(t *testing.T) {
func TestDecryptSegment(t *testing.T) { func TestDecryptSegment(t *testing.T) {
// We've tested the forwards above, now concentrate on the errors // We've tested the forwards above, now concentrate on the errors
longName := make([]byte, 3328)
for i := range longName {
longName[i] = 'a'
}
c, _ := newCipher(NameEncryptionStandard, "", "", true) c, _ := newCipher(NameEncryptionStandard, "", "", true)
for _, test := range []struct { for _, test := range []struct {
in string in string
@@ -144,10 +201,11 @@ func TestDecryptSegment(t *testing.T) {
}{ }{
{"64=", ErrorBadBase32Encoding}, {"64=", ErrorBadBase32Encoding},
{"!", base32.CorruptInputError(0)}, {"!", base32.CorruptInputError(0)},
{string(longName), ErrorTooLongAfterDecode},
{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize}, {encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize}, {encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong}, {encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
{c.encryptSegment("\x01"), ErrorBadDecryptControlChar},
{c.encryptSegment("\xc3\x28"), ErrorBadDecryptUTF8},
} { } {
actual, actualErr := c.decryptSegment(test.in) actual, actualErr := c.decryptSegment(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr)) assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
@@ -641,16 +699,16 @@ var (
// Test test infrastructure first! // Test test infrastructure first!
func TestRandomSource(t *testing.T) { func TestRandomSource(t *testing.T) {
source := newRandomSource(1e8) source := newRandomSource(1E8)
sink := newRandomSource(1e8) sink := newRandomSource(1E8)
n, err := io.Copy(sink, source) n, err := io.Copy(sink, source)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, int64(1e8), n) assert.Equal(t, int64(1E8), n)
source = newRandomSource(1e8) source = newRandomSource(1E8)
buf := make([]byte, 16) buf := make([]byte, 16)
_, _ = source.Read(buf) _, _ = source.Read(buf)
sink = newRandomSource(1e8) sink = newRandomSource(1E8)
_, err = io.Copy(sink, source) _, err = io.Copy(sink, source)
assert.Error(t, err, "Error in stream") assert.Error(t, err, "Error in stream")
} }
@@ -690,23 +748,23 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
} }
func TestEncryptDecrypt1(t *testing.T) { func TestEncryptDecrypt1(t *testing.T) {
testEncryptDecrypt(t, 1, 1e7) testEncryptDecrypt(t, 1, 1E7)
} }
func TestEncryptDecrypt32(t *testing.T) { func TestEncryptDecrypt32(t *testing.T) {
testEncryptDecrypt(t, 32, 1e8) testEncryptDecrypt(t, 32, 1E8)
} }
func TestEncryptDecrypt4096(t *testing.T) { func TestEncryptDecrypt4096(t *testing.T) {
testEncryptDecrypt(t, 4096, 1e8) testEncryptDecrypt(t, 4096, 1E8)
} }
func TestEncryptDecrypt65536(t *testing.T) { func TestEncryptDecrypt65536(t *testing.T) {
testEncryptDecrypt(t, 65536, 1e8) testEncryptDecrypt(t, 65536, 1E8)
} }
func TestEncryptDecrypt65537(t *testing.T) { func TestEncryptDecrypt65537(t *testing.T) {
testEncryptDecrypt(t, 65537, 1e8) testEncryptDecrypt(t, 65537, 1E8)
} }
var ( var (
@@ -739,7 +797,7 @@ func TestEncryptData(t *testing.T) {
} { } {
c, err := newCipher(NameEncryptionStandard, "", "", true) c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err) assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
// Check encode works // Check encode works
buf := bytes.NewBuffer(test.in) buf := bytes.NewBuffer(test.in)
@@ -762,7 +820,7 @@ func TestEncryptData(t *testing.T) {
func TestNewEncrypter(t *testing.T) { func TestNewEncrypter(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true) c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err) assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
z := &zeroes{} z := &zeroes{}
@@ -785,15 +843,23 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true) c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err) assert.NoError(t, err)
in := &readers.ErrorReader{Err: io.ErrUnexpectedEOF} in := &errorReader{io.ErrUnexpectedEOF}
fh, err := c.newEncrypter(in, nil) fh, err := c.newEncrypter(in, nil)
assert.NoError(t, err) assert.NoError(t, err)
n, err := io.CopyN(ioutil.Discard, fh, 1e6) n, err := io.CopyN(ioutil.Discard, fh, 1E6)
assert.Equal(t, io.ErrUnexpectedEOF, err) assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(32), n) assert.Equal(t, int64(32), n)
} }
type errorReader struct {
err error
}
func (er errorReader) Read(p []byte) (n int, err error) {
return 0, er.err
}
type closeDetector struct { type closeDetector struct {
io.Reader io.Reader
closed int closed int
@@ -813,7 +879,7 @@ func (c *closeDetector) Close() error {
func TestNewDecrypter(t *testing.T) { func TestNewDecrypter(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true) c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err) assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
cd := newCloseDetector(bytes.NewBuffer(file0)) cd := newCloseDetector(bytes.NewBuffer(file0))
fh, err := c.newDecrypter(cd) fh, err := c.newDecrypter(cd)
@@ -831,7 +897,7 @@ func TestNewDecrypter(t *testing.T) {
assert.Equal(t, 1, cd.closed) assert.Equal(t, 1, cd.closed)
} }
er := &readers.ErrorReader{Err: errors.New("potato")} er := &errorReader{errors.New("potato")}
cd = newCloseDetector(er) cd = newCloseDetector(er)
fh, err = c.newDecrypter(cd) fh, err = c.newDecrypter(cd)
assert.Nil(t, fh) assert.Nil(t, fh)
@@ -857,14 +923,14 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true) c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err) assert.NoError(t, err)
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF} in2 := &errorReader{io.ErrUnexpectedEOF}
in1 := bytes.NewBuffer(file16) in1 := bytes.NewBuffer(file16)
in := ioutil.NopCloser(io.MultiReader(in1, in2)) in := ioutil.NopCloser(io.MultiReader(in1, in2))
fh, err := c.newDecrypter(in) fh, err := c.newDecrypter(in)
assert.NoError(t, err) assert.NoError(t, err)
n, err := io.CopyN(ioutil.Discard, fh, 1e6) n, err := io.CopyN(ioutil.Discard, fh, 1E6)
assert.Equal(t, io.ErrUnexpectedEOF, err) assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(16), n) assert.Equal(t, int64(16), n)
} }
@@ -894,7 +960,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
// Open stream with a seek of underlyingOffset // Open stream with a seek of underlyingOffset
var reader io.ReadCloser var reader io.ReadCloser
open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) { open := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
end := len(ciphertext) end := len(ciphertext)
if underlyingLimit >= 0 { if underlyingLimit >= 0 {
end = int(underlyingOffset + underlyingLimit) end = int(underlyingOffset + underlyingLimit)
@@ -929,13 +995,13 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
assert.Equal(t, 0, n) assert.Equal(t, 0, n)
} }
// Now try decoding it with an open/seek // Now try decoding it with a open/seek
for _, offset := range trials { for _, offset := range trials {
for _, limit := range limits { for _, limit := range limits {
if offset+limit > len(plaintext) { if offset+limit > len(plaintext) {
continue continue
} }
rc, err := c.DecryptDataSeek(context.Background(), open, int64(offset), int64(limit)) rc, err := c.DecryptDataSeek(open, int64(offset), int64(limit))
assert.NoError(t, err) assert.NoError(t, err)
check(rc, offset, limit) check(rc, offset, limit)
@@ -943,14 +1009,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
} }
// Try decoding it with a single open and lots of seeks // Try decoding it with a single open and lots of seeks
fh, err := c.DecryptDataSeek(context.Background(), open, 0, -1) fh, err := c.DecryptDataSeek(open, 0, -1)
assert.NoError(t, err) assert.NoError(t, err)
for _, offset := range trials { for _, offset := range trials {
for _, limit := range limits { for _, limit := range limits {
if offset+limit > len(plaintext) { if offset+limit > len(plaintext) {
continue continue
} }
_, err := fh.RangeSeek(context.Background(), int64(offset), io.SeekStart, int64(limit)) _, err := fh.RangeSeek(int64(offset), io.SeekStart, int64(limit))
assert.NoError(t, err) assert.NoError(t, err)
check(fh, offset, limit) check(fh, offset, limit)
@@ -1001,7 +1067,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
} { } {
what := fmt.Sprintf("offset = %d, limit = %d", test.offset, test.limit) what := fmt.Sprintf("offset = %d, limit = %d", test.offset, test.limit)
callCount := 0 callCount := 0
testOpen := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) { testOpen := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
switch callCount { switch callCount {
case 0: case 0:
assert.Equal(t, int64(0), underlyingOffset, what) assert.Equal(t, int64(0), underlyingOffset, what)
@@ -1013,11 +1079,11 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
t.Errorf("Too many calls %d for %s", callCount+1, what) t.Errorf("Too many calls %d for %s", callCount+1, what)
} }
callCount++ callCount++
return open(ctx, underlyingOffset, underlyingLimit) return open(underlyingOffset, underlyingLimit)
} }
fh, err := c.DecryptDataSeek(context.Background(), testOpen, 0, -1) fh, err := c.DecryptDataSeek(testOpen, 0, -1)
assert.NoError(t, err) assert.NoError(t, err)
gotOffset, err := fh.RangeSeek(context.Background(), test.offset, io.SeekStart, test.limit) gotOffset, err := fh.RangeSeek(test.offset, io.SeekStart, test.limit)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, gotOffset, test.offset) assert.Equal(t, gotOffset, test.offset)
} }
@@ -1111,7 +1177,7 @@ func TestDecrypterRead(t *testing.T) {
// Test producing an error on the file on Read the underlying file // Test producing an error on the file on Read the underlying file
in1 := bytes.NewBuffer(file1) in1 := bytes.NewBuffer(file1)
in2 := &readers.ErrorReader{Err: errors.New("potato")} in2 := &errorReader{errors.New("potato")}
in := io.MultiReader(in1, in2) in := io.MultiReader(in1, in2)
cd := newCloseDetector(in) cd := newCloseDetector(in)
fh, err := c.newDecrypter(cd) fh, err := c.newDecrypter(cd)

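A note on the pattern just above: the tests swap c.cryptoRand for newRandomSource(1e8) so that nonces and padding are reproducible from run to run. Below is a minimal sketch of such a deterministic io.Reader; the type name and behaviour are illustrative only and assume nothing about rclone's actual newRandomSource.

package main

import (
	"fmt"
	"io"
)

// deterministicSource yields a fixed, repeating byte sequence so code
// that reads "random" bytes behaves identically on every run.
type deterministicSource struct{ next byte }

func (s *deterministicSource) Read(p []byte) (int, error) {
	for i := range p {
		p[i] = s.next
		s.next++
	}
	return len(p), nil
}

func main() {
	var src io.Reader = &deterministicSource{}
	buf := make([]byte, 8)
	_, _ = src.Read(buf)
	fmt.Println(buf) // always [0 1 2 3 4 5 6 7]
}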
backend/crypt/crypt.go
@@ -2,58 +2,54 @@
package crypt package crypt
import ( import (
"context"
"fmt" "fmt"
"io" "io"
"path" "path"
"strconv"
"strings" "strings"
"time" "time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
) )
// Globals // Globals
var (
// Flags
cryptShowMapping = flags.BoolP("crypt-show-mapping", "", false, "For all files listed show how the names encrypt.")
)
// Register with Fs // Register with Fs
func init() { func init() {
fs.Register(&fs.RegInfo{ fs.Register(&fs.RegInfo{
Name: "crypt", Name: "crypt",
Description: "Encrypt/Decrypt a remote", Description: "Encrypt/Decrypt a remote",
NewFs: NewFs, NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "remote", Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).", Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, { }, {
Name: "filename_encryption", Name: "filename_encryption",
Help: "How to encrypt the filenames.", Help: "How to encrypt the filenames.",
Default: "standard",
Examples: []fs.OptionExample{ Examples: []fs.OptionExample{
{ {
Value: "off",
Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
}, {
Value: "standard", Value: "standard",
Help: "Encrypt the filenames see the docs for the details.", Help: "Encrypt the filenames see the docs for the details.",
}, { }, {
Value: "obfuscate", Value: "obfuscate",
Help: "Very simple filename obfuscation.", Help: "Very simple filename obfuscation.",
}, {
Value: "off",
Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
}, },
}, },
}, { }, {
Name: "directory_name_encryption", Name: "directory_name_encryption",
Help: `Option to either encrypt directory names or leave them intact. Help: "Option to either encrypt directory names or leave them intact.",
NB If filename_encryption is "off" then this option will do nothing.`,
Default: true,
Examples: []fs.OptionExample{ Examples: []fs.OptionExample{
{ {
Value: "true", Value: "true",
@@ -68,125 +64,74 @@ NB If filename_encryption is "off" then this option will do nothing.`,
Name: "password", Name: "password",
Help: "Password or pass phrase for encryption.", Help: "Password or pass phrase for encryption.",
IsPassword: true, IsPassword: true,
Required: true,
}, { }, {
Name: "password2", Name: "password2",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.", Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
IsPassword: true, IsPassword: true,
}, { Optional: true,
Name: "server_side_across_configs",
Default: false,
Help: `Allow server side operations (eg copy) to work across different crypt configs.
Normally this option is not what you want, but if you have two crypts
pointing to the same backend you can use it.
This can be used, for example, to change file name encryption type
without re-uploading all the data. Just make two crypt backends
pointing to two different directories with the single changed
parameter and use rclone move to move the files between the crypt
remotes.`,
Advanced: true,
}, {
Name: "show_mapping",
Help: `For all files listed show how the names encrypt.
If this flag is set then for each file that the remote is asked to
list, it will log (at level INFO) a line stating the decrypted file
name and the encrypted file name.
This is so you can work out which encrypted names are which decrypted
names just in case you need to do something with the encrypted file
names, or for debugging purposes.`,
Default: false,
Hide: fs.OptionHideConfigurator,
Advanced: true,
}}, }},
}) })
} }
// newCipherForConfig constructs a Cipher for the given config name // NewCipher constructs a Cipher for the given config name
func newCipherForConfig(opt *Options) (*Cipher, error) { func NewCipher(name string) (Cipher, error) {
mode, err := NewNameEncryptionMode(opt.FilenameEncryption) mode, err := NewNameEncryptionMode(config.FileGet(name, "filename_encryption", "standard"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
if opt.Password == "" { dirNameEncrypt, err := strconv.ParseBool(config.FileGet(name, "directory_name_encryption", "true"))
if err != nil {
return nil, err
}
password := config.FileGet(name, "password", "")
if password == "" {
return nil, errors.New("password not set in config file") return nil, errors.New("password not set in config file")
} }
password, err := obscure.Reveal(opt.Password) password, err = obscure.Reveal(password)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password") return nil, errors.Wrap(err, "failed to decrypt password")
} }
var salt string salt := config.FileGet(name, "password2", "")
if opt.Password2 != "" { if salt != "" {
salt, err = obscure.Reveal(opt.Password2) salt, err = obscure.Reveal(salt)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password2") return nil, errors.Wrap(err, "failed to decrypt password2")
} }
} }
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption) cipher, err := newCipher(mode, password, salt, dirNameEncrypt)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to make cipher") return nil, errors.Wrap(err, "failed to make cipher")
} }
return cipher, nil return cipher, nil
} }
// NewCipher constructs a Cipher for the given config // NewFs contstructs an Fs from the path, container:path
func NewCipher(m configmap.Mapper) (*Cipher, error) { func NewFs(name, rpath string) (fs.Fs, error) {
// Parse config into Options struct cipher, err := NewCipher(name)
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return newCipherForConfig(opt) remote := config.FileGet(name, "remote")
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
cipher, err := newCipherForConfig(opt)
if err != nil {
return nil, err
}
remote := opt.Remote
if strings.HasPrefix(remote, name+":") { if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting") return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
} }
// Make sure to remove trailing . referring to the current dir
if path.Base(rpath) == "." {
rpath = strings.TrimSuffix(rpath, ".")
}
// Look for a file first // Look for a file first
var wrappedFs fs.Fs remotePath := path.Join(remote, cipher.EncryptFileName(rpath))
if rpath == "" { wrappedFs, err := fs.NewFs(remotePath)
wrappedFs, err = cache.Get(remote) // if that didn't produce a file, look for a directory
} else { if err != fs.ErrorIsFile {
remotePath := fspath.JoinRootPath(remote, cipher.EncryptFileName(rpath)) remotePath = path.Join(remote, cipher.EncryptDirName(rpath))
wrappedFs, err = cache.Get(remotePath) wrappedFs, err = fs.NewFs(remotePath)
// if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile {
remotePath = fspath.JoinRootPath(remote, cipher.EncryptDirName(rpath))
wrappedFs, err = cache.Get(remotePath)
}
} }
if err != fs.ErrorIsFile && err != nil { if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote) return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remotePath)
} }
f := &Fs{ f := &Fs{
Fs: wrappedFs, Fs: wrappedFs,
name: name, name: name,
root: rpath, root: rpath,
opt: *opt,
cipher: cipher, cipher: cipher,
} }
cache.PinUntilFinalized(f.Fs, f)
// the features here are ones we could support, and they are // the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs // ANDed with the ones from wrappedFs
f.features = (&fs.Features{ f.features = (&fs.Features{
@@ -196,34 +141,33 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
WriteMimeType: false, WriteMimeType: false,
BucketBased: true, BucketBased: true,
CanHaveEmptyDirectories: true, CanHaveEmptyDirectories: true,
SetTier: true,
GetTier: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs) }).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
return f, err doChangeNotify := wrappedFs.Features().ChangeNotify
} if doChangeNotify != nil {
f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
decrypted, err := f.DecryptFileName(path)
if err != nil {
fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
return
}
notifyFunc(decrypted, entryType)
}
return doChangeNotify(wrappedNotifyFunc, pollInterval)
}
}
// Options defines the configuration for this backend return f, err
type Options struct {
Remote string `config:"remote"`
FilenameEncryption string `config:"filename_encryption"`
DirectoryNameEncryption bool `config:"directory_name_encryption"`
Password string `config:"password"`
Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ShowMapping bool `config:"show_mapping"`
} }
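For context on the struct above: crypt's NewFs fills Options from the remote's config section via configstruct.Set, driven by the config struct tags. A small sketch of that decoding with a hypothetical two-field struct, using configmap.Simple and configstruct.Set from the packages imported above:

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
)

// opts mirrors the shape of crypt's Options: each `config` tag names
// the key read from the remote's config section.
type opts struct {
	Remote   string `config:"remote"`
	Password string `config:"password"`
}

func main() {
	m := configmap.Simple{"remote": "myremote:bucket", "password": "xxx"}
	o := new(opts)
	if err := configstruct.Set(m, o); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", o)
}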
// Fs represents a wrapped fs.Fs // Fs represents a wrapped fs.Fs
type Fs struct { type Fs struct {
fs.Fs fs.Fs
wrapper fs.Fs
name string name string
root string root string
opt Options
features *fs.Features // optional features features *fs.Features // optional features
cipher *Cipher cipher Cipher
} }
// Name of the remote (as passed into NewFs) // Name of the remote (as passed into NewFs)
@@ -254,35 +198,35 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
fs.Debugf(remote, "Skipping undecryptable file name: %v", err) fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
return return
} }
if f.opt.ShowMapping { if *cryptShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote) fs.Logf(decryptedRemote, "Encrypts to %q", remote)
} }
*entries = append(*entries, f.newObject(obj)) *entries = append(*entries, f.newObject(obj))
} }
// Encrypt a directory file name to entries. // Encrypt an directory file name to entries.
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) { func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
remote := dir.Remote() remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote) decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil { if err != nil {
fs.Debugf(remote, "Skipping undecryptable dir name: %v", err) fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
return return
} }
if f.opt.ShowMapping { if *cryptShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote) fs.Logf(decryptedRemote, "Encrypts to %q", remote)
} }
*entries = append(*entries, f.newDir(ctx, dir)) *entries = append(*entries, f.newDir(dir))
} }
// Encrypt some directory entries. This alters entries returning it as newEntries. // Encrypt some directory entries. This alters entries returning it as newEntries.
func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) { func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
newEntries = entries[:0] // in place filter newEntries = entries[:0] // in place filter
for _, entry := range entries { for _, entry := range entries {
switch x := entry.(type) { switch x := entry.(type) {
case fs.Object: case fs.Object:
f.add(&newEntries, x) f.add(&newEntries, x)
case fs.Directory: case fs.Directory:
f.addDir(ctx, &newEntries, x) f.addDir(&newEntries, x)
default: default:
return nil, errors.Errorf("Unknown object type %T", entry) return nil, errors.Errorf("Unknown object type %T", entry)
} }
@@ -299,12 +243,12 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
// //
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir)) entries, err = f.Fs.List(f.cipher.EncryptDirName(dir))
if err != nil { if err != nil {
return nil, err return nil, err
} }
return f.encryptEntries(ctx, entries) return f.encryptEntries(entries)
} }
// ListR lists the objects and directories of the Fs starting // ListR lists the objects and directories of the Fs starting
@@ -323,9 +267,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// //
// Don't implement this unless you have a more efficient way // Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal. // of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
return f.Fs.Features().ListR(ctx, f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error { return f.Fs.Features().ListR(f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
newEntries, err := f.encryptEntries(ctx, entries) newEntries, err := f.encryptEntries(entries)
if err != nil { if err != nil {
return err return err
} }
@@ -334,20 +278,20 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
} }
// NewObject finds the Object at remote. // NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { func (f *Fs) NewObject(remote string) (fs.Object, error) {
o, err := f.Fs.NewObject(ctx, f.cipher.EncryptFileName(remote)) o, err := f.Fs.NewObject(f.cipher.EncryptFileName(remote))
if err != nil { if err != nil {
return nil, err return nil, err
} }
return f.newObject(o), nil return f.newObject(o), nil
} }
type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
// put implements Put or PutStream // put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) { func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
// Encrypt the data into wrappedIn // Encrypt the data into wrappedIn
wrappedIn, encrypter, err := f.cipher.encryptData(in) wrappedIn, err := f.cipher.EncryptData(in)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -361,17 +305,11 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil { if err != nil {
return nil, err return nil, err
} }
// unwrap the accounting
var wrap accounting.WrapFn
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
// add the hasher
wrappedIn = io.TeeReader(wrappedIn, hasher) wrappedIn = io.TeeReader(wrappedIn, hasher)
// wrap the accounting back on
wrappedIn = wrap(wrappedIn)
} }
// Transfer the data // Transfer the data
o, err := put(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce), options...) o, err := put(wrappedIn, f.newObjectInfo(src), options...)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -380,13 +318,13 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if ht != hash.None && hasher != nil { if ht != hash.None && hasher != nil {
srcHash := hasher.Sums()[ht] srcHash := hasher.Sums()[ht]
var dstHash string var dstHash string
dstHash, err = o.Hash(ctx, ht) dstHash, err = o.Hash(ht)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to read destination hash") return nil, errors.Wrap(err, "failed to read destination hash")
} }
if srcHash != "" && dstHash != "" && srcHash != dstHash { if srcHash != "" && dstHash != "" && srcHash != dstHash {
// remove object // remove object
err = o.Remove(ctx) err = o.Remove()
if err != nil { if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err) fs.Errorf(o, "Failed to remove corrupted object: %v", err)
} }
@@ -402,13 +340,13 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
// May create the object even if it returns an error - if so // May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return // will return the object and the error, otherwise will return
// nil and the error // nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(ctx, in, src, options, f.Fs.Put) return f.put(in, src, options, f.Fs.Put)
} }
// PutStream uploads to the remote path with the modTime given of indeterminate size // PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(ctx, in, src, options, f.Fs.Features().PutStream) return f.put(in, src, options, f.Fs.Features().PutStream)
} }
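The put path above tees the encrypted stream through a hasher so the destination's hash can be checked once the upload finishes. A standalone sketch of that tee-and-hash shape, with ioutil.Discard standing in for the real upload:

package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	src := strings.NewReader("ciphertext would flow here")
	hasher := md5.New()
	// Every byte the uploader consumes is also fed to the hasher.
	tee := io.TeeReader(src, hasher)
	n, err := io.Copy(ioutil.Discard, tee) // stand-in for the real upload
	if err != nil {
		panic(err)
	}
	fmt.Printf("uploaded %d bytes, md5 %x\n", n, hasher.Sum(nil))
}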
// Hashes returns the supported hash sets. // Hashes returns the supported hash sets.
@@ -419,29 +357,29 @@ func (f *Fs) Hashes() hash.Set {
// Mkdir makes the directory (container, bucket) // Mkdir makes the directory (container, bucket)
// //
// Shouldn't return an error if it already exists // Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error { func (f *Fs) Mkdir(dir string) error {
return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir)) return f.Fs.Mkdir(f.cipher.EncryptDirName(dir))
} }
// Rmdir removes the directory (container, bucket) if empty // Rmdir removes the directory (container, bucket) if empty
// //
// Return an error if it doesn't exist or isn't empty // Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(dir string) error {
return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir)) return f.Fs.Rmdir(f.cipher.EncryptDirName(dir))
} }
// Purge all files in the directory specified // Purge all files in the root and the root directory
// //
// Implement this if you have a way of deleting all the files // Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List() // quicker than just running Remove() on the result of List()
// //
// Return an error if it doesn't exist // Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error { func (f *Fs) Purge() error {
do := f.Fs.Features().Purge do := f.Fs.Features().Purge
if do == nil { if do == nil {
return fs.ErrorCantPurge return fs.ErrorCantPurge
} }
return do(ctx, f.cipher.EncryptDirName(dir)) return do()
} }
// Copy src to this remote using server side copy operations. // Copy src to this remote using server side copy operations.
@@ -453,7 +391,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantCopy // If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Copy do := f.Fs.Features().Copy
if do == nil { if do == nil {
return nil, fs.ErrorCantCopy return nil, fs.ErrorCantCopy
@@ -462,7 +400,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
if !ok { if !ok {
return nil, fs.ErrorCantCopy return nil, fs.ErrorCantCopy
} }
oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote)) oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -478,7 +416,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantMove // If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Move do := f.Fs.Features().Move
if do == nil { if do == nil {
return nil, fs.ErrorCantMove return nil, fs.ErrorCantMove
@@ -487,7 +425,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if !ok { if !ok {
return nil, fs.ErrorCantMove return nil, fs.ErrorCantMove
} }
oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote)) oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -502,7 +440,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// If it isn't possible then return fs.ErrorCantDirMove // If it isn't possible then return fs.ErrorCantDirMove
// //
// If destination exists then return fs.ErrorDirExists // If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
do := f.Fs.Features().DirMove do := f.Fs.Features().DirMove
if do == nil { if do == nil {
return fs.ErrorCantDirMove return fs.ErrorCantDirMove
@@ -512,23 +450,23 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
fs.Debugf(srcFs, "Can't move directory - not same remote type") fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove return fs.ErrorCantDirMove
} }
return do(ctx, srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote)) return do(srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
} }
// PutUnchecked uploads the object // PutUnchecked uploads the object
// //
// This will create a duplicate if we upload a new file without // This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that. // checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.Fs.Features().PutUnchecked do := f.Fs.Features().PutUnchecked
if do == nil { if do == nil {
return nil, errors.New("can't PutUnchecked") return nil, errors.New("can't PutUnchecked")
} }
wrappedIn, encrypter, err := f.cipher.encryptData(in) wrappedIn, err := f.cipher.EncryptData(in)
if err != nil { if err != nil {
return nil, err return nil, err
} }
o, err := do(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce)) o, err := do(wrappedIn, f.newObjectInfo(src))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -539,21 +477,21 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
// //
// Implement this if you have a way of emptying the trash or // Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files. // otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error { func (f *Fs) CleanUp() error {
do := f.Fs.Features().CleanUp do := f.Fs.Features().CleanUp
if do == nil { if do == nil {
return errors.New("can't CleanUp") return errors.New("can't CleanUp")
} }
return do(ctx) return do()
} }
// About gets quota information from the Fs // About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { func (f *Fs) About() (*fs.Usage, error) {
do := f.Fs.Features().About do := f.Fs.Features().About
if do == nil { if do == nil {
return nil, errors.New("About not supported") return nil, errors.New("About not supported")
} }
return do(ctx) return do()
} }
// UnWrap returns the Fs that this Fs is wrapping // UnWrap returns the Fs that this Fs is wrapping
@@ -561,16 +499,6 @@ func (f *Fs) UnWrap() fs.Fs {
return f.Fs return f.Fs
} }
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// EncryptFileName returns an encrypted file name // EncryptFileName returns an encrypted file name
func (f *Fs) EncryptFileName(fileName string) string { func (f *Fs) EncryptFileName(fileName string) string {
return f.cipher.EncryptFileName(fileName) return f.cipher.EncryptFileName(fileName)
@@ -581,49 +509,18 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
return f.cipher.DecryptFileName(encryptedFileName) return f.cipher.DecryptFileName(encryptedFileName)
} }
// computeHashWithNonce takes the nonce and encrypts the contents of
// src with it, and calculates the hash given by HashType on the fly
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Object, hashType hash.Type) (hashStr string, err error) {
// Open the src for input
in, err := src.Open(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to open src")
}
defer fs.CheckClose(in, &err)
// Now encrypt the src with the nonce
out, err := f.cipher.newEncrypter(in, &nonce)
if err != nil {
return "", errors.Wrap(err, "failed to make encrypter")
}
// pipe into hash
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
return "", errors.Wrap(err, "failed to make hasher")
}
_, err = io.Copy(m, out)
if err != nil {
return "", errors.Wrap(err, "failed to hash data")
}
return m.Sums()[hashType], nil
}
// ComputeHash takes the nonce from o, and encrypts the contents of // ComputeHash takes the nonce from o, and encrypts the contents of
// src with it, and calculates the hash given by HashType on the fly // src with it, and calcuates the hash given by HashType on the fly
// //
// Note that we break lots of encapsulation in this function. // Note that we break lots of encapsulation in this function.
func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) { func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
// Read the nonce - opening the file is sufficient to read the nonce in // Read the nonce - opening the file is sufficient to read the nonce in
// use a limited read so we only read the header // use a limited read so we only read the header
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1}) in, err := o.Object.Open(&fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
if err != nil { if err != nil {
return "", errors.Wrap(err, "failed to open object to read nonce") return "", errors.Wrap(err, "failed to open object to read nonce")
} }
d, err := f.cipher.newDecrypter(in) d, err := f.cipher.(*cipher).newDecrypter(in)
if err != nil { if err != nil {
_ = in.Close() _ = in.Close()
return "", errors.Wrap(err, "failed to open object to read nonce") return "", errors.Wrap(err, "failed to open object to read nonce")
@@ -648,137 +545,30 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
return "", errors.Wrap(err, "failed to close nonce read") return "", errors.Wrap(err, "failed to close nonce read")
} }
return f.computeHashWithNonce(ctx, nonce, src, hashType) // Open the src for input
} in, err = src.Open()
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
do := f.Fs.Features().MergeDirs
if do == nil {
return errors.New("MergeDirs not supported")
}
out := make([]fs.Directory, len(dirs))
for i, dir := range dirs {
out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
}
return do(ctx, out)
}
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
do := f.Fs.Features().DirCacheFlush
if do != nil {
do()
}
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
do := f.Fs.Features().PublicLink
if do == nil {
return "", errors.New("PublicLink not supported")
}
o, err := f.NewObject(ctx, remote)
if err != nil { if err != nil {
// assume it is a directory return "", errors.Wrap(err, "failed to open src")
return do(ctx, f.cipher.EncryptDirName(remote), expire, unlink)
} }
return do(ctx, o.(*Object).Object.Remote(), expire, unlink) defer fs.CheckClose(in, &err)
}
// ChangeNotify calls the passed function with a path // Now encrypt the src with the nonce
// that has had changes. If the implementation out, err := f.cipher.(*cipher).newEncrypter(in, &nonce)
// uses polling, it should adhere to the given interval. if err != nil {
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) { return "", errors.Wrap(err, "failed to make encrypter")
do := f.Fs.Features().ChangeNotify
if do == nil {
return
} }
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType) // pipe into hash
var ( m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
err error if err != nil {
decrypted string return "", errors.Wrap(err, "failed to make hasher")
)
switch entryType {
case fs.EntryDirectory:
decrypted, err = f.cipher.DecryptDirName(path)
case fs.EntryObject:
decrypted, err = f.cipher.DecryptFileName(path)
default:
fs.Errorf(path, "crypt ChangeNotify: ignoring unknown EntryType %d", entryType)
return
}
if err != nil {
fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
return
}
notifyFunc(decrypted, entryType)
} }
do(ctx, wrappedNotifyFunc, pollIntervalChan) _, err = io.Copy(m, out)
} if err != nil {
return "", errors.Wrap(err, "failed to hash data")
var commandHelp = []fs.CommandHelp{
{
Name: "encode",
Short: "Encode the given filename(s)",
Long: `This encodes the filenames given as arguments returning a list of
strings of the encoded results.
Usage Example:
rclone backend encode crypt: file1 [file2...]
rclone rc backend/command command=encode fs=crypt: file1 [file2...]
`,
},
{
Name: "decode",
Short: "Decode the given filename(s)",
Long: `This decodes the filenames given as arguments returning a list of
strings of the decoded results. It will return an error if any of the
inputs are invalid.
Usage Example:
rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
`,
},
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "decode":
out := make([]string, 0, len(arg))
for _, encryptedFileName := range arg {
fileName, err := f.DecryptFileName(encryptedFileName)
if err != nil {
return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName))
}
out = append(out, fileName)
}
return out, nil
case "encode":
out := make([]string, 0, len(arg))
for _, fileName := range arg {
encryptedFileName := f.EncryptFileName(fileName)
out = append(out, encryptedFileName)
}
return out, nil
default:
return nil, fs.ErrorCommandNotFound
} }
return m.Sums()[hashType], nil
} }
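ComputeHash above reads the nonce from the header of the existing encrypted object, re-encrypts the source with that nonce, and hashes the resulting ciphertext on the fly. A toy version of that hash-while-encrypting plumbing; xorReader here is a stub cipher for illustration, not rclone's real encrypter:

package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"strings"
)

// xorReader stands in for the real encrypter: given the same nonce
// byte it "re-encrypts" the source deterministically.
type xorReader struct {
	src   io.Reader
	nonce byte
}

func (x *xorReader) Read(p []byte) (int, error) {
	n, err := x.src.Read(p)
	for i := 0; i < n; i++ {
		p[i] ^= x.nonce
	}
	return n, err
}

func main() {
	// Hash the ciphertext stream directly, as ComputeHash does.
	enc := &xorReader{src: strings.NewReader("plaintext"), nonce: 0x42}
	h := md5.New()
	if _, err := io.Copy(h, enc); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", h.Sum(nil))
}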
// Object describes a wrapped Object for being read from the Fs	// Object describes a wrapped Object for being read from the Fs
@@ -831,7 +621,7 @@ func (o *Object) Size() int64 {
// Hash returns the selected checksum of the file // Hash returns the selected checksum of the file
// If no checksum is available it returns "" // If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) { func (o *Object) Hash(ht hash.Type) (string, error) {
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
@@ -841,7 +631,7 @@ func (o *Object) UnWrap() fs.Object {
} }
// Open opens the file for read. Call Close() on the returned io.ReadCloser // Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) { func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
var openOptions []fs.OpenOption var openOptions []fs.OpenOption
var offset, limit int64 = 0, -1 var offset, limit int64 = 0, -1
for _, option := range options { for _, option := range options {
@@ -855,10 +645,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
openOptions = append(openOptions, option) openOptions = append(openOptions, option)
} }
} }
rc, err = o.f.cipher.DecryptDataSeek(ctx, func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) { rc, err = o.f.cipher.DecryptDataSeek(func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
if underlyingOffset == 0 && underlyingLimit < 0 { if underlyingOffset == 0 && underlyingLimit < 0 {
// Open with no seek // Open with no seek
return o.Object.Open(ctx, openOptions...) return o.Object.Open(openOptions...)
} }
// Open stream with a range of underlyingOffset, underlyingLimit // Open stream with a range of underlyingOffset, underlyingLimit
end := int64(-1) end := int64(-1)
@@ -869,7 +659,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
} }
} }
newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end}) newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end})
return o.Object.Open(ctx, newOpenOptions...) return o.Object.Open(newOpenOptions...)
}, offset, limit) }, offset, limit)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -878,43 +668,25 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
} }
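Open above converts a plaintext offset and limit into an underlying ciphertext range, where End is inclusive and -1 means open-ended. The arithmetic isolated as a helper (illustrative, not rclone API):

package main

import "fmt"

// rangeFor maps (offset, limit) to an inclusive [start, end] byte
// range; limit < 0 means "read to EOF", reported as end == -1.
func rangeFor(offset, limit int64) (start, end int64) {
	start = offset
	end = -1
	if limit >= 0 {
		end = offset + limit - 1
	}
	return start, end
}

func main() {
	fmt.Println(rangeFor(100, 50)) // 100 149
	fmt.Println(rangeFor(0, -1))   // 0 -1
}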
// Update in to the object with the modTime given of the given size // Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
update := func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return o.Object, o.Object.Update(ctx, in, src, options...) return o.Object, o.Object.Update(in, src, options...)
} }
_, err := o.f.put(ctx, in, src, options, update) _, err := o.f.put(in, src, options, update)
return err return err
} }
// newDir returns a dir with the Name decrypted // newDir returns a dir with the Name decrypted
func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory { func (f *Fs) newDir(dir fs.Directory) fs.Directory {
newDir := fs.NewDirCopy(ctx, dir) new := fs.NewDirCopy(dir)
remote := dir.Remote() remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote) decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil { if err != nil {
fs.Debugf(remote, "Undecryptable dir name: %v", err) fs.Debugf(remote, "Undecryptable dir name: %v", err)
} else { } else {
newDir.SetRemote(decryptedRemote) new.SetRemote(decryptedRemote)
} }
return newDir return new
}
// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
do := f.Fs.Features().UserInfo
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx)
}
// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
do := f.Fs.Features().Disconnect
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx)
} }
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source // ObjectInfo describes a wrapped fs.ObjectInfo for being the source
@@ -922,15 +694,13 @@ func (f *Fs) Disconnect(ctx context.Context) error {
// This encrypts the remote name and adjusts the size // This encrypts the remote name and adjusts the size
type ObjectInfo struct { type ObjectInfo struct {
fs.ObjectInfo fs.ObjectInfo
f *Fs f *Fs
nonce nonce
} }
func (f *Fs) newObjectInfo(src fs.ObjectInfo, nonce nonce) *ObjectInfo { func (f *Fs) newObjectInfo(src fs.ObjectInfo) *ObjectInfo {
return &ObjectInfo{ return &ObjectInfo{
ObjectInfo: src, ObjectInfo: src,
f: f, f: f,
nonce: nonce,
} }
} }
@@ -955,55 +725,10 @@ func (o *ObjectInfo) Size() int64 {
// Hash returns the selected checksum of the file // Hash returns the selected checksum of the file
// If no checksum is available it returns "" // If no checksum is available it returns ""
func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) { func (o *ObjectInfo) Hash(hash hash.Type) (string, error) {
var srcObj fs.Object
var ok bool
// Get the underlying object if there is one
if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
// Prefer direct interface assertion
} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
// Otherwise likely is an operations.OverrideRemote
srcObj = do.UnWrap()
} else {
return "", nil
}
// if this is wrapping a local object then we work out the hash
if srcObj.Fs().Features().IsLocal {
// Read the data and encrypt it to calculate the hash
fs.Debugf(o, "Computing %v hash of encrypted source", hash)
return o.f.computeHashWithNonce(ctx, o.nonce, srcObj, hash)
}
return "", nil return "", nil
} }
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
do, ok := o.Object.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}
// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *Object) SetTier(tier string) error {
do, ok := o.Object.(fs.SetTierer)
if !ok {
return errors.New("crypt: underlying remote does not support SetTier")
}
return do.SetTier(tier)
}
// GetTier returns storage tier or class of the Object
func (o *Object) GetTier() string {
do, ok := o.Object.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}
// Check the interfaces are satisfied // Check the interfaces are satisfied
var ( var (
_ fs.Fs = (*Fs)(nil) _ fs.Fs = (*Fs)(nil)
@@ -1011,24 +736,13 @@ var (
_ fs.Copier = (*Fs)(nil) _ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil) _ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil) _ fs.DirMover = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil) _ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil) _ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil) _ fs.CleanUpper = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil) _ fs.UnWrapper = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil) _ fs.ListRer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil) _ fs.Abouter = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil) _ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.Object = (*Object)(nil) _ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil) _ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.SetTierer = (*Object)(nil)
_ fs.GetTierer = (*Object)(nil)
) )
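The closing var block is Go's compile-time interface conformance idiom: assigning a typed nil to a blank variable of the interface type costs nothing at runtime but breaks the build if a required method goes missing. A self-contained illustration:

package main

import "fmt"

type Purger interface{ Purge() error }

type myFs struct{}

func (myFs) Purge() error { return nil }

// Fails to compile if *myFs ever stops satisfying Purger.
var _ Purger = (*myFs)(nil)

func main() { fmt.Println("interfaces satisfied") }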

backend/crypt/crypt_internal_test.go
@@ -1,143 +0,0 @@
package crypt
import (
"bytes"
"context"
"crypto/md5"
"fmt"
"io"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type testWrapper struct {
fs.ObjectInfo
}
// UnWrap returns the Object that this Object is wrapping or nil if it
// isn't wrapping anything
func (o testWrapper) UnWrap() fs.Object {
if o, ok := o.ObjectInfo.(fs.Object); ok {
return o
}
return nil
}
// Create a temporary local fs to upload things from
func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
localFs, err := fs.TemporaryLocalFs()
require.NoError(t, err)
cleanup = func() {
require.NoError(t, localFs.Rmdir(context.Background(), ""))
}
return localFs, cleanup
}
// Upload a file to a remote
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
inBuf := bytes.NewBufferString(contents)
t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
obj, err := f.Put(context.Background(), inBuf, upSrc)
require.NoError(t, err)
cleanup = func() {
require.NoError(t, obj.Remove(context.Background()))
}
return obj, cleanup
}
// Test the ObjectInfo
func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
var (
contents = random.String(100)
path = "hash_test_object"
ctx = context.Background()
)
if wrap {
path = "_wrap"
}
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
obj, cleanupObj := uploadFile(t, localFs, path, contents)
defer cleanupObj()
// encrypt the data
inBuf := bytes.NewBufferString(contents)
var outBuf bytes.Buffer
enc, err := f.cipher.newEncrypter(inBuf, nil)
require.NoError(t, err)
nonce := enc.nonce // read the nonce at the start
_, err = io.Copy(&outBuf, enc)
require.NoError(t, err)
var oi fs.ObjectInfo = obj
if wrap {
// wrap the object in an fs.ObjectUnwrapper if required
oi = testWrapper{oi}
}
// wrap the object in a crypt for upload using the nonce we
// saved from the encryptor
src := f.newObjectInfo(oi, nonce)
// Test ObjectInfo methods
assert.Equal(t, int64(outBuf.Len()), src.Size())
assert.Equal(t, f, src.Fs())
assert.NotEqual(t, path, src.Remote())
// Test ObjectInfo.Hash
wantHash := md5.Sum(outBuf.Bytes())
gotHash, err := src.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, fmt.Sprintf("%x", wantHash), gotHash)
}
func testComputeHash(t *testing.T, f *Fs) {
var (
contents = random.String(100)
path = "compute_hash_test"
ctx = context.Background()
hashType = f.Fs.Hashes().GetOne()
)
if hashType == hash.None {
t.Skipf("%v: does not support hashes", f.Fs)
}
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
// Upload a file to localFs as a test object
localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
defer cleanupLocalObj()
// Upload the same data to the remote Fs also
remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
defer cleanupRemoteObj()
// Calculate the expected Hash of the remote object
computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)
require.NoError(t, err)
// Test computed hash matches remote object hash
remoteObjHash, err := remoteObj.(*Object).Object.Hash(ctx, hashType)
require.NoError(t, err)
assert.Equal(t, remoteObjHash, computedHash)
}
// InternalTest is called by fstests.Run to run extra tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("ObjectInfo", func(t *testing.T) { testObjectInfo(t, f, false) })
t.Run("ObjectInfoWrap", func(t *testing.T) { testObjectInfo(t, f, true) })
t.Run("ComputeHash", func(t *testing.T) { testComputeHash(t, f) })
}
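testWrapper above exercises the fs.ObjectUnWrapper path, where a wrapping object exposes the object underneath. The general unwrap-until-you-can't shape, reduced to plain interfaces (illustrative types, not rclone's):

package main

import "fmt"

type UnWrapper interface{ UnWrap() interface{} }

type wrapper struct{ inner interface{} }

func (w wrapper) UnWrap() interface{} { return w.inner }

// unwrap follows UnWrap until it reaches a value that is not itself a
// wrapper -- the same pattern crypt uses to reach the wrapped Object.
func unwrap(v interface{}) interface{} {
	for {
		u, ok := v.(UnWrapper)
		if !ok {
			return v
		}
		v = u.UnWrap()
	}
}

func main() {
	fmt.Println(unwrap(wrapper{wrapper{"base"}})) // base
}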

backend/crypt/crypt_test.go
@@ -6,33 +6,14 @@ import (
"path/filepath" "path/filepath"
"testing" "testing"
"github.com/rclone/rclone/backend/crypt" "github.com/ncw/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive" // for integration tests _ "github.com/ncw/rclone/backend/local"
_ "github.com/rclone/rclone/backend/local" "github.com/ncw/rclone/fs/config/obscure"
_ "github.com/rclone/rclone/backend/swift" // for integration tests "github.com/ncw/rclone/fstest/fstests"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil),
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
// TestStandard runs integration tests against the remote // TestStandard runs integration tests against the remote
func TestStandard(t *testing.T) { func TestStandard(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard") tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt" name := "TestCrypt"
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
@@ -44,16 +25,11 @@ func TestStandard(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato")}, {Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"}, {Name: name, Key: "filename_encryption", Value: "standard"},
}, },
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
}) })
} }
// TestOff runs integration tests against the remote // TestOff runs integration tests against the remote
func TestOff(t *testing.T) { func TestOff(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off") tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off")
name := "TestCrypt2" name := "TestCrypt2"
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
@@ -65,16 +41,11 @@ func TestOff(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")}, {Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "off"}, {Name: name, Key: "filename_encryption", Value: "off"},
}, },
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
}) })
} }
// TestObfuscate runs integration tests against the remote // TestObfuscate runs integration tests against the remote
func TestObfuscate(t *testing.T) { func TestObfuscate(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate") tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt3" name := "TestCrypt3"
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
@@ -86,8 +57,6 @@ func TestObfuscate(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")}, {Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "obfuscate"}, {Name: name, Key: "filename_encryption", Value: "obfuscate"},
}, },
SkipBadWindowsCharacters: true, SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
}) })
} }
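The ExtraConfig entries above store passwords with obscure.MustObscure; rclone later recovers them with obscure.Reveal, as NewCipher does earlier in this diff. A minimal round-trip using the same package:

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/config/obscure"
)

func main() {
	obscured := obscure.MustObscure("potato")
	revealed, err := obscure.Reveal(obscured)
	if err != nil {
		panic(err)
	}
	fmt.Println(obscured, "->", revealed)
}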

backend/drive/drive.go (Executable file → Normal file, 3454 changed lines): diff suppressed because it is too large

backend/drive/drive_internal_test.go
@@ -1,85 +1,63 @@
 package drive

 import (
-	"bytes"
-	"context"
 	"encoding/json"
-	"io"
-	"io/ioutil"
-	"mime"
-	"path/filepath"
-	"strings"
 	"testing"
-	"time"

-	"google.golang.org/api/drive/v3"
 	"github.com/pkg/errors"
-	_ "github.com/rclone/rclone/backend/local"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/operations"
-	"github.com/rclone/rclone/fstest"
-	"github.com/rclone/rclone/fstest/fstests"
-	"github.com/rclone/rclone/lib/random"
 	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
+
+	"google.golang.org/api/drive/v3"
 )

-func TestDriveScopes(t *testing.T) {
-	for _, test := range []struct {
-		in       string
-		want     []string
-		wantFlag bool
-	}{
-		{"", []string{
-			"https://www.googleapis.com/auth/drive",
-		}, false},
-		{" drive.file , drive.readonly", []string{
-			"https://www.googleapis.com/auth/drive.file",
-			"https://www.googleapis.com/auth/drive.readonly",
-		}, false},
-		{" drive.file , drive.appfolder", []string{
-			"https://www.googleapis.com/auth/drive.file",
-			"https://www.googleapis.com/auth/drive.appfolder",
-		}, true},
-	} {
-		got := driveScopes(test.in)
-		assert.Equal(t, test.want, got, test.in)
-		gotFlag := driveScopesContainsAppFolder(got)
-		assert.Equal(t, test.wantFlag, gotFlag, test.in)
-	}
-}
+const exampleExportFormats = `{
+	"application/vnd.google-apps.document": [
+		"application/rtf",
+		"application/vnd.oasis.opendocument.text",
+		"text/html",
+		"application/pdf",
+		"application/epub+zip",
+		"application/zip",
+		"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+		"text/plain"
+	],
+	"application/vnd.google-apps.spreadsheet": [
+		"application/x-vnd.oasis.opendocument.spreadsheet",
+		"text/tab-separated-values",
+		"application/pdf",
+		"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+		"text/csv",
+		"application/zip",
+		"application/vnd.oasis.opendocument.spreadsheet"
+	],
+	"application/vnd.google-apps.jam": [
+		"application/pdf"
+	],
+	"application/vnd.google-apps.script": [
+		"application/vnd.google-apps.script+json"
+	],
+	"application/vnd.google-apps.presentation": [
+		"application/vnd.oasis.opendocument.presentation",
+		"application/pdf",
+		"application/vnd.openxmlformats-officedocument.presentationml.presentation",
+		"text/plain"
+	],
+	"application/vnd.google-apps.form": [
+		"application/zip"
+	],
+	"application/vnd.google-apps.drawing": [
+		"image/svg+xml",
+		"image/png",
+		"application/pdf",
+		"image/jpeg"
+	]
+}`

-/*
-var additionalMimeTypes = map[string]string{
-	"application/vnd.ms-excel.sheet.macroenabled.12": ".xlsm",
-	"application/vnd.ms-excel.template.macroenabled.12": ".xltm",
-	"application/vnd.ms-powerpoint.presentation.macroenabled.12": ".pptm",
-	"application/vnd.ms-powerpoint.slideshow.macroenabled.12": ".ppsm",
-	"application/vnd.ms-powerpoint.template.macroenabled.12": ".potm",
-	"application/vnd.ms-powerpoint": ".ppt",
-	"application/vnd.ms-word.document.macroenabled.12": ".docm",
-	"application/vnd.ms-word.template.macroenabled.12": ".dotm",
-	"application/vnd.openxmlformats-officedocument.presentationml.template": ".potx",
-	"application/vnd.openxmlformats-officedocument.spreadsheetml.template": ".xltx",
-	"application/vnd.openxmlformats-officedocument.wordprocessingml.template": ".dotx",
-	"application/vnd.sun.xml.writer": ".sxw",
-	"text/richtext": ".rtf",
-}
-*/
+var exportFormats map[string][]string

 // Load the example export formats into exportFormats for testing
-func TestInternalLoadExampleFormats(t *testing.T) {
-	fetchFormatsOnce.Do(func() {})
-	buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
-	var about struct {
-		ExportFormats map[string][]string `json:"exportFormats,omitempty"`
-		ImportFormats map[string][]string `json:"importFormats,omitempty"`
-	}
-	require.NoError(t, err)
-	require.NoError(t, json.Unmarshal(buf, &about))
-	_exportFormats = fixMimeTypeMap(about.ExportFormats)
-	_importFormats = fixMimeTypeMap(about.ImportFormats)
-}
+func TestInternalLoadExampleExportFormats(t *testing.T) {
+	assert.NoError(t, json.Unmarshal([]byte(exampleExportFormats), &exportFormats))
+}

 func TestInternalParseExtensions(t *testing.T) {
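To make the newer loading test concrete, here is a minimal standalone sketch (not rclone code) of what TestInternalLoadExampleFormats above does: read the saved About response from the test/about.json fixture (its full contents appear further down this compare) and pull out the export and import format maps. rclone's fixMimeTypeMap normalisation step is deliberately omitted.

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
)

func main() {
	buf, err := ioutil.ReadFile("test/about.json")
	if err != nil {
		panic(err)
	}
	var about struct {
		ExportFormats map[string][]string `json:"exportFormats"`
		ImportFormats map[string][]string `json:"importFormats"`
	}
	if err := json.Unmarshal(buf, &about); err != nil {
		panic(err)
	}
	// For example, the formats a Google Doc can be exported to:
	fmt.Println(about.ExportFormats["application/vnd.google-apps.document"])
}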
@@ -88,342 +66,47 @@ func TestInternalParseExtensions(t *testing.T) {
 		want    []string
 		wantErr error
 	}{
-		{"doc", []string{".doc"}, nil},
-		{" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
-		{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
-		{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
+		{"doc", []string{"doc"}, nil},
+		{" docx ,XLSX, pptx,svg", []string{"docx", "xlsx", "pptx", "svg"}, nil},
+		{"docx,svg,Docx", []string{"docx", "svg"}, nil},
+		{"docx,potato,docx", []string{"docx"}, errors.New(`couldn't find mime type for extension "potato"`)},
 	} {
-		extensions, _, gotErr := parseExtensions(test.in)
+		f := new(Fs)
+		gotErr := f.parseExtensions(test.in)
 		if test.wantErr == nil {
 			assert.NoError(t, gotErr)
 		} else {
 			assert.EqualError(t, gotErr, test.wantErr.Error())
 		}
-		assert.Equal(t, test.want, extensions)
+		assert.Equal(t, test.want, f.extensions)
 	}

 	// Test it is appending
-	extensions, _, gotErr := parseExtensions("docx,svg", "docx,svg,xlsx")
-	assert.NoError(t, gotErr)
-	assert.Equal(t, []string{".docx", ".svg", ".xlsx"}, extensions)
+	f := new(Fs)
+	assert.Nil(t, f.parseExtensions("docx,svg"))
+	assert.Nil(t, f.parseExtensions("docx,svg,xlsx"))
+	assert.Equal(t, []string{"docx", "svg", "xlsx"}, f.extensions)
 }
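A rough sketch of the behaviour the newer side of this test pins down, assuming a small extension-to-MIME table in place of rclone's own map (the real parseExtensions lives in drive.go, whose diff is suppressed in this compare):

package main

import (
	"fmt"
	"strings"
)

var mimeTypeByExtension = map[string]string{ // trimmed stand-in table
	".doc":  "application/msword",
	".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
	".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
	".svg":  "image/svg+xml",
	".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
}

// parseExtensionsSketch splits comma-separated lists, lower-cases and
// dot-prefixes each entry, rejects unknown extensions, and de-duplicates
// across successive lists (the "appending" case in the test above).
func parseExtensionsSketch(lists ...string) (extensions []string, err error) {
	seen := map[string]bool{}
	for _, list := range lists {
		for _, extension := range strings.Split(list, ",") {
			extension = strings.ToLower(strings.TrimSpace(extension))
			if extension == "" {
				continue
			}
			if !strings.HasPrefix(extension, ".") {
				extension = "." + extension
			}
			if mimeTypeByExtension[extension] == "" {
				return extensions, fmt.Errorf("couldn't find MIME type for extension %q", extension)
			}
			if !seen[extension] {
				seen[extension] = true
				extensions = append(extensions, extension)
			}
		}
	}
	return extensions, nil
}

func main() {
	fmt.Println(parseExtensionsSketch(" docx ,XLSX, pptx,svg")) // [.docx .xlsx .pptx .svg] <nil>
}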
 func TestInternalFindExportFormat(t *testing.T) {
-	item := &drive.File{
-		Name:     "file",
-		MimeType: "application/vnd.google-apps.document",
-	}
+	item := new(drive.File)
+	item.MimeType = "application/vnd.google-apps.document"
 	for _, test := range []struct {
 		extensions    []string
 		wantExtension string
 		wantMimeType  string
 	}{
 		{[]string{}, "", ""},
-		{[]string{".pdf"}, ".pdf", "application/pdf"},
-		{[]string{".pdf", ".rtf", ".xls"}, ".pdf", "application/pdf"},
-		{[]string{".xls", ".rtf", ".pdf"}, ".rtf", "application/rtf"},
-		{[]string{".xls", ".csv", ".svg"}, "", ""},
+		{[]string{"pdf"}, "pdf", "application/pdf"},
+		{[]string{"pdf", "rtf", "xls"}, "pdf", "application/pdf"},
+		{[]string{"xls", "rtf", "pdf"}, "rtf", "application/rtf"},
+		{[]string{"xls", "csv", "svg"}, "", ""},
 	} {
 		f := new(Fs)
-		f.exportExtensions = test.extensions
-		gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
+		f.extensions = test.extensions
+		gotExtension, gotMimeType := f.findExportFormat("file", exportFormats[item.MimeType])
 		assert.Equal(t, test.wantExtension, gotExtension)
-		if test.wantExtension != "" {
-			assert.Equal(t, item.Name+gotExtension, gotFilename)
-		} else {
-			assert.Equal(t, "", gotFilename)
-		}
 		assert.Equal(t, test.wantMimeType, gotMimeType)
-		assert.Equal(t, true, gotIsDocument)
 	}
 }
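The selection logic this test pins down is simple priority matching: walk the user's preferred extensions in order and return the first whose MIME type the document can actually be exported to. A hedged sketch, with mimeTypeOf injected as an assumed lookup so it stays self-contained (the real findExportFormat signature differs between the two sides of this diff):

func findExportFormatSketch(preferred []string, exportMimeTypes []string,
	mimeTypeOf func(extension string) string) (extension, mimeType string) {
	for _, ext := range preferred {
		want := mimeTypeOf(ext)
		for _, mt := range exportMimeTypes {
			if mt == want {
				return ext, mt
			}
		}
	}
	return "", "" // this document can't be exported with these preferences
}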
func TestMimeTypesToExtension(t *testing.T) {
for mimeType, extension := range _mimeTypeToExtension {
extensions, err := mime.ExtensionsByType(mimeType)
assert.NoError(t, err)
assert.Contains(t, extensions, extension)
}
}
func TestExtensionToMimeType(t *testing.T) {
for mimeType, extension := range _mimeTypeToExtension {
gotMimeType := mime.TypeByExtension(extension)
mediatype, _, err := mime.ParseMediaType(gotMimeType)
assert.NoError(t, err)
assert.Equal(t, mimeType, mediatype)
}
}
func TestExtensionsForExportFormats(t *testing.T) {
if _exportFormats == nil {
t.Error("exportFormats == nil")
}
for fromMT, toMTs := range _exportFormats {
for _, toMT := range toMTs {
if !isInternalMimeType(toMT) {
extensions, err := mime.ExtensionsByType(toMT)
assert.NoError(t, err, "invalid MIME type %q", toMT)
assert.NotEmpty(t, extensions, "No extension found for %q (from: %q)", fromMT, toMT)
}
}
}
}
func TestExtensionsForImportFormats(t *testing.T) {
t.Skip()
if _importFormats == nil {
t.Error("_importFormats == nil")
}
for fromMT := range _importFormats {
if !isInternalMimeType(fromMT) {
extensions, err := mime.ExtensionsByType(fromMT)
assert.NoError(t, err, "invalid MIME type %q", fromMT)
assert.NotEmpty(t, extensions, "No extension found for %q", fromMT)
}
}
}
func (f *Fs) InternalTestDocumentImport(t *testing.T) {
oldAllow := f.opt.AllowImportNameChange
f.opt.AllowImportNameChange = true
defer func() {
f.opt.AllowImportNameChange = oldAllow
}()
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
require.NoError(t, err)
testFilesFs, err := fs.NewFs(testFilesPath)
require.NoError(t, err)
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
require.NoError(t, err)
err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.doc", "example2.doc")
require.NoError(t, err)
}
func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
require.NoError(t, err)
testFilesFs, err := fs.NewFs(testFilesPath)
require.NoError(t, err)
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
require.NoError(t, err)
err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.xlsx", "example1.ods")
require.NoError(t, err)
}
func (f *Fs) InternalTestDocumentExport(t *testing.T) {
var buf bytes.Buffer
var err error
f.exportExtensions, _, err = parseExtensions("txt")
require.NoError(t, err)
obj, err := f.NewObject(context.Background(), "example2.txt")
require.NoError(t, err)
rc, err := obj.Open(context.Background())
require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }()
_, err = io.Copy(&buf, rc)
require.NoError(t, err)
text := buf.String()
for _, excerpt := range []string{
"Lorem ipsum dolor sit amet, consectetur",
"porta at ultrices in, consectetur at augue.",
} {
require.Contains(t, text, excerpt)
}
}
func (f *Fs) InternalTestDocumentLink(t *testing.T) {
var buf bytes.Buffer
var err error
f.exportExtensions, _, err = parseExtensions("link.html")
require.NoError(t, err)
obj, err := f.NewObject(context.Background(), "example2.link.html")
require.NoError(t, err)
rc, err := obj.Open(context.Background())
require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }()
_, err = io.Copy(&buf, rc)
require.NoError(t, err)
text := buf.String()
require.True(t, strings.HasPrefix(text, "<html>"))
require.True(t, strings.HasSuffix(text, "</html>\n"))
for _, excerpt := range []string{
`<meta http-equiv="refresh"`,
`Loading <a href="`,
} {
require.Contains(t, text, excerpt)
}
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/Shortcuts
func (f *Fs) InternalTestShortcuts(t *testing.T) {
const (
// from fstest/fstests/fstests.go
existingDir = "hello? sausage"
existingFile = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
existingSubDir = "êé"
)
ctx := context.Background()
srcObj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err)
srcHash, err := srcObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.NotEqual(t, "", srcHash)
t.Run("Errors", func(t *testing.T) {
_, err := f.makeShortcut(ctx, "", f, "")
assert.Error(t, err)
assert.Contains(t, err.Error(), "can't be root")
_, err = f.makeShortcut(ctx, "notfound", f, "dst")
assert.Error(t, err)
assert.Contains(t, err.Error(), "can't find source")
_, err = f.makeShortcut(ctx, existingFile, f, existingFile)
assert.Error(t, err)
assert.Contains(t, err.Error(), "not overwriting")
assert.Contains(t, err.Error(), "existing file")
_, err = f.makeShortcut(ctx, existingFile, f, existingDir)
assert.Error(t, err)
assert.Contains(t, err.Error(), "not overwriting")
assert.Contains(t, err.Error(), "existing directory")
})
t.Run("File", func(t *testing.T) {
dstObj, err := f.makeShortcut(ctx, existingFile, f, "shortcut.txt")
require.NoError(t, err)
require.NotNil(t, dstObj)
assert.Equal(t, "shortcut.txt", dstObj.Remote())
dstHash, err := dstObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, srcHash, dstHash)
require.NoError(t, dstObj.Remove(ctx))
})
t.Run("Dir", func(t *testing.T) {
dstObj, err := f.makeShortcut(ctx, existingDir, f, "shortcutdir")
require.NoError(t, err)
require.Nil(t, dstObj)
entries, err := f.List(ctx, "shortcutdir")
require.NoError(t, err)
require.Equal(t, 1, len(entries))
require.Equal(t, "shortcutdir/"+existingSubDir, entries[0].Remote())
require.NoError(t, f.Rmdir(ctx, "shortcutdir"))
})
t.Run("Command", func(t *testing.T) {
_, err := f.Command(ctx, "shortcut", []string{"one"}, nil)
require.Error(t, err)
require.Contains(t, err.Error(), "need exactly 2 arguments")
_, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{
"target": "doesnotexistremote:",
})
require.Error(t, err)
require.Contains(t, err.Error(), "couldn't find target")
_, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{
"target": ".",
})
require.Error(t, err)
require.Contains(t, err.Error(), "target is not a drive backend")
dstObjI, err := f.Command(ctx, "shortcut", []string{existingFile, "shortcut2.txt"}, map[string]string{
"target": fs.ConfigString(f),
})
require.NoError(t, err)
dstObj := dstObjI.(*Object)
assert.Equal(t, "shortcut2.txt", dstObj.Remote())
dstHash, err := dstObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, srcHash, dstHash)
require.NoError(t, dstObj.Remove(ctx))
dstObjI, err = f.Command(ctx, "shortcut", []string{existingFile, "shortcut3.txt"}, nil)
require.NoError(t, err)
dstObj = dstObjI.(*Object)
assert.Equal(t, "shortcut3.txt", dstObj.Remote())
dstHash, err = dstObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, srcHash, dstHash)
require.NoError(t, dstObj.Remove(ctx))
})
}
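For context, the "shortcut" backend command driven through f.Command above is exposed on the command line in rclone versions that ship it, roughly as:

rclone backend shortcut drive: source_item destination_shortcut
rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut

The -o target= option corresponds to the "target" entry in the options map the test checks; when it is omitted the shortcut is created on the same remote.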
// TestIntegration/FsMkdir/FsPutFiles/Internal/UnTrash
func (f *Fs) InternalTestUnTrash(t *testing.T) {
ctx := context.Background()
// Make some objects, one in a subdir
contents := random.String(100)
file1 := fstest.NewItem("trashDir/toBeTrashed", contents, time.Now())
_, obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
file2 := fstest.NewItem("trashDir/subdir/toBeTrashed", contents, time.Now())
_, _ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)
// Check objects
checkObjects := func() {
fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{
file1,
file2,
}, []string{
"trashDir/subdir",
}, f.Precision())
}
checkObjects()
// Make sure we are using the trash
require.Equal(t, true, f.opt.UseTrash)
// Remove the object and the dir
require.NoError(t, obj1.Remove(ctx))
require.NoError(t, f.Purge(ctx, "trashDir/subdir"))
// Check objects gone
fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{}, []string{}, f.Precision())
// Restore the object and directory
r, err := f.unTrashDir(ctx, "trashDir", true)
require.NoError(t, err)
assert.Equal(t, unTrashResult{Errors: 0, Untrashed: 2}, r)
// Check objects restored
checkObjects()
// Remove the test dir
require.NoError(t, f.Purge(ctx, "trashDir"))
}
func (f *Fs) InternalTest(t *testing.T) {
// These tests all depend on each other so run them as nested tests
t.Run("DocumentImport", func(t *testing.T) {
f.InternalTestDocumentImport(t)
t.Run("DocumentUpdate", func(t *testing.T) {
f.InternalTestDocumentUpdate(t)
t.Run("DocumentExport", func(t *testing.T) {
f.InternalTestDocumentExport(t)
t.Run("DocumentLink", func(t *testing.T) {
f.InternalTestDocumentLink(t)
})
})
})
})
t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash)
}
var _ fstests.InternalTester = (*Fs)(nil)


@@ -1,35 +1,17 @@
 // Test Drive filesystem interface
-package drive
+package drive_test

 import (
 	"testing"

-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/drive"
+	"github.com/ncw/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestDrive:",
-		NilObject:  (*Object)(nil),
-		ChunkedUpload: fstests.ChunkedUploadConfig{
-			MinChunkSize:  minChunkSize,
-			CeilChunkSize: fstests.NextPowerOfTwo,
-		},
+		NilObject:  (*drive.Object)(nil),
 	})
 }
-
-func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setUploadChunkSize(cs)
-}
-
-func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setUploadCutoff(cs)
-}
-
-var (
-	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
-	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
-)


@@ -1,178 +0,0 @@
{
"importFormats": {
"text/tab-separated-values": [
"application/vnd.google-apps.spreadsheet"
],
"application/x-vnd.oasis.opendocument.presentation": [
"application/vnd.google-apps.presentation"
],
"image/jpeg": [
"application/vnd.google-apps.document"
],
"image/bmp": [
"application/vnd.google-apps.document"
],
"image/gif": [
"application/vnd.google-apps.document"
],
"application/vnd.ms-excel.sheet.macroenabled.12": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.openxmlformats-officedocument.wordprocessingml.template": [
"application/vnd.google-apps.document"
],
"application/vnd.ms-powerpoint.presentation.macroenabled.12": [
"application/vnd.google-apps.presentation"
],
"application/vnd.ms-word.template.macroenabled.12": [
"application/vnd.google-apps.document"
],
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": [
"application/vnd.google-apps.document"
],
"image/pjpeg": [
"application/vnd.google-apps.document"
],
"application/vnd.google-apps.script+text/plain": [
"application/vnd.google-apps.script"
],
"application/vnd.ms-excel": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.sun.xml.writer": [
"application/vnd.google-apps.document"
],
"application/vnd.ms-word.document.macroenabled.12": [
"application/vnd.google-apps.document"
],
"application/vnd.ms-powerpoint.slideshow.macroenabled.12": [
"application/vnd.google-apps.presentation"
],
"text/rtf": [
"application/vnd.google-apps.document"
],
"text/plain": [
"application/vnd.google-apps.document"
],
"application/vnd.oasis.opendocument.spreadsheet": [
"application/vnd.google-apps.spreadsheet"
],
"application/x-vnd.oasis.opendocument.spreadsheet": [
"application/vnd.google-apps.spreadsheet"
],
"image/png": [
"application/vnd.google-apps.document"
],
"application/x-vnd.oasis.opendocument.text": [
"application/vnd.google-apps.document"
],
"application/msword": [
"application/vnd.google-apps.document"
],
"application/pdf": [
"application/vnd.google-apps.document"
],
"application/json": [
"application/vnd.google-apps.script"
],
"application/x-msmetafile": [
"application/vnd.google-apps.drawing"
],
"application/vnd.openxmlformats-officedocument.spreadsheetml.template": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.ms-powerpoint": [
"application/vnd.google-apps.presentation"
],
"application/vnd.ms-excel.template.macroenabled.12": [
"application/vnd.google-apps.spreadsheet"
],
"image/x-bmp": [
"application/vnd.google-apps.document"
],
"application/rtf": [
"application/vnd.google-apps.document"
],
"application/vnd.openxmlformats-officedocument.presentationml.template": [
"application/vnd.google-apps.presentation"
],
"image/x-png": [
"application/vnd.google-apps.document"
],
"text/html": [
"application/vnd.google-apps.document"
],
"application/vnd.oasis.opendocument.text": [
"application/vnd.google-apps.document"
],
"application/vnd.openxmlformats-officedocument.presentationml.presentation": [
"application/vnd.google-apps.presentation"
],
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.google-apps.script+json": [
"application/vnd.google-apps.script"
],
"application/vnd.openxmlformats-officedocument.presentationml.slideshow": [
"application/vnd.google-apps.presentation"
],
"application/vnd.ms-powerpoint.template.macroenabled.12": [
"application/vnd.google-apps.presentation"
],
"text/csv": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.oasis.opendocument.presentation": [
"application/vnd.google-apps.presentation"
],
"image/jpg": [
"application/vnd.google-apps.document"
],
"text/richtext": [
"application/vnd.google-apps.document"
]
},
"exportFormats": {
"application/vnd.google-apps.document": [
"application/rtf",
"application/vnd.oasis.opendocument.text",
"text/html",
"application/pdf",
"application/epub+zip",
"application/zip",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"text/plain"
],
"application/vnd.google-apps.spreadsheet": [
"application/x-vnd.oasis.opendocument.spreadsheet",
"text/tab-separated-values",
"application/pdf",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"text/csv",
"application/zip",
"application/vnd.oasis.opendocument.spreadsheet"
],
"application/vnd.google-apps.jam": [
"application/pdf"
],
"application/vnd.google-apps.script": [
"application/vnd.google-apps.script+json"
],
"application/vnd.google-apps.presentation": [
"application/vnd.oasis.opendocument.presentation",
"application/pdf",
"application/vnd.openxmlformats-officedocument.presentationml.presentation",
"text/plain"
],
"application/vnd.google-apps.form": [
"application/zip"
],
"application/vnd.google-apps.drawing": [
"image/svg+xml",
"image/png",
"application/pdf",
"image/jpeg"
]
}
}


@@ -11,18 +11,18 @@
 package drive

 import (
-	"bytes"
-	"context"
 	"encoding/json"
 	"fmt"
 	"io"
 	"net/http"
 	"net/url"
+	"regexp"
 	"strconv"

-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/fserrors"
-	"github.com/rclone/rclone/lib/readers"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/lib/readers"
+
+	"github.com/pkg/errors"
 	"google.golang.org/api/drive/v3"
 	"google.golang.org/api/googleapi"
 )
@@ -50,14 +50,15 @@ type resumableUpload struct {
} }
// Upload the io.Reader in of size bytes with contentType and info // Upload the io.Reader in of size bytes with contentType and info
func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) { func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string, info *drive.File, remote string) (*drive.File, error) {
params := url.Values{ params := make(url.Values)
"alt": {"json"}, params.Set("alt", "json")
"uploadType": {"resumable"}, params.Set("uploadType", "resumable")
"fields": {partialFields}, params.Set("fields", partialFields)
if f.isTeamDrive {
params.Set("supportsTeamDrives", "true")
} }
params.Set("supportsAllDrives", "true") if *driveKeepRevisionForever {
if f.opt.KeepRevisionForever {
params.Set("keepRevisionForever", "true") params.Set("keepRevisionForever", "true")
} }
urls := "https://www.googleapis.com/upload/drive/v3/files" urls := "https://www.googleapis.com/upload/drive/v3/files"
@@ -81,21 +82,18 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
if err != nil { if err != nil {
return false, err return false, err
} }
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
googleapi.Expand(req.URL, map[string]string{ googleapi.Expand(req.URL, map[string]string{
"fileId": fileID, "fileId": fileID,
}) })
req.Header.Set("Content-Type", "application/json; charset=UTF-8") req.Header.Set("Content-Type", "application/json; charset=UTF-8")
req.Header.Set("X-Upload-Content-Type", contentType) req.Header.Set("X-Upload-Content-Type", contentType)
if size >= 0 { req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
}
res, err = f.client.Do(req) res, err = f.client.Do(req)
if err == nil { if err == nil {
defer googleapi.CloseBody(res) defer googleapi.CloseBody(res)
err = googleapi.CheckResponse(res) err = googleapi.CheckResponse(res)
} }
return f.shouldRetry(err) return shouldRetry(err)
}) })
if err != nil { if err != nil {
return nil, err return nil, err
@@ -109,31 +107,60 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
MediaType: contentType, MediaType: contentType,
ContentLength: size, ContentLength: size,
} }
return rx.Upload(ctx) return rx.Upload()
} }
// Make an http.Request for the range passed in // Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request { func (rx *resumableUpload) makeRequest(start int64, body io.ReadSeeker, reqSize int64) *http.Request {
req, _ := http.NewRequest("POST", rx.URI, body) req, _ := http.NewRequest("POST", rx.URI, body)
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.ContentLength = reqSize req.ContentLength = reqSize
totalSize := "*"
if rx.ContentLength >= 0 {
totalSize = strconv.FormatInt(rx.ContentLength, 10)
}
if reqSize != 0 { if reqSize != 0 {
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, totalSize)) req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
} else { } else {
req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", totalSize)) req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
} }
req.Header.Set("Content-Type", rx.MediaType) req.Header.Set("Content-Type", rx.MediaType)
return req return req
} }
// rangeRE matches the transfer status response from the server. $1 is
// the last byte index uploaded.
var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
// Query drive for the amount transferred so far
//
// If error is nil, then start should be valid
func (rx *resumableUpload) transferStatus() (start int64, err error) {
req := rx.makeRequest(0, nil, 0)
res, err := rx.f.client.Do(req)
if err != nil {
return 0, err
}
defer googleapi.CloseBody(res)
if res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
return rx.ContentLength, nil
}
if res.StatusCode != statusResumeIncomplete {
err = googleapi.CheckResponse(res)
if err != nil {
return 0, err
}
return 0, errors.Errorf("unexpected http return code %v", res.StatusCode)
}
Range := res.Header.Get("Range")
if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
start, err = strconv.ParseInt(m[1], 10, 64)
if err == nil {
return start, nil
}
}
return 0, errors.Errorf("unable to parse range %q", Range)
}
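transferStatus above implements the probe step of Google's resumable-upload protocol: send a zero-length chunk with Content-Range: bytes */<total> and read back how much the server already holds from its Range header. A small standalone sketch of just the header parsing, using the same regexp:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)

// lastReceived parses a status response Range header value such as
// "0-8388607" and returns the last byte index the server reports having;
// resumption continues after that index.
func lastReceived(rangeHeader string) (int64, error) {
	m := rangeRE.FindStringSubmatch(rangeHeader)
	if len(m) != 2 {
		return 0, fmt.Errorf("unable to parse range %q", rangeHeader)
	}
	return strconv.ParseInt(m[1], 10, 64)
}

func main() {
	n, err := lastReceived("0-8388607")
	fmt.Println(n, err) // 8388607 <nil>
}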
// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil // Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) { func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
_, _ = chunk.Seek(0, io.SeekStart) _, _ = chunk.Seek(0, io.SeekStart)
req := rx.makeRequest(ctx, start, chunk, chunkSize) req := rx.makeRequest(start, chunk, chunkSize)
res, err := rx.f.client.Do(req) res, err := rx.f.client.Do(req)
if err != nil { if err != nil {
return 599, err return 599, err
@@ -155,7 +182,7 @@ func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk
// been 200 OK. // been 200 OK.
// //
// So parse the response out of the body. We aren't expecting // So parse the response out of the body. We aren't expecting
// any other 2xx codes, so we parse it unconditionally on // any other 2xx codes, so we parse it unconditionaly on
// StatusCode // StatusCode
if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil { if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
return 598, err return 598, err
@@ -166,45 +193,23 @@ func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk
// Upload uploads the chunks from the input // Upload uploads the chunks from the input
// It retries each chunk using the pacer and --low-level-retries // It retries each chunk using the pacer and --low-level-retries
func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) { func (rx *resumableUpload) Upload() (*drive.File, error) {
start := int64(0) start := int64(0)
var StatusCode int var StatusCode int
var err error var err error
buf := make([]byte, int(rx.f.opt.ChunkSize)) buf := make([]byte, int(chunkSize))
for finished := false; !finished; { for start < rx.ContentLength {
var reqSize int64 reqSize := rx.ContentLength - start
var chunk io.ReadSeeker if reqSize >= int64(chunkSize) {
if rx.ContentLength >= 0 { reqSize = int64(chunkSize)
// If size known use repeatable reader for smoother bwlimit
if start >= rx.ContentLength {
break
}
reqSize = rx.ContentLength - start
if reqSize >= int64(rx.f.opt.ChunkSize) {
reqSize = int64(rx.f.opt.ChunkSize)
}
chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
} else {
// If size unknown read into buffer
var n int
n, err = readers.ReadFill(rx.Media, buf)
if err == io.EOF {
// Send the last chunk with the correct ContentLength
// otherwise Google doesn't know we've finished
rx.ContentLength = start + int64(n)
finished = true
} else if err != nil {
return nil, err
}
reqSize = int64(n)
chunk = bytes.NewReader(buf[:reqSize])
} }
chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
// Transfer the chunk // Transfer the chunk
err = rx.f.pacer.Call(func() (bool, error) { err = rx.f.pacer.Call(func() (bool, error) {
fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize) fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize) StatusCode, err = rx.transferChunk(start, chunk, reqSize)
again, err := rx.f.shouldRetry(err) again, err := shouldRetry(err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK { if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
again = false again = false
err = nil err = nil
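One side of the hunk above extends Upload's loop to streams of unknown length: read into a buffer and, on EOF, fix ContentLength so the final chunk tells Google the upload is complete. A self-contained sketch of that shape, with sendChunk standing in for the transferChunk call (a total of -1 means "size not yet known"):

package main

import (
	"bytes"
	"fmt"
	"io"
)

// uploadUnknownLength reads in chunk by chunk; total stays -1 until the
// reader is exhausted, at which point the last chunk carries the now-known
// total so the server can finalise the upload.
func uploadUnknownLength(in io.Reader, chunkSize int,
	sendChunk func(start int64, chunk []byte, total int64) error) error {
	buf := make([]byte, chunkSize)
	var start, total int64 = 0, -1
	for finished := false; !finished; {
		n, err := io.ReadFull(in, buf)
		switch err {
		case nil: // a full chunk, and maybe more to come
		case io.EOF, io.ErrUnexpectedEOF: // stream ended: total is now known
			total = start + int64(n)
			finished = true
		default:
			return err
		}
		if err := sendChunk(start, buf[:n], total); err != nil {
			return err
		}
		start += int64(n)
	}
	return nil
}

func main() {
	err := uploadUnknownLength(bytes.NewReader(make([]byte, 2500)), 1024,
		func(start int64, chunk []byte, total int64) error {
			fmt.Printf("chunk at %d, len %d, total %d\n", start, len(chunk), total)
			return nil
		})
	fmt.Println(err)
	// chunk at 0, len 1024, total -1
	// chunk at 1024, len 1024, total -1
	// chunk at 2048, len 452, total 2500
}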


@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"testing"

-	"github.com/rclone/rclone/backend/dropbox/dbhash"
+	"github.com/ncw/rclone/backend/dropbox/dbhash"
 	"github.com/stretchr/testify/assert"
 )

backend/dropbox/dropbox.go Executable file → Normal file

@@ -22,7 +22,6 @@ of path_display and all will be well.
*/ */
import ( import (
"context"
"fmt" "fmt"
"io" "io"
"log" "log"
@@ -32,25 +31,20 @@ import (
"time" "time"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox" "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common" "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files" "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing" "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users" "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/backend/dropbox/dbhash"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/oauth2" "golang.org/x/oauth2"
) )
@@ -61,6 +55,24 @@ const (
minSleep = 10 * time.Millisecond minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential decayConstant = 2 // bigger for slower decay, exponential
)
var (
// Description of how to auth for this app
dropboxConfig = &oauth2.Config{
Scopes: []string{},
// Endpoint: oauth2.Endpoint{
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
// },
Endpoint: dropbox.OAuthEndpoint(""),
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
// A regexp matching path names for files Dropbox ignores
// See https://www.dropbox.com/en/help/145 - Ignored files
ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
// Upload chunk size - setting too small makes uploads slow. // Upload chunk size - setting too small makes uploads slow.
// Chunks are buffered into memory for retries. // Chunks are buffered into memory for retries.
// //
@@ -84,101 +96,44 @@ const (
// Choose 48MB which is 91% of Maximum speed. rclone by // Choose 48MB which is 91% of Maximum speed. rclone by
// default does 4 transfers so this should use 4*48MB = 192MB // default does 4 transfers so this should use 4*48MB = 192MB
// by default. // by default.
defaultChunkSize = 48 * fs.MebiByte uploadChunkSize = fs.SizeSuffix(48 * 1024 * 1024)
maxChunkSize = 150 * fs.MebiByte maxUploadChunkSize = fs.SizeSuffix(150 * 1024 * 1024)
)
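Spelling out the arithmetic in the comment above (a sketch; 4 is rclone's default --transfers, and fs.MebiByte plays the role of mebiByte here):

const (
	mebiByte  = 1 << 20
	chunkSize = 48 * mebiByte // one buffered chunk per transfer, kept for retries
	transfers = 4             // rclone's default --transfers
)

// Worst-case buffer memory with the defaults: 4 * 48 MiB = 192 MiB.
var uploadBufferBytes = transfers * chunkSize // 201326592 bytes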
var (
// Description of how to auth for this app
dropboxConfig = &oauth2.Config{
Scopes: []string{},
// Endpoint: oauth2.Endpoint{
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
// },
Endpoint: dropbox.OAuthEndpoint(""),
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
// A regexp matching path names for files Dropbox ignores
// See https://www.dropbox.com/en/help/145 - Ignored files
ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
// DbHashType is the hash.Type for Dropbox
DbHashType hash.Type
) )
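To make the ignoredFiles pattern concrete, a quick standalone check of which paths it flags:

package main

import (
	"fmt"
	"regexp"
)

var ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)

func main() {
	for _, name := range []string{
		"photos/Thumbs.db",  // matched - the pattern is case-insensitive
		"work/.DS_Store",    // matched
		"notes/desktop.txt", // not matched - only the exact names count
	} {
		fmt.Printf("%-20s ignored=%v\n", name, ignoredFiles.MatchString(name))
	}
}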
// Register with Fs // Register with Fs
func init() { func init() {
DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
fs.Register(&fs.RegInfo{ fs.Register(&fs.RegInfo{
Name: "dropbox", Name: "dropbox",
Description: "Dropbox", Description: "Dropbox",
NewFs: NewFs, NewFs: NewFs,
Config: func(name string, m configmap.Mapper) { Config: func(name string) {
opt := oauthutil.Options{ err := oauthutil.ConfigNoOffline("dropbox", name, dropboxConfig)
NoOffline: true,
}
err := oauthutil.Config("dropbox", name, m, dropboxConfig, &opt)
if err != nil { if err != nil {
log.Fatalf("Failed to configure token: %v", err) log.Fatalf("Failed to configure token: %v", err)
} }
}, },
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: []fs.Option{{
Name: "chunk_size", Name: config.ConfigClientID,
Help: fmt.Sprintf(`Upload chunk size. (< %v). Help: "Dropbox App Client Id - leave blank normally.",
Any files larger than this will be uploaded in chunks of this size.
Note that chunks are buffered in memory (one at a time) so rclone can
deal with retries. Setting this larger will increase the speed
slightly (at most 10%% for 128MB in tests) at the cost of using more
memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
Default: defaultChunkSize,
Advanced: true,
}, { }, {
Name: "impersonate", Name: config.ConfigClientSecret,
Help: "Impersonate this user when using a business account.", Help: "Dropbox App Client Secret - leave blank normally.",
Default: "", }},
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// https://www.dropbox.com/help/syncing-uploads/files-not-syncing lists / and \
// as invalid characters.
// Testing revealed names with trailing spaces and the DEL character don't work.
// Also encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: (encoder.Base |
encoder.EncodeBackSlash |
encoder.EncodeDel |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8),
}}...),
}) })
} flags.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
Enc encoder.MultiEncoder `config:"encoding"`
} }
// Fs represents a remote dropbox server // Fs represents a remote dropbox server
type Fs struct { type Fs struct {
name string // name of this remote name string // name of this remote
root string // the path we are working on root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features features *fs.Features // optional features
srv files.Client // the connection to the dropbox server srv files.Client // the connection to the dropbox server
sharing sharing.Client // as above, but for generating sharing links sharing sharing.Client // as above, but for generating sharing links
users users.Client // as above, but for accessing user information users users.Client // as above, but for accessing user information
team team.Client // for the Teams API
slashRoot string // root with "/" prefix, lowercase slashRoot string // root with "/" prefix, lowercase
slashRootSlash string // root with "/" prefix and postfix, lowercase slashRootSlash string // root with "/" prefix and postfix, lowercase
pacer *fs.Pacer // To pace the API calls pacer *pacer.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none ns string // The namespace we are using or "" for none
} }
@@ -222,63 +177,23 @@ func shouldRetry(err error) (bool, error) {
return false, err return false, err
} }
baseErrString := errors.Cause(err).Error() baseErrString := errors.Cause(err).Error()
// First check for Insufficient Space // FIXME there is probably a better way of doing this!
if strings.Contains(baseErrString, "insufficient_space") { if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
return false, fserrors.FatalError(err)
}
// Then handle any official Retry-After header from Dropbox's SDK
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
}
return true, err
}
// Keep old behavior for backward compatibility
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
return true, err return true, err
} }
return fserrors.ShouldRetry(err), err return fserrors.ShouldRetry(err), err
} }
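The more elaborate side of shouldRetry above layers three decisions: fatal errors such as insufficient_space stop immediately, a server-supplied Retry-After drives the pacer's sleep exactly, and everything else falls back to generic classification. A sketch of that shape reusing rclone's fserrors and pacer helpers; extracting retryAfter and fatal from the Dropbox SDK error is assumed to have happened already, and classifyRetrySketch is not the backend's real API:

package sketch

import (
	"time"

	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/lib/pacer"
)

func classifyRetrySketch(err error, retryAfter time.Duration, fatal bool) (bool, error) {
	if err == nil {
		return false, nil
	}
	if fatal { // e.g. insufficient_space - retrying cannot help
		return false, fserrors.FatalError(err)
	}
	if retryAfter > 0 { // honour the server's requested backoff exactly
		return true, pacer.RetryAfterError(err, retryAfter)
	}
	return fserrors.ShouldRetry(err), err
}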
func checkUploadChunkSize(cs fs.SizeSuffix) error { // NewFs contstructs an Fs from the path, container:path
const minChunkSize = fs.Byte func NewFs(name, root string) (fs.Fs, error) {
if cs < minChunkSize { if uploadChunkSize > maxUploadChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize) return nil, errors.Errorf("chunk size too big, must be < %v", maxUploadChunkSize)
}
if cs > maxChunkSize {
return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "dropbox: chunk size")
} }
// Convert the old token if it exists. The old token was just // Convert the old token if it exists. The old token was just
// just a string, the new one is a JSON blob // just a string, the new one is a JSON blob
oldToken, ok := m.Get(config.ConfigToken) oldToken := strings.TrimSpace(config.FileGet(name, config.ConfigToken))
oldToken = strings.TrimSpace(oldToken) if oldToken != "" && oldToken[0] != '{' {
if ok && oldToken != "" && oldToken[0] != '{' {
fs.Infof(name, "Converting token to new format") fs.Infof(name, "Converting token to new format")
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken) newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
err := config.SetValueAndSave(name, config.ConfigToken, newToken) err := config.SetValueAndSave(name, config.ConfigToken, newToken)
@@ -287,44 +202,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} }
} }
oAuthClient, _, err := oauthutil.NewClient(name, m, dropboxConfig) oAuthClient, _, err := oauthutil.NewClient(name, dropboxConfig)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to configure dropbox") return nil, errors.Wrap(err, "failed to configure dropbox")
} }
f := &Fs{ f := &Fs{
name: name, name: name,
opt: *opt, pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
} }
config := dropbox.Config{ config := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
Client: oAuthClient, // maybe??? Client: oAuthClient, // maybe???
HeaderGenerator: f.headerGenerator, HeaderGenerator: f.headerGenerator,
} }
// NOTE: needs to be created pre-impersonation so we can look up the impersonated user
f.team = team.New(config)
if opt.Impersonate != "" {
user := team.UserSelectorArg{
Email: opt.Impersonate,
}
user.Tag = "email"
members := []*team.UserSelectorArg{&user}
args := team.NewMembersGetInfoArgs(members)
memberIds, err := f.team.MembersGetInfo(args)
if err != nil {
return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
}
config.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
}
f.srv = files.New(config) f.srv = files.New(config)
f.sharing = sharing.New(config) f.sharing = sharing.New(config)
f.users = users.New(config) f.users = users.New(config)
@@ -393,15 +284,14 @@ func (f *Fs) setRoot(root string) {
// getMetadata gets the metadata for a file or directory // getMetadata gets the metadata for a file or directory
func (f *Fs) getMetadata(objPath string) (entry files.IsMetadata, notFound bool, err error) { func (f *Fs) getMetadata(objPath string) (entry files.IsMetadata, notFound bool, err error) {
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
entry, err = f.srv.GetMetadata(&files.GetMetadataArg{ entry, err = f.srv.GetMetadata(&files.GetMetadataArg{Path: objPath})
Path: f.opt.Enc.FromStandardPath(objPath),
})
return shouldRetry(err) return shouldRetry(err)
}) })
if err != nil { if err != nil {
switch e := err.(type) { switch e := err.(type) {
case files.GetMetadataAPIError: case files.GetMetadataAPIError:
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound { switch e.EndpointError.Path.Tag {
case files.LookupErrorNotFound:
notFound = true notFound = true
err = nil err = nil
} }
@@ -464,7 +354,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje
// NewObject finds the Object at remote. If it can't be found // NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound. // it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil) return f.newObjectWithInfo(remote, nil)
} }
@@ -477,7 +367,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// //
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
root := f.slashRoot root := f.slashRoot
if dir != "" { if dir != "" {
root += "/" + dir root += "/" + dir
@@ -488,7 +378,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for { for {
if !started { if !started {
arg := files.ListFolderArg{ arg := files.ListFolderArg{
Path: f.opt.Enc.FromStandardPath(root), Path: root,
Recursive: false, Recursive: false,
} }
if root == "/" { if root == "/" {
@@ -501,7 +391,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if err != nil { if err != nil {
switch e := err.(type) { switch e := err.(type) {
case files.ListFolderAPIError: case files.ListFolderAPIError:
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound { switch e.EndpointError.Path.Tag {
case files.LookupErrorNotFound:
err = fs.ErrorDirNotFound err = fs.ErrorDirNotFound
} }
} }
@@ -538,7 +429,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Only the last element is reliably cased in PathDisplay // Only the last element is reliably cased in PathDisplay
entryPath := metadata.PathDisplay entryPath := metadata.PathDisplay
leaf := f.opt.Enc.ToStandardName(path.Base(entryPath)) leaf := path.Base(entryPath)
remote := path.Join(dir, leaf) remote := path.Join(dir, leaf)
if folderInfo != nil { if folderInfo != nil {
d := fs.NewDir(remote, time.Now()) d := fs.NewDir(remote, time.Now())
@@ -563,22 +454,22 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Copy the reader in to the new object which is returned // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction // Temporary Object under construction
o := &Object{ o := &Object{
fs: f, fs: f,
remote: src.Remote(), remote: src.Remote(),
} }
return o, o.Update(ctx, in, src, options...) return o, o.Update(in, src, options...)
} }
// PutStream uploads to the remote path with the modTime given of indeterminate size // PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...) return f.Put(in, src, options...)
} }
// Mkdir creates the container if it doesn't exist // Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error { func (f *Fs) Mkdir(dir string) error {
root := path.Join(f.slashRoot, dir) root := path.Join(f.slashRoot, dir)
// can't create or run metadata on root // can't create or run metadata on root
@@ -596,7 +487,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// create it // create it
arg2 := files.CreateFolderArg{ arg2 := files.CreateFolderArg{
Path: f.opt.Enc.FromStandardPath(root), Path: root,
} }
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.CreateFolderV2(&arg2) _, err = f.srv.CreateFolderV2(&arg2)
@@ -605,9 +496,10 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return err return err
} }
// purgeCheck removes the root directory, if check is set then it // Rmdir deletes the container
// refuses to do so if it has anything in //
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error) { // Returns an error if it isn't empty
func (f *Fs) Rmdir(dir string) error {
root := path.Join(f.slashRoot, dir) root := path.Join(f.slashRoot, dir)
// can't remove root // can't remove root
@@ -615,33 +507,30 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return errors.New("can't remove root directory") return errors.New("can't remove root directory")
} }
if check { // check directory exists
// check directory exists _, err := f.getDirMetadata(root)
_, err = f.getDirMetadata(root) if err != nil {
if err != nil { return errors.Wrap(err, "Rmdir")
return errors.Wrap(err, "Rmdir") }
}
root = f.opt.Enc.FromStandardPath(root) // check directory empty
// check directory empty arg := files.ListFolderArg{
arg := files.ListFolderArg{ Path: root,
Path: root, Recursive: false,
Recursive: false, }
} if root == "/" {
if root == "/" { arg.Path = "" // Specify root folder as empty string
arg.Path = "" // Specify root folder as empty string }
} var res *files.ListFolderResult
var res *files.ListFolderResult err = f.pacer.Call(func() (bool, error) {
err = f.pacer.Call(func() (bool, error) { res, err = f.srv.ListFolder(&arg)
res, err = f.srv.ListFolder(&arg) return shouldRetry(err)
return shouldRetry(err) })
}) if err != nil {
if err != nil { return errors.Wrap(err, "Rmdir")
return errors.Wrap(err, "Rmdir") }
} if len(res.Entries) != 0 {
if len(res.Entries) != 0 { return errors.New("directory not empty")
return errors.New("directory not empty")
}
} }
// remove it // remove it
@@ -652,13 +541,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return err return err
} }
// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
}
// Precision returns the precision // Precision returns the precision
func (f *Fs) Precision() time.Duration { func (f *Fs) Precision() time.Duration {
return time.Second return time.Second
@@ -673,7 +555,7 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantCopy // If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
if !ok { if !ok {
fs.Debugf(src, "Can't copy - not same remote type") fs.Debugf(src, "Can't copy - not same remote type")
@@ -687,12 +569,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
// Copy // Copy
arg := files.RelocationArg{ arg := files.RelocationArg{}
RelocationPath: files.RelocationPath{ arg.FromPath = srcObj.remotePath()
FromPath: f.opt.Enc.FromStandardPath(srcObj.remotePath()), arg.ToPath = dstObj.remotePath()
ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()),
},
}
var err error var err error
var result *files.RelocationResult var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
@@ -721,8 +600,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of // Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the // deleting all the files quicker than just running Remove() on the
// result of List() // result of List()
func (f *Fs) Purge(ctx context.Context, dir string) (err error) { func (f *Fs) Purge() (err error) {
return f.purgeCheck(ctx, dir, false) // Let dropbox delete the filesystem tree
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.DeleteV2(&files.DeleteArg{Path: f.slashRoot})
return shouldRetry(err)
})
return err
} }
// Move src to this remote using server side move operations. // Move src to this remote using server side move operations.
@@ -734,7 +618,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantMove // If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
if !ok { if !ok {
fs.Debugf(src, "Can't move - not same remote type") fs.Debugf(src, "Can't move - not same remote type")
@@ -748,12 +632,9 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
// Do the move // Do the move
arg := files.RelocationArg{ arg := files.RelocationArg{}
RelocationPath: files.RelocationPath{ arg.FromPath = srcObj.remotePath()
FromPath: f.opt.Enc.FromStandardPath(srcObj.remotePath()), arg.ToPath = dstObj.remotePath()
ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()),
},
}
var err error var err error
var result *files.RelocationResult var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
@@ -777,17 +658,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
// PublicLink adds a "readable by anyone with link" permission on the given file or folder. // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) { func (f *Fs) PublicLink(remote string) (link string, err error) {
absPath := f.opt.Enc.FromStandardPath(path.Join(f.slashRoot, remote)) absPath := "/" + path.Join(f.Root(), remote)
fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath) fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
createArg := sharing.CreateSharedLinkWithSettingsArg{ createArg := sharing.CreateSharedLinkWithSettingsArg{
Path: absPath, Path: absPath,
// FIXME this gives settings_error/not_authorized/.. errors
// and the expires setting isn't in the documentation so remove
// for now.
// Settings: &sharing.SharedLinkSettings{
// Expires: time.Now().Add(time.Duration(expire)).UTC().Round(time.Second),
// },
} }
var linkRes sharing.IsSharedLinkMetadata var linkRes sharing.IsSharedLinkMetadata
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
@@ -795,8 +670,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return shouldRetry(err) return shouldRetry(err)
}) })
if err != nil && strings.Contains(err.Error(), if err != nil && strings.Contains(err.Error(), sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
fs.Debugf(absPath, "has a public link already, attempting to retrieve it") fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
listArg := sharing.ListSharedLinksArg{ listArg := sharing.ListSharedLinksArg{
Path: absPath, Path: absPath,
@@ -837,7 +711,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
// If it isn't possible then return fs.ErrorCantDirMove // If it isn't possible then return fs.ErrorCantDirMove
// //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 	srcFs, ok := src.(*Fs)
 	if !ok {
 		fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -858,12 +732,9 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 	// ...apparently not necessary
 	// Do the move
-	arg := files.RelocationArg{
-		RelocationPath: files.RelocationPath{
-			FromPath: f.opt.Enc.FromStandardPath(srcPath),
-			ToPath:   f.opt.Enc.FromStandardPath(dstPath),
-		},
-	}
+	arg := files.RelocationArg{}
+	arg.FromPath = srcPath
+	arg.ToPath = dstPath
 	err = f.pacer.Call(func() (bool, error) {
 		_, err = f.srv.MoveV2(&arg)
 		return shouldRetry(err)
@@ -876,7 +747,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 }
 
 // About gets quota information
-func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
+func (f *Fs) About() (usage *fs.Usage, err error) {
 	var q *users.SpaceUsage
 	err = f.pacer.Call(func() (bool, error) {
 		q, err = f.users.GetSpaceUsage()
@@ -904,7 +775,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	return hash.Set(DbHashType)
+	return hash.Set(hash.Dropbox)
 }
 
 // ------------------------------------------------------------
@@ -928,8 +799,8 @@
 }
 
 // Hash returns the dropbox special hash
-func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
-	if t != DbHashType {
+func (o *Object) Hash(t hash.Type) (string, error) {
+	if t != hash.Dropbox {
 		return "", hash.ErrUnsupported
 	}
 	err := o.readMetaData()
@@ -990,7 +861,7 @@ func (o *Object) readMetaData() (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime(ctx context.Context) time.Time {
+func (o *Object) ModTime() time.Time {
 	err := o.readMetaData()
 	if err != nil {
 		fs.Debugf(o, "Failed to read metadata: %v", err)
@@ -1002,7 +873,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
 // SetModTime sets the modification time of the local fs object
 //
 // Commits the datastore
-func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+func (o *Object) SetModTime(modTime time.Time) error {
 	// Dropbox doesn't have a way of doing this so returning this
 	// error will cause the file to be deleted first then
 	// re-uploaded to set the time.
@@ -1015,13 +886,9 @@
 }
 
 // Open an object for read
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
-	fs.FixRangeOption(options, o.bytes)
+func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	headers := fs.OpenOptionHeaders(options)
-	arg := files.DownloadArg{
-		Path:         o.fs.opt.Enc.FromStandardPath(o.remotePath()),
-		ExtraHeaders: headers,
-	}
+	arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers}
 	err = o.fs.pacer.Call(func() (bool, error) {
 		_, in, err = o.fs.srv.Download(&arg)
 		return shouldRetry(err)
@@ -1030,7 +897,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 		switch e := err.(type) {
 		case files.DownloadAPIError:
 			// Don't attempt to retry copyright violation errors
-			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
+			if e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
 				return nil, fserrors.NoRetryError(err)
 			}
 		}
@@ -1044,7 +911,7 @@
 // unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
 // avoidable request to the Dropbox API that does not carry payload.
 func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
-	chunkSize := int64(o.fs.opt.ChunkSize)
+	chunkSize := int64(uploadChunkSize)
 	chunks := 0
 	if size != -1 {
 		chunks = int(size/chunkSize) + 1
@@ -1131,13 +998,6 @@
 			return false, nil
 		}
 		entry, err = o.fs.srv.UploadSessionFinish(args, chunk)
-		// If error is insufficient space then don't retry
-		if e, ok := err.(files.UploadSessionFinishAPIError); ok {
-			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
-				err = fserrors.NoRetryError(err)
-				return false, err
-			}
-		}
 		// after the first chunk is uploaded, we retry everything
 		return err != nil, err
 	})
@@ -1152,20 +1012,21 @@
 // Copy the reader into the object updating modTime and size
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 	remote := o.remotePath()
 	if ignoredFiles.MatchString(remote) {
-		return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
+		fs.Logf(o, "File name disallowed - not uploading")
+		return nil
 	}
-	commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))
+	commitInfo := files.NewCommitInfo(o.remotePath())
 	commitInfo.Mode.Tag = "overwrite"
 	// The Dropbox API only accepts timestamps in UTC with second precision.
-	commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
+	commitInfo.ClientModified = src.ModTime().UTC().Round(time.Second)
 	size := src.Size()
 	var err error
 	var entry *files.FileMetadata
-	if size > int64(o.fs.opt.ChunkSize) || size == -1 {
+	if size > int64(uploadChunkSize) || size == -1 {
 		entry, err = o.uploadChunked(in, commitInfo, size)
 	} else {
 		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
@@ -1180,11 +1041,9 @@
 }
 
 // Remove an object
-func (o *Object) Remove(ctx context.Context) (err error) {
+func (o *Object) Remove() (err error) {
 	err = o.fs.pacer.Call(func() (bool, error) {
-		_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
-			Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
-		})
+		_, err = o.fs.srv.DeleteV2(&files.DeleteArg{Path: o.remotePath()})
 		return shouldRetry(err)
 	})
 	return err
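
Every Dropbox call in this file goes through the same wrapper: f.pacer.Call re-runs the closure while shouldRetry classifies the error as transient, and fserrors.NoRetryError short-circuits retries for errors that can never succeed (the restricted-content check above; the removed insufficient-space check did the same). A minimal self-contained sketch of that pattern, with doCall and isFatal as hypothetical stand-ins:

package example

import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fserrors"
)

// callWithRetry shows the shape of the wrapper: the pacer re-runs the
// closure while it returns (true, err), sleeping between attempts, and
// fserrors.NoRetryError marks errors that must never be retried.
func callWithRetry(p *fs.Pacer, doCall func() error, isFatal func(error) bool) error {
	return p.Call(func() (bool, error) {
		err := doCall()
		if err == nil {
			return false, nil
		}
		if isFatal(err) {
			// hypothetical classifier; stops the pacer retrying
			return false, fserrors.NoRetryError(err)
		}
		return true, err // ask the pacer to back off and retry
	})
}
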


@@ -1,26 +1,17 @@
 // Test Dropbox filesystem interface
-package dropbox
+package dropbox_test
 
 import (
 	"testing"
 
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/dropbox"
+	"github.com/ncw/rclone/fstest/fstests"
 )
 
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestDropbox:",
-		NilObject:  (*Object)(nil),
-		ChunkedUpload: fstests.ChunkedUploadConfig{
-			MaxChunkSize: maxChunkSize,
-		},
+		NilObject:  (*dropbox.Object)(nil),
	})
 }
-
-func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setUploadChunkSize(cs)
-}
-
-var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
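
Both versions of this test delegate to the shared fstests suite, which exercises the whole fs.Fs contract against a remote named in the rclone config (conventionally invoked with something like go test -v -remote TestDropbox:). A minimal sketch of the same wiring for a hypothetical backend:

package example

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration hands the backend to the generic suite; "TestExample:"
// is an assumed remote that would need to exist in the rclone config.
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestExample:",
		NilObject:  nil, // normally (*Object)(nil) for the backend's object type
	})
}
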


@@ -1,408 +0,0 @@
package fichier
import (
"context"
"io"
"net/http"
"regexp"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
)
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
403, // Forbidden (may happen when request limit is exceeded)
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
// Detect this error which the integration tests provoke
// error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
//
// https://1fichier.com/api.html
//
// file/ls.cgi is limited :
//
// Warning (can be changed in case of abuses) :
// List all files of the account is limited to 1 request per hour.
// List folders is limited to 5 000 results and 1 request per folder per 30s.
if err != nil && strings.Contains(err.Error(), "Flood detected") {
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
time.Sleep(30 * time.Second)
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString
func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
request := DownloadRequest{
URL: url,
Single: 1,
}
opts := rest.Opts{
Method: "POST",
Path: "/download/get_token.cgi",
}
var token GetTokenResponse
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
}
return &token, nil
}
func fileFromSharedFile(file *SharedFile) File {
return File{
URL: file.Link,
Filename: file.Filename,
Size: file.Size,
}
}
func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
}
var sharedFiles SharedFolderResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, &sharedFiles)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
}
entries = make([]fs.DirEntry, len(sharedFiles))
for i, sharedFile := range sharedFiles {
entries[i] = f.newObjectFromFile(ctx, "", fileFromSharedFile(&sharedFile))
}
return entries, nil
}
func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesList, err error) {
// fs.Debugf(f, "Requesting files for dir `%s`", directoryID)
request := ListFilesRequest{
FolderID: directoryID,
}
opts := rest.Opts{
Method: "POST",
Path: "/file/ls.cgi",
}
filesList = &FilesList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, filesList)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
}
for i := range filesList.Items {
item := &filesList.Items[i]
item.Filename = f.opt.Enc.ToStandardName(item.Filename)
}
return filesList, nil
}
func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *FoldersList, err error) {
// fs.Debugf(f, "Requesting folders for id `%s`", directoryID)
request := ListFolderRequest{
FolderID: directoryID,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/ls.cgi",
}
foldersList = &FoldersList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, foldersList)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list folders")
}
foldersList.Name = f.opt.Enc.ToStandardName(foldersList.Name)
for i := range foldersList.SubFolders {
folder := &foldersList.SubFolders[i]
folder.Name = f.opt.Enc.ToStandardName(folder.Name)
}
// fs.Debugf(f, "Got FoldersList for id `%s`", directoryID)
return foldersList, err
}
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
files, err := f.listFiles(ctx, folderID)
if err != nil {
return nil, err
}
folders, err := f.listFolders(ctx, folderID)
if err != nil {
return nil, err
}
entries = make([]fs.DirEntry, len(files.Items)+len(folders.SubFolders))
for i, item := range files.Items {
entries[i] = f.newObjectFromFile(ctx, dir, item)
}
for i, folder := range folders.SubFolders {
createDate, err := time.Parse("2006-01-02 15:04:05", folder.CreateDate)
if err != nil {
return nil, err
}
fullPath := getRemote(dir, folder.Name)
folderID := strconv.Itoa(folder.ID)
entries[len(files.Items)+i] = fs.NewDir(fullPath, createDate).SetID(folderID)
// fs.Debugf(f, "Put Path `%s` for id `%d` into dircache", fullPath, folder.ID)
f.dirCache.Put(fullPath, folderID)
}
return entries, nil
}
func (f *Fs) newObjectFromFile(ctx context.Context, dir string, item File) *Object {
return &Object{
fs: f,
remote: getRemote(dir, item.Filename),
file: item,
}
}
func getRemote(dir, fileName string) string {
if dir == "" {
return fileName
}
return dir + "/" + fileName
}
func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (response *MakeFolderResponse, err error) {
name := f.opt.Enc.FromStandardName(leaf)
// fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID)
request := MakeFolderRequest{
FolderID: folderID,
Name: name,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/mkdir.cgi",
}
response = &MakeFolderResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create folder")
}
// fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID)
return response, err
}
func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (response *GenericOKResponse, err error) {
// fs.Debugf(f, "Removing folder with id `%s`", directoryID)
request := &RemoveFolderRequest{
FolderID: folderID,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/rm.cgi",
}
response = &GenericOKResponse{}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove folder")
}
if response.Status != "OK" {
return nil, errors.New("Can't remove non-empty dir")
}
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
return response, nil
}
func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKResponse, err error) {
request := &RemoveFileRequest{
Files: []RmFile{
{url},
},
}
opts := rest.Opts{
Method: "POST",
Path: "/file/rm.cgi",
}
response = &GenericOKResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove file")
}
// fs.Debugf(f, "Removed file with url `%s`", url)
return response, nil
}
func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
// fs.Debugf(f, "Requesting Upload node")
opts := rest.Opts{
Method: "GET",
ContentType: "application/json", // 1Fichier API is bad
Path: "/upload/get_upload_server.cgi",
}
response = &GetUploadNodeResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "didnt got an upload node")
}
// fs.Debugf(f, "Got Upload node")
return response, err
}
func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string, options ...fs.OpenOption) (response *http.Response, err error) {
// fs.Debugf(f, "Uploading File `%s`", fileName)
fileName = f.opt.Enc.FromStandardName(fileName)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
}
opts := rest.Opts{
Method: "POST",
Path: "/upload.cgi",
Parameters: map[string][]string{
"id": {uploadID},
},
NoResponse: true,
Body: in,
ContentLength: &size,
Options: options,
MultipartContentName: "file[]",
MultipartFileName: fileName,
MultipartParams: map[string][]string{
"did": {folderID},
},
}
if node != "" {
opts.RootURL = "https://" + node
}
err = f.pacer.CallNoRetry(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, nil)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't upload file")
}
// fs.Debugf(f, "Uploaded File `%s`", fileName)
return response, err
}
func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
// fs.Debugf(f, "Ending File Upload `%s`", uploadID)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
}
opts := rest.Opts{
Method: "GET",
Path: "/end.pl",
RootURL: "https://" + nodeurl,
Parameters: map[string][]string{
"xid": {uploadID},
},
ExtraHeaders: map[string]string{
"JSON": "1",
},
}
response = &EndFileUploadResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't finish file upload")
}
return response, err
}
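
All of the calls above follow one recipe: build a request struct, describe the endpoint in rest.Opts, then run CallJSON inside the pacer so rate limiting and the Flood-detected backoff are handled in one place. A sketch of how a further call would slot into the same pattern; the /file/mv.cgi endpoint and its payload are assumptions for illustration, not part of this backend:

package fichier

import (
	"context"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/lib/rest"
)

// renameFile is a hypothetical sketch showing the shape of a new API call.
func (f *Fs) renameFile(ctx context.Context, url, newName string) (response *GenericOKResponse, err error) {
	request := map[string]string{"url": url, "filename": newName} // assumed payload
	opts := rest.Opts{
		Method: "POST",
		Path:   "/file/mv.cgi", // assumed endpoint
	}
	response = &GenericOKResponse{}
	// the pacer serialises calls and retries per shouldRetry, exactly as above
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(ctx, &opts, &request, response)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't rename file")
	}
	return response, nil
}
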


@@ -1,425 +0,0 @@
package fichier
import (
"context"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
const (
rootID = "0"
apiBaseURL = "https://api.1fichier.com/v1"
minSleep = 400 * time.Millisecond // api is extremely rate limited now
maxSleep = 5 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
attackConstant = 0 // start with max sleep
)
func init() {
fs.Register(&fs.RegInfo{
Name: "fichier",
Description: "1Fichier",
Config: func(name string, config configmap.Mapper) {
},
NewFs: NewFs,
Options: []fs.Option{{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
Name: "api_key",
}, {
Help: "If you want to download a shared folder, add this parameter",
Name: "shared_folder",
Required: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Characters that need escaping
//
// '\\': '＼', // FULLWIDTH REVERSE SOLIDUS
// '<': '＜', // FULLWIDTH LESS-THAN SIGN
// '>': '＞', // FULLWIDTH GREATER-THAN SIGN
// '"': '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
// '\'': '＇', // FULLWIDTH APOSTROPHE
// '$': '＄', // FULLWIDTH DOLLAR SIGN
// '`': '｀', // FULLWIDTH GRAVE ACCENT
//
// Leading space and trailing space
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeSingleQuote |
encoder.EncodeBackQuote |
encoder.EncodeDoubleQuote |
encoder.EncodeLtGt |
encoder.EncodeDollar |
encoder.EncodeLeftSpace |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs is the interface a cloud storage system must provide
type Fs struct {
root string
name string
features *fs.Features
opt Options
dirCache *dircache.DirCache
baseClient *http.Client
pacer *fs.Pacer
rest *rest.Client
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
folderID, err := strconv.Atoi(pathID)
if err != nil {
return "", false, err
}
folders, err := f.listFolders(ctx, folderID)
if err != nil {
return "", false, err
}
for _, folder := range folders.SubFolders {
if folder.Name == leaf {
pathIDOut := strconv.Itoa(folder.ID)
return pathIDOut, true, nil
}
}
return "", false, nil
}
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
folderID, err := strconv.Atoi(pathID)
if err != nil {
return "", err
}
resp, err := f.makeFolder(ctx, leaf, folderID)
if err != nil {
return "", err
}
return strconv.Itoa(resp.FolderID), err
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("1Fichier root '%s'", f.root)
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns the supported hash types of the filesystem
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.Whirlpool)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// NewFs makes a new Fs object from the path
//
// The path is of the form remote:path
//
// Remotes are looked up in the config file. If the remote isn't
// found then NotFoundInConfigFile will be returned.
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(config, opt)
if err != nil {
return nil, err
}
// If using a Shared Folder override root
if opt.SharedFolder != "" {
root = ""
}
//workaround for wonky parser
root = strings.Trim(root, "/")
f := &Fs{
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
baseClient: &http.Client{},
}
f.features = (&fs.Features{
DuplicateFiles: true,
CanHaveEmptyDirectories: true,
}).Fill(f)
client := fshttp.NewClient(fs.Config)
f.rest = rest.NewClient(client).SetRoot(apiBaseURL)
f.rest.SetHeader("Authorization", "Bearer "+f.opt.APIKey)
f.dirCache = dircache.New(root, rootID, f)
ctx := context.Background()
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
return f, nil
}
return nil, err
}
f.features.Fill(&tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.opt.SharedFolder != "" {
return f.listSharedFiles(ctx, f.opt.SharedFolder)
}
dirContent, err := f.listDir(ctx, dir)
if err != nil {
return nil, err
}
return dirContent, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
}
return nil, err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
files, err := f.listFiles(ctx, folderID)
if err != nil {
return nil, err
}
for _, file := range files.Items {
if file.Filename == leaf {
path, ok := f.dirCache.GetInv(directoryID)
if !ok {
return nil, errors.New("Cannot find dir in dircache")
}
return f.newObjectFromFile(ctx, path, file), nil
}
}
return nil, fs.ErrorObjectNotFound
}
// Put in to the remote path with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
existingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
default:
return nil, err
}
}
// putUnchecked uploads the object with the given name and size
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(100e9) {
return nil, errors.New("File too big, cant upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
nodeResponse, err := f.getUploadNode(ctx)
if err != nil {
return nil, err
}
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
if err != nil {
return nil, err
}
_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL, options...)
if err != nil {
return nil, err
}
fileUploadResponse, err := f.endUpload(ctx, nodeResponse.ID, nodeResponse.URL)
if err != nil {
return nil, err
}
if len(fileUploadResponse.Links) != 1 {
return nil, errors.New("unexpected amount of files")
}
link := fileUploadResponse.Links[0]
fileSize, err := strconv.ParseInt(link.Size, 10, 64)
if err != nil {
return nil, err
}
return &Object{
fs: f,
remote: remote,
file: File{
ACL: 0,
CDN: 0,
Checksum: link.Whirlpool,
ContentType: "",
Date: time.Now().Format("2006-01-02 15:04:05"),
Filename: link.Filename,
Pass: 0,
Size: fileSize,
URL: link.Download,
},
}, nil
}
// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.putUnchecked(ctx, in, src.Remote(), src.Size(), options...)
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
_, err := f.dirCache.FindDir(ctx, dir, true)
return err
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return err
}
_, err = f.removeFolder(ctx, dir, folderID)
if err != nil {
return err
}
f.dirCache.FlushDir(dir)
return nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ dircache.DirCacher = (*Fs)(nil)
)
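
The dircache wiring above is what maps path names to 1Fichier folder IDs: dircache.New seeds the cache with rootID, and the cache calls back into the backend's FindLeaf and CreateDir for each unknown path segment. A small illustrative helper (mkdirAll and the example path are a sketch of how FindPath behaves, not part of the backend):

package fichier

import "context"

// mkdirAll resolves a remote like "backup/2024/report.txt": FindPath with
// create == true walks each directory segment, using FindLeaf to look up
// existing folders and CreateDir for missing ones, caching every resolved
// ID. It returns the leaf name and the folder ID of its parent.
func (f *Fs) mkdirAll(ctx context.Context, remote string) (leaf, parentID string, err error) {
	return f.dirCache.FindPath(ctx, remote, true)
}
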


@@ -1,17 +0,0 @@
// Test 1Fichier filesystem interface
package fichier
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fs.Config.LogLevel = fs.LogLevelDebug
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFichier:",
})
}


@@ -1,158 +0,0 @@
package fichier
import (
"context"
"io"
"net/http"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
)
// Object is a filesystem like object provided by an Fs
type Object struct {
fs *Fs
remote string
file File
}
// String returns a description of the Object
func (o *Object) String() string {
return o.file.Filename
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
modTime, err := time.Parse("2006-01-02 15:04:05", o.file.Date)
if err != nil {
return time.Now()
}
return modTime
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return o.file.Size
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.fs
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.Whirlpool {
return "", hash.ErrUnsupported
}
return o.file.Checksum, nil
}
// Storable says whether this object can be stored
func (o *Object) Storable() bool {
return true
}
// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(context.Context, time.Time) error {
return fs.ErrorCantSetModTime
//return errors.New("setting modtime is not supported for 1fichier remotes")
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
fs.FixRangeOption(options, o.file.Size)
downloadToken, err := o.fs.getDownloadToken(ctx, o.file.URL)
if err != nil {
return nil, err
}
var resp *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: downloadToken.URL,
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.rest.Call(ctx, &opts)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
return resp.Body, err
}
// Update in to the object with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if src.Size() < 0 {
return errors.New("refusing to update with unknown size")
}
// upload with new size but old name
info, err := o.fs.putUnchecked(ctx, in, o.Remote(), src.Size(), options...)
if err != nil {
return err
}
// Delete duplicate after successful upload
err = o.Remove(ctx)
if err != nil {
return errors.Wrap(err, "failed to remove old version")
}
// Replace guts of old object with new one
*o = *info.(*Object)
return nil
}
// Remove removes this object
func (o *Object) Remove(ctx context.Context) error {
// fs.Debugf(f, "Removing file `%s` with url `%s`", o.file.Filename, o.file.URL)
_, err := o.fs.deleteFile(ctx, o.file.URL)
if err != nil {
return err
}
return nil
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.file.ContentType
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
return o.file.URL
}
// Check the interfaces are satisfied
var (
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)
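
Open above applies fs.FixRangeOption so a requested byte range is clamped to the object's size before the download request is issued. A sketch of how a caller would read part of an object with a range option (the helper name is assumed):

package example

import (
	"context"
	"io/ioutil"

	"github.com/rclone/rclone/fs"
)

// readFirstKiB opens the object with a range covering bytes 0-1023; inside
// Open the backend clamps the range and turns it into a partial download.
func readFirstKiB(ctx context.Context, o fs.Object) ([]byte, error) {
	rc, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})
	if err != nil {
		return nil, err
	}
	defer func() { _ = rc.Close() }()
	return ioutil.ReadAll(rc)
}
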


@@ -1,120 +0,0 @@
package fichier
// ListFolderRequest is the request structure of the corresponding request
type ListFolderRequest struct {
FolderID int `json:"folder_id"`
}
// ListFilesRequest is the request structure of the corresponding request
type ListFilesRequest struct {
FolderID int `json:"folder_id"`
}
// DownloadRequest is the request structure of the corresponding request
type DownloadRequest struct {
URL string `json:"url"`
Single int `json:"single"`
}
// RemoveFolderRequest is the request structure of the corresponding request
type RemoveFolderRequest struct {
FolderID int `json:"folder_id"`
}
// RemoveFileRequest is the request structure of the corresponding request
type RemoveFileRequest struct {
Files []RmFile `json:"files"`
}
// RmFile is the request structure of the corresponding request
type RmFile struct {
URL string `json:"url"`
}
// GenericOKResponse is the response structure of the corresponding request
type GenericOKResponse struct {
Status string `json:"status"`
Message string `json:"message"`
}
// MakeFolderRequest is the request structure of the corresponding request
type MakeFolderRequest struct {
Name string `json:"name"`
FolderID int `json:"folder_id"`
}
// MakeFolderResponse is the response structure of the corresponding request
type MakeFolderResponse struct {
Name string `json:"name"`
FolderID int `json:"folder_id"`
}
// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
ID string `json:"id"`
URL string `json:"url"`
}
// GetTokenResponse is the response structure of the corresponding request
type GetTokenResponse struct {
URL string `json:"url"`
Status string `json:"Status"`
Message string `json:"Message"`
}
// SharedFolderResponse is the response structure of the corresponding request
type SharedFolderResponse []SharedFile
// SharedFile is the structure how 1Fichier returns a shared File
type SharedFile struct {
Filename string `json:"filename"`
Link string `json:"link"`
Size int64 `json:"size"`
}
// EndFileUploadResponse is the response structure of the corresponding request
type EndFileUploadResponse struct {
Incoming int `json:"incoming"`
Links []struct {
Download string `json:"download"`
Filename string `json:"filename"`
Remove string `json:"remove"`
Size string `json:"size"`
Whirlpool string `json:"whirlpool"`
} `json:"links"`
}
// File is the structure how 1Fichier returns a File
type File struct {
ACL int `json:"acl"`
CDN int `json:"cdn"`
Checksum string `json:"checksum"`
ContentType string `json:"content-type"`
Date string `json:"date"`
Filename string `json:"filename"`
Pass int `json:"pass"`
Size int64 `json:"size"`
URL string `json:"url"`
}
// FilesList is the structure how 1Fichier returns a list of files
type FilesList struct {
Items []File `json:"items"`
Status string `json:"Status"`
}
// Folder is the structure how 1Fichier returns a Folder
type Folder struct {
CreateDate string `json:"create_date"`
ID int `json:"id"`
Name string `json:"name"`
Pass int `json:"pass"`
}
// FoldersList is the structure how 1Fichier returns a list of Folders
type FoldersList struct {
FolderID int `json:"folder_id"`
Name string `json:"name"`
Status string `json:"Status"`
SubFolders []Folder `json:"sub_folders"`
}
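
These structs are plain JSON mappings of the 1Fichier API, so the struct tags fully determine the wire format. A quick standard-library illustration:

package fichier

import (
	"encoding/json"
	"fmt"
)

// ExampleListFilesRequest shows the JSON a request struct produces; as a
// runnable example it would live in a _test.go file.
func ExampleListFilesRequest() {
	body, _ := json.Marshal(ListFilesRequest{FolderID: 42})
	fmt.Println(string(body))
	// Output: {"folder_id":42}
}
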


@@ -2,28 +2,22 @@
 package ftp
 
 import (
-	"context"
-	"crypto/tls"
 	"io"
 	"net/textproto"
+	"net/url"
 	"os"
 	"path"
-	"runtime"
 	"strings"
 	"sync"
 	"time"
 
 	"github.com/jlaffaye/ftp"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/obscure"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/lib/readers"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/config/obscure"
-	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/lib/encoder"
-	"github.com/rclone/rclone/lib/pacer"
-	"github.com/rclone/rclone/lib/readers"
 )
 
 // Register with Fs
@@ -32,89 +26,37 @@ func init() {
 		Name:        "ftp",
 		Description: "FTP Connection",
 		NewFs:       NewFs,
-		Options: []fs.Option{{
-			Name:     "host",
-			Help:     "FTP host to connect to",
-			Required: true,
-			Examples: []fs.OptionExample{{
-				Value: "ftp.example.com",
-				Help:  "Connect to ftp.example.com",
-			}},
-		}, {
-			Name: "user",
-			Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
-		}, {
-			Name: "port",
-			Help: "FTP port, leave blank to use default (21)",
-		}, {
-			Name:       "pass",
-			Help:       "FTP password",
-			IsPassword: true,
-			Required:   true,
-		}, {
-			Name: "tls",
-			Help: `Use FTPS over TLS (Implicit)
-When using implicit FTP over TLS the client will connect using TLS
-right from the start, which in turn breaks the compatibility with
-non-TLS-aware servers. This is usually served over port 990 rather
-than port 21. Cannot be used in combination with explicit FTP.`,
-			Default: false,
-		}, {
-			Name: "explicit_tls",
-			Help: `Use FTP over TLS (Explicit)
-When using explicit FTP over TLS the client explicitly request
-security from the server in order to upgrade a plain text connection
-to an encrypted one. Cannot be used in combination with implicit FTP.`,
-			Default: false,
-		}, {
-			Name:     "concurrency",
-			Help:     "Maximum number of FTP simultaneous connections, 0 for unlimited",
-			Default:  0,
-			Advanced: true,
-		}, {
-			Name:     "no_check_certificate",
-			Help:     "Do not verify the TLS certificate of the server",
-			Default:  false,
-			Advanced: true,
-		}, {
-			Name:     "disable_epsv",
-			Help:     "Disable using EPSV even if server advertises support",
-			Default:  false,
-			Advanced: true,
-		}, {
-			Name:     config.ConfigEncoding,
-			Help:     config.ConfigEncodingHelp,
-			Advanced: true,
-			// The FTP protocol can't handle trailing spaces (for instance
-			// pureftpd turns them into _)
-			//
-			// proftpd can't handle '*' in file names
-			// pureftpd can't handle '[', ']' or '*'
-			Default: (encoder.Display |
-				encoder.EncodeRightSpace),
-		}},
+		Options: []fs.Option{
+			{
+				Name:     "host",
+				Help:     "FTP host to connect to",
+				Optional: false,
+				Examples: []fs.OptionExample{{
+					Value: "ftp.example.com",
+					Help:  "Connect to ftp.example.com",
+				}},
+			}, {
+				Name:     "user",
+				Help:     "FTP username, leave blank for current username, " + os.Getenv("USER"),
+				Optional: true,
+			}, {
+				Name:     "port",
+				Help:     "FTP port, leave blank to use default (21) ",
+				Optional: true,
+			}, {
+				Name:       "pass",
+				Help:       "FTP password",
+				IsPassword: true,
+				Optional:   false,
+			},
+		},
 	})
 }
 
-// Options defines the configuration for this backend
-type Options struct {
-	Host              string               `config:"host"`
-	User              string               `config:"user"`
-	Pass              string               `config:"pass"`
-	Port              string               `config:"port"`
-	TLS               bool                 `config:"tls"`
-	ExplicitTLS       bool                 `config:"explicit_tls"`
-	Concurrency       int                  `config:"concurrency"`
-	SkipVerifyTLSCert bool                 `config:"no_check_certificate"`
-	DisableEPSV       bool                 `config:"disable_epsv"`
-	Enc               encoder.MultiEncoder `config:"encoding"`
-}
-
 // Fs represents a remote FTP server
 type Fs struct {
 	name     string       // name of this remote
 	root     string       // the path we are working on if any
-	opt      Options      // parsed options
 	features *fs.Features // optional features
 	url      string
 	user     string
@@ -122,7 +64,6 @@ type Fs struct {
 	dialAddr string
 	poolMu   sync.Mutex
 	pool     []*ftp.ServerConn
-	tokens   *pacer.TokenDispenser
 }
 
 // Object describes an FTP file
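
The tokens field removed in this hunk is what lets the newer code cap simultaneous FTP connections: getFtpConnection takes a token before dialling and putFtpConnection returns it (see the hunks below). A minimal sketch of the dispenser pattern, assuming rclone's lib/pacer:

package example

import "github.com/rclone/rclone/lib/pacer"

// withConnection shows the dispenser shape: NewTokenDispenser(n) hands out
// n tokens, Get blocks while all are checked out, and Put returns one.
func withConnection(tokens *pacer.TokenDispenser, do func()) {
	tokens.Get()
	defer tokens.Put()
	do()
}
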
@@ -162,70 +103,10 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }
 
-// Enable debugging output
-type debugLog struct {
-	mu   sync.Mutex
-	auth bool
-}
-
-// Write writes len(p) bytes from p to the underlying data stream. It returns
-// the number of bytes written from p (0 <= n <= len(p)) and any error
-// encountered that caused the write to stop early. Write must return a non-nil
-// error if it returns n < len(p). Write must not modify the slice data, even
-// temporarily.
-//
-// Implementations must not retain p.
-//
-// This writes debug info to the log
-func (dl *debugLog) Write(p []byte) (n int, err error) {
-	dl.mu.Lock()
-	defer dl.mu.Unlock()
-	_, file, _, ok := runtime.Caller(1)
-	direction := "FTP Rx"
-	if ok && strings.Contains(file, "multi") {
-		direction = "FTP Tx"
-	}
-	lines := strings.Split(string(p), "\r\n")
-	if lines[len(lines)-1] == "" {
-		lines = lines[:len(lines)-1]
-	}
-	for _, line := range lines {
-		if !dl.auth && strings.HasPrefix(line, "PASS") {
-			fs.Debugf(direction, "PASS *****")
-			continue
-		}
-		fs.Debugf(direction, "%q", line)
-	}
-	return len(p), nil
-}
-
 // Open a new connection to the FTP server.
 func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
 	fs.Debugf(f, "Connecting to FTP server")
-	ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
-	if f.opt.TLS && f.opt.ExplicitTLS {
-		fs.Errorf(f, "Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
-		return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
-	} else if f.opt.TLS {
-		tlsConfig := &tls.Config{
-			ServerName:         f.opt.Host,
-			InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
-		}
-		ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
-	} else if f.opt.ExplicitTLS {
-		tlsConfig := &tls.Config{
-			ServerName:         f.opt.Host,
-			InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
-		}
-		ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig))
-	}
-	if f.opt.DisableEPSV {
-		ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
-	}
-	if fs.Config.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
-		ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: fs.Config.Dump&fs.DumpAuth != 0}))
-	}
-	c, err := ftp.Dial(f.dialAddr, ftpConfig...)
+	c, err := ftp.DialTimeout(f.dialAddr, fs.Config.ConnectTimeout)
 	if err != nil {
 		fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
 		return nil, errors.Wrap(err, "ftpConnection Dial")
@@ -241,9 +122,6 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
 
 // Get an FTP connection from the pool, or open a new one
 func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
-	if f.opt.Concurrency > 0 {
-		f.tokens.Get()
-	}
 	f.poolMu.Lock()
 	if len(f.pool) > 0 {
 		c = f.pool[0]
@@ -253,11 +131,7 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
 	if c != nil {
 		return c, nil
 	}
-	c, err = f.ftpConnection()
-	if err != nil && f.opt.Concurrency > 0 {
-		f.tokens.Put()
-	}
-	return c, err
+	return f.ftpConnection()
 }
 
 // Return an FTP connection to the pool
@@ -267,16 +141,7 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
 // if err is not nil then it checks the connection is alive using a
 // NOOP request
 func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
-	if f.opt.Concurrency > 0 {
-		defer f.tokens.Put()
-	}
-	if pc == nil {
-		return
-	}
 	c := *pc
-	if c == nil {
-		return
-	}
 	*pc = nil
 	if err != nil {
 		// If not a regular FTP error code then check the connection
@@ -295,44 +160,56 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
 	f.poolMu.Unlock()
 }
 
-// NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
-	ctx := context.Background()
+// NewFs contstructs an Fs from the path, container:path
+func NewFs(name, root string) (ff fs.Fs, err error) {
 	// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
-	// Parse config into Options struct
-	opt := new(Options)
-	err = configstruct.Set(m, opt)
-	if err != nil {
-		return nil, err
-	}
-	pass, err := obscure.Reveal(opt.Pass)
+	// FIXME Convert the old scheme used for the first beta - remove after release
+	if ftpURL := config.FileGet(name, "url"); ftpURL != "" {
+		fs.Infof(name, "Converting old configuration")
+		u, err := url.Parse(ftpURL)
+		if err != nil {
+			return nil, errors.Wrapf(err, "Failed to parse old url %q", ftpURL)
+		}
+		parts := strings.Split(u.Host, ":")
+		config.FileSet(name, "host", parts[0])
+		if len(parts) > 1 {
+			config.FileSet(name, "port", parts[1])
+		}
+		config.FileSet(name, "host", u.Host)
+		config.FileSet(name, "user", config.FileGet(name, "username"))
+		config.FileSet(name, "pass", config.FileGet(name, "password"))
+		config.FileDeleteKey(name, "username")
+		config.FileDeleteKey(name, "password")
+		config.FileDeleteKey(name, "url")
+		config.SaveConfig()
+		if u.Path != "" && u.Path != "/" {
+			fs.Errorf(name, "Path %q in FTP URL no longer supported - put it on the end of the remote %s:%s", u.Path, name, u.Path)
+		}
+	}
+	host := config.FileGet(name, "host")
+	user := config.FileGet(name, "user")
+	pass := config.FileGet(name, "pass")
+	port := config.FileGet(name, "port")
+	pass, err = obscure.Reveal(pass)
 	if err != nil {
 		return nil, errors.Wrap(err, "NewFS decrypt password")
 	}
-	user := opt.User
 	if user == "" {
 		user = os.Getenv("USER")
 	}
-	port := opt.Port
 	if port == "" {
 		port = "21"
 	}
-	dialAddr := opt.Host + ":" + port
-	protocol := "ftp://"
-	if opt.TLS {
-		protocol = "ftps://"
-	}
-	u := protocol + path.Join(dialAddr+"/", root)
+	dialAddr := host + ":" + port
+	u := "ftp://" + path.Join(dialAddr+"/", root)
 	f := &Fs{
 		name:     name,
 		root:     root,
-		opt:      *opt,
 		url:      u,
 		user:     user,
 		pass:     pass,
 		dialAddr: dialAddr,
-		tokens:   pacer.NewTokenDispenser(opt.Concurrency),
 	}
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
@@ -350,7 +227,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 		if f.root == "." {
 			f.root = ""
 		}
-		_, err := f.NewObject(ctx, remote)
+		_, err := f.NewObject(remote)
 		if err != nil {
 			if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
 				// File doesn't exist so return old f
@@ -389,37 +266,10 @@ func translateErrorDir(err error) error {
 	return err
 }
 
-// entryToStandard converts an incoming ftp.Entry to Standard encoding
-func (f *Fs) entryToStandard(entry *ftp.Entry) {
-	// Skip . and .. as we don't want these encoded
-	if entry.Name == "." || entry.Name == ".." {
-		return
-	}
-	entry.Name = f.opt.Enc.ToStandardName(entry.Name)
-	entry.Target = f.opt.Enc.ToStandardPath(entry.Target)
-}
-
-// dirFromStandardPath returns dir in encoded form.
-func (f *Fs) dirFromStandardPath(dir string) string {
-	// Skip . and .. as we don't want these encoded
-	if dir == "." || dir == ".." {
-		return dir
-	}
-	return f.opt.Enc.FromStandardPath(dir)
-}
-
 // findItem finds a directory entry for the name in its parent directory
 func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
 	// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
 	fullPath := path.Join(f.root, remote)
-	if fullPath == "" || fullPath == "." || fullPath == "/" {
-		// if root, assume exists and synthesize an entry
-		return &ftp.Entry{
-			Name: "",
-			Type: ftp.EntryTypeFolder,
-			Time: time.Now(),
-		}, nil
-	}
 	dir := path.Dir(fullPath)
 	base := path.Base(fullPath)
 
@@ -427,13 +277,12 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "findItem")
 	}
-	files, err := c.List(f.dirFromStandardPath(dir))
+	files, err := c.List(dir)
 	f.putFtpConnection(&c, err)
 	if err != nil {
 		return nil, translateErrorFile(err)
 	}
 	for _, file := range files {
-		f.entryToStandard(file)
 		if file.Name == base {
 			return file, nil
 		}
@@ -443,7 +292,7 @@
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
+func (f *Fs) NewObject(remote string) (o fs.Object, err error) {
 	// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
 	entry, err := f.findItem(remote)
 	if err != nil {
@@ -487,42 +336,17 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
+func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+	// defer fs.Trace(dir, "curlevel=%d", curlevel)("")
 	c, err := f.getFtpConnection()
 	if err != nil {
 		return nil, errors.Wrap(err, "list")
 	}
-
-	var listErr error
-	var files []*ftp.Entry
-
-	resultchan := make(chan []*ftp.Entry, 1)
-	errchan := make(chan error, 1)
-	go func() {
-		result, err := c.List(f.dirFromStandardPath(path.Join(f.root, dir)))
-		f.putFtpConnection(&c, err)
-		if err != nil {
-			errchan <- err
-			return
-		}
-		resultchan <- result
-	}()
-	// Wait for List for up to Timeout seconds
-	timer := time.NewTimer(fs.Config.Timeout)
-	select {
-	case listErr = <-errchan:
-		timer.Stop()
-		return nil, translateErrorDir(listErr)
-	case files = <-resultchan:
-		timer.Stop()
-	case <-timer.C:
-		// if timer fired assume no error but connection dead
-		fs.Errorf(f, "Timeout when waiting for List")
-		return nil, errors.New("Timeout when waiting for List")
+	files, err := c.List(path.Join(f.root, dir))
+	f.putFtpConnection(&c, err)
+	if err != nil {
+		return nil, translateErrorDir(err)
 	}
 	// Annoyingly FTP returns success for a directory which
 	// doesn't exist, so check it really doesn't exist if no
 	// entries found.
@@ -537,7 +361,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	}
 	for i := range files {
 		object := files[i]
-		f.entryToStandard(object)
 		newremote := path.Join(dir, object.Name)
 		switch object.Type {
 		case ftp.EntryTypeFolder:
@@ -578,7 +401,7 @@ func (f *Fs) Precision() time.Duration {
 // May create the object even if it returns an error - if so
 // will return the object and the error, otherwise will return
 // nil and the error
-func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	// fs.Debugf(f, "Trying to put file %s", src.Remote())
 	err := f.mkParentDir(src.Remote())
 	if err != nil {
@@ -588,13 +411,13 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 		fs:     f,
 		remote: src.Remote(),
 	}
-	err = o.Update(ctx, in, src, options...)
+	err = o.Update(in, src, options...)
 	return o, err
 }
 
 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.Put(ctx, in, src, options...)
+func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.Put(in, src, options...)
 }
 
 // getInfo reads the FileInfo for a path
@@ -607,21 +430,19 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "getInfo")
 	}
-	files, err := c.List(f.dirFromStandardPath(dir))
+	files, err := c.List(dir)
 	f.putFtpConnection(&c, err)
 	if err != nil {
 		return nil, translateErrorFile(err)
 	}
 	for i := range files {
-		file := files[i]
-		f.entryToStandard(file)
-		if file.Name == base {
+		if files[i].Name == base {
 			info := &FileInfo{
 				Name:    remote,
-				Size:    file.Size,
-				ModTime: file.Time,
-				IsDir:   file.Type == ftp.EntryTypeFolder,
+				Size:    files[i].Size,
+				ModTime: files[i].Time,
+				IsDir:   files[i].Type == ftp.EntryTypeFolder,
 			}
 			return info, nil
 		}
@@ -631,7 +452,6 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
 
 // mkdir makes the directory and parents using unrooted paths
 func (f *Fs) mkdir(abspath string) error {
-	abspath = path.Clean(abspath)
 	if abspath == "." || abspath == "/" {
 		return nil
 	}
@@ -653,7 +473,7 @@ func (f *Fs) mkdir(abspath string) error {
 	if connErr != nil {
 		return errors.Wrap(connErr, "mkdir")
 	}
-	err = c.MakeDir(f.dirFromStandardPath(abspath))
+	err = c.MakeDir(abspath)
 	f.putFtpConnection(&c, err)
 	switch errX := err.(type) {
 	case *textproto.Error:
@@ -675,7 +495,7 @@ func (f *Fs) mkParentDir(remote string) error {
 }
 
 // Mkdir creates the directory if it doesn't exist
-func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
+func (f *Fs) Mkdir(dir string) (err error) {
 	// defer fs.Trace(dir, "")("err=%v", &err)
 	root := path.Join(f.root, dir)
 	return f.mkdir(root)
@@ -684,18 +504,18 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
-func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+func (f *Fs) Rmdir(dir string) error {
 	c, err := f.getFtpConnection()
 	if err != nil {
 		return errors.Wrap(translateErrorFile(err), "Rmdir")
 	}
-	err = c.RemoveDir(f.dirFromStandardPath(path.Join(f.root, dir)))
+	err = c.RemoveDir(path.Join(f.root, dir))
 	f.putFtpConnection(&c, err)
 	return translateErrorDir(err)
 }
 
 // Move renames a remote file object
-func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't move - not same remote type")
@@ -710,14 +530,14 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 		return nil, errors.Wrap(err, "Move")
 	}
 	err = c.Rename(
-		f.opt.Enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
-		f.opt.Enc.FromStandardPath(path.Join(f.root, remote)),
+		path.Join(srcObj.fs.root, srcObj.remote),
+		path.Join(f.root, remote),
 	)
 	f.putFtpConnection(&c, err)
 	if err != nil {
 		return nil, errors.Wrap(err, "Move Rename failed")
 	}
-	dstObj, err := f.NewObject(ctx, remote)
+	dstObj, err := f.NewObject(remote)
 	if err != nil {
 		return nil, errors.Wrap(err, "Move NewObject failed")
 	}
@@ -732,7 +552,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 	srcFs, ok := src.(*Fs)
 	if !ok {
 		fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -764,8 +584,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 		return errors.Wrap(err, "DirMove")
 	}
 	err = c.Rename(
-		f.dirFromStandardPath(srcPath),
-		f.dirFromStandardPath(dstPath),
+		srcPath,
+		dstPath,
 	)
 	f.putFtpConnection(&c, err)
 	if err != nil {
@@ -795,7 +615,7 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns the hash of an object returning a lowercase hex string
-func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
+func (o *Object) Hash(t hash.Type) (string, error) {
 	return "", hash.ErrUnsupported
 }
@@ -805,12 +625,12 @@
 }
 
 // ModTime returns the modification time of the object
-func (o *Object) ModTime(ctx context.Context) time.Time {
+func (o *Object) ModTime() time.Time {
 	return o.info.ModTime
 }
 
 // SetModTime sets the modification time of the object
-func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+func (o *Object) SetModTime(modTime time.Time) error {
 	return nil
 }
@@ -838,36 +658,18 @@ func (f *ftpReadCloser) Read(p []byte) (n int, err error) {
// Close the FTP reader and return the connection to the pool // Close the FTP reader and return the connection to the pool
func (f *ftpReadCloser) Close() error { func (f *ftpReadCloser) Close() error {
var err error err := f.rc.Close()
errchan := make(chan error, 1)
go func() {
errchan <- f.rc.Close()
}()
// Wait for Close for up to 60 seconds
timer := time.NewTimer(60 * time.Second)
select {
case err = <-errchan:
timer.Stop()
case <-timer.C:
// if timer fired assume no error but connection dead
fs.Errorf(f.f, "Timeout when waiting for connection Close")
f.f.putFtpConnection(nil, nil)
return nil
}
// if errors while reading or closing, dump the connection // if errors while reading or closing, dump the connection
if err != nil || f.err != nil { if err != nil || f.err != nil {
_ = f.c.Quit() _ = f.c.Quit()
f.f.putFtpConnection(nil, nil)
} else { } else {
f.f.putFtpConnection(&f.c, nil) f.f.putFtpConnection(&f.c, nil)
} }
// mask the error if it was caused by a premature close // mask the error if it was caused by a premature close
// NB StatusAboutToSend is to work around a bug in pureftpd
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
switch errX := err.(type) { switch errX := err.(type) {
case *textproto.Error: case *textproto.Error:
switch errX.Code { switch errX.Code {
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend: case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable:
err = nil err = nil
} }
} }
@@ -875,7 +677,7 @@ func (f *ftpReadCloser) Close() error {
} }
// Open an object for read // Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) { func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
// defer fs.Trace(o, "")("rc=%v, err=%v", &rc, &err) // defer fs.Trace(o, "")("rc=%v, err=%v", &rc, &err)
path := path.Join(o.fs.root, o.remote) path := path.Join(o.fs.root, o.remote)
var offset, limit int64 = 0, -1 var offset, limit int64 = 0, -1
@@ -895,7 +697,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
if err != nil { if err != nil {
return nil, errors.Wrap(err, "open") return nil, errors.Wrap(err, "open")
} }
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset)) fd, err := c.RetrFrom(path, uint64(offset))
if err != nil { if err != nil {
o.fs.putFtpConnection(&c, err) o.fs.putFtpConnection(&c, err)
return nil, errors.Wrap(err, "open") return nil, errors.Wrap(err, "open")
@@ -909,17 +711,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
// Copy the reader into the object updating modTime and size // Copy the reader into the object updating modTime and size
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
// defer fs.Trace(o, "src=%v", src)("err=%v", &err) // defer fs.Trace(o, "src=%v", src)("err=%v", &err)
path := path.Join(o.fs.root, o.remote) path := path.Join(o.fs.root, o.remote)
// remove the file if upload failed // remove the file if upload failed
remove := func() { remove := func() {
// Give the FTP server a chance to get its internal state in order after the error. removeErr := o.Remove()
// The error may have been local in which case we closed the connection. The server
// may still be dealing with it for a moment. A sleep isn't ideal but I haven't been
// able to think of a better method to find out if the server has finished - ncw
time.Sleep(1 * time.Second)
removeErr := o.Remove(ctx)
if removeErr != nil { if removeErr != nil {
fs.Debugf(o, "Failed to remove: %v", removeErr) fs.Debugf(o, "Failed to remove: %v", removeErr)
} else { } else {
@@ -930,11 +727,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil { if err != nil {
return errors.Wrap(err, "Update") return errors.Wrap(err, "Update")
} }
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in) err = c.Stor(path, in)
if err != nil { if err != nil {
_ = c.Quit() // toss this connection to avoid sync errors _ = c.Quit()
remove() remove()
o.fs.putFtpConnection(nil, err)
return errors.Wrap(err, "update stor") return errors.Wrap(err, "update stor")
} }
o.fs.putFtpConnection(&c, nil) o.fs.putFtpConnection(&c, nil)
@@ -946,7 +742,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
// Remove an object // Remove an object
func (o *Object) Remove(ctx context.Context) (err error) { func (o *Object) Remove() (err error) {
// defer fs.Trace(o, "")("err=%v", &err) // defer fs.Trace(o, "")("err=%v", &err)
path := path.Join(o.fs.root, o.remote) path := path.Join(o.fs.root, o.remote)
// Check if it's a directory or a file // Check if it's a directory or a file
@@ -955,13 +751,13 @@ func (o *Object) Remove(ctx context.Context) (err error) {
return err return err
} }
if info.IsDir { if info.IsDir {
err = o.fs.Rmdir(ctx, o.remote) err = o.fs.Rmdir(o.remote)
} else { } else {
c, err := o.fs.getFtpConnection() c, err := o.fs.getFtpConnection()
if err != nil { if err != nil {
return errors.Wrap(err, "Remove") return errors.Wrap(err, "Remove")
} }
err = c.Delete(o.fs.opt.Enc.FromStandardPath(path)) err = c.Delete(path)
o.fs.putFtpConnection(&c, err) o.fs.putFtpConnection(&c, err)
} }
return err return err
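The most substantial change in this hunk is the timeout-guarded Close that appears on the master side of the comparison (the lines marked -). For reference, a minimal, self-contained sketch of that pattern; the helper name closeWithTimeout is illustrative, only the channel-plus-timer shape comes from the diff:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
	"time"
)

// closeWithTimeout runs c.Close() in a goroutine and gives up waiting
// after d, so a hung network connection cannot block the caller forever.
func closeWithTimeout(c io.Closer, d time.Duration) error {
	errchan := make(chan error, 1) // buffered: the goroutine can finish even after we stop waiting
	go func() {
		errchan <- c.Close()
	}()
	timer := time.NewTimer(d)
	defer timer.Stop()
	select {
	case err := <-errchan:
		return err
	case <-timer.C:
		return errors.New("timeout waiting for Close")
	}
}

func main() {
	rc := io.NopCloser(strings.NewReader("hello"))
	fmt.Println(closeWithTimeout(rc, 60*time.Second)) // <nil>
}

The buffered channel is what makes the early return safe: if the timer fires first, the goroutine can still deliver its result without blocking or leaking.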

backend/ftp/ftp_test.go

@@ -4,45 +4,14 @@ package ftp_test
 import (
 	"testing"

-	"github.com/rclone/rclone/backend/ftp"
-	"github.com/rclone/rclone/fstest"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/ftp"
+	"github.com/ncw/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
-		RemoteName: "TestFTPProftpd:",
+		RemoteName: "TestFTP:",
 		NilObject:  (*ftp.Object)(nil),
 	})
 }
-
-func TestIntegration2(t *testing.T) {
-	if *fstest.RemoteName != "" {
-		t.Skip("skipping as -remote is set")
-	}
-	fstests.Run(t, &fstests.Opt{
-		RemoteName: "TestFTPRclone:",
-		NilObject:  (*ftp.Object)(nil),
-	})
-}
-
-func TestIntegration3(t *testing.T) {
-	if *fstest.RemoteName != "" {
-		t.Skip("skipping as -remote is set")
-	}
-	fstests.Run(t, &fstests.Opt{
-		RemoteName: "TestFTPPureftpd:",
-		NilObject:  (*ftp.Object)(nil),
-	})
-}
-
-// func TestIntegration4(t *testing.T) {
-// 	if *fstest.RemoteName != "" {
-// 		t.Skip("skipping as -remote is set")
-// 	}
-// 	fstests.Run(t, &fstests.Opt{
-// 		RemoteName: "TestFTPVsftpd:",
-// 		NilObject:  (*ftp.Object)(nil),
-// 	})
-// }

File diff suppressed because it is too large.

backend/googlecloudstorage/googlecloudstorage_test.go

@@ -1,12 +1,11 @@
 // Test GoogleCloudStorage filesystem interface
 package googlecloudstorage_test

 import (
 	"testing"

-	"github.com/rclone/rclone/backend/googlecloudstorage"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/googlecloudstorage"
+	"github.com/ncw/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote
backend/googlephotos/albums.go

@@ -1,148 +0,0 @@
// This file contains the albums abstraction
package googlephotos
import (
"path"
"strings"
"sync"
"github.com/rclone/rclone/backend/googlephotos/api"
)
// All the albums
type albums struct {
mu sync.Mutex
dupes map[string][]*api.Album // duplicated names
byID map[string]*api.Album //..indexed by ID
byTitle map[string]*api.Album //..indexed by Title
path map[string][]string // partial album names to directory
}
// Create a new album
func newAlbums() *albums {
return &albums{
dupes: map[string][]*api.Album{},
byID: map[string]*api.Album{},
byTitle: map[string]*api.Album{},
path: map[string][]string{},
}
}
// add an album
func (as *albums) add(album *api.Album) {
// Munge the name of the album into a sensible path name
album.Title = path.Clean(album.Title)
if album.Title == "." || album.Title == "/" {
album.Title = addID("", album.ID)
}
as.mu.Lock()
as._add(album)
as.mu.Unlock()
}
// _add an album - call with lock held
func (as *albums) _add(album *api.Album) {
// update dupes by title
dupes := as.dupes[album.Title]
dupes = append(dupes, album)
as.dupes[album.Title] = dupes
// Dedupe the album name if necessary
if len(dupes) >= 2 {
// If this is the first dupe, then need to adjust the first one
if len(dupes) == 2 {
firstAlbum := dupes[0]
as._del(firstAlbum)
as._add(firstAlbum)
// undo add of firstAlbum to dupes
as.dupes[album.Title] = dupes
}
album.Title = addID(album.Title, album.ID)
}
// Store the new album
as.byID[album.ID] = album
as.byTitle[album.Title] = album
// Store the partial paths
dir, leaf := album.Title, ""
for dir != "" {
i := strings.LastIndex(dir, "/")
if i >= 0 {
dir, leaf = dir[:i], dir[i+1:]
} else {
dir, leaf = "", dir
}
dirs := as.path[dir]
found := false
for _, dir := range dirs {
if dir == leaf {
found = true
}
}
if !found {
as.path[dir] = append(as.path[dir], leaf)
}
}
}
// del an album
func (as *albums) del(album *api.Album) {
as.mu.Lock()
as._del(album)
as.mu.Unlock()
}
// _del an album - call with lock held
func (as *albums) _del(album *api.Album) {
// We leave in dupes so it doesn't cause albums to get renamed
// Remove from byID and byTitle
delete(as.byID, album.ID)
delete(as.byTitle, album.Title)
// Remove from paths
dir, leaf := album.Title, ""
for dir != "" {
// Can't delete if this dir exists anywhere in the path structure
if _, found := as.path[dir]; found {
break
}
i := strings.LastIndex(dir, "/")
if i >= 0 {
dir, leaf = dir[:i], dir[i+1:]
} else {
dir, leaf = "", dir
}
dirs := as.path[dir]
for i, dir := range dirs {
if dir == leaf {
dirs = append(dirs[:i], dirs[i+1:]...)
break
}
}
if len(dirs) == 0 {
delete(as.path, dir)
} else {
as.path[dir] = dirs
}
}
}
// get an album by title
func (as *albums) get(title string) (album *api.Album, ok bool) {
as.mu.Lock()
defer as.mu.Unlock()
album, ok = as.byTitle[title]
return album, ok
}
// getDirs gets directories below an album path
func (as *albums) getDirs(albumPath string) (dirs []string, ok bool) {
as.mu.Lock()
defer as.mu.Unlock()
dirs, ok = as.path[albumPath]
return dirs, ok
}
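The dedupe logic in _add leans on an addID helper that this file only calls. A minimal sketch consistent with the package's TestAddID assertions further down; the exact separator format is taken from those tests:

package main

import "fmt"

// addID appends the unique album ID to a title so that two albums
// with the same name stay distinguishable as paths, e.g. "two {2}"
// and "two {2a}". An empty title becomes just "{ID}".
func addID(name string, ID string) string {
	if name == "" {
		return "{" + ID + "}"
	}
	return name + " {" + ID + "}"
}

func main() {
	fmt.Println(addID("potato", "123")) // potato {123}
	fmt.Println(addID("", "123"))       // {123}
}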

backend/googlephotos/albums_test.go

@@ -1,311 +0,0 @@
package googlephotos
import (
"testing"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/stretchr/testify/assert"
)
func TestNewAlbums(t *testing.T) {
albums := newAlbums()
assert.NotNil(t, albums.dupes)
assert.NotNil(t, albums.byID)
assert.NotNil(t, albums.byTitle)
assert.NotNil(t, albums.path)
}
func TestAlbumsAdd(t *testing.T) {
albums := newAlbums()
assert.Equal(t, map[string][]*api.Album{}, albums.dupes)
assert.Equal(t, map[string]*api.Album{}, albums.byID)
assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
assert.Equal(t, map[string][]string{}, albums.path)
a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one"},
}, albums.path)
a2 := &api.Album{
Title: "two",
ID: "2",
}
albums.add(a2)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"two": a2,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one", "two"},
}, albums.path)
// Add a duplicate
a2a := &api.Album{
Title: "two",
ID: "2a",
}
albums.add(a2a)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2, a2a},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
"2a": a2a,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one", "two {2}", "two {2a}"},
}, albums.path)
// Add a sub directory
a1sub := &api.Album{
Title: "one/sub",
ID: "1sub",
}
albums.add(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one", "two {2}", "two {2a}"},
"one": {"sub"},
}, albums.path)
// Add a weird path
a0 := &api.Album{
Title: "/../././..////.",
ID: "0",
}
albums.add(a0)
assert.Equal(t, map[string][]*api.Album{
"{0}": {a0},
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"0": a0,
"1": a1,
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"{0}": a0,
"one": a1,
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one", "two {2}", "two {2a}", "{0}"},
"one": {"sub"},
}, albums.path)
}
func TestAlbumsDel(t *testing.T) {
albums := newAlbums()
a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)
a2 := &api.Album{
Title: "two",
ID: "2",
}
albums.add(a2)
// Add a duplicate
a2a := &api.Album{
Title: "two",
ID: "2a",
}
albums.add(a2a)
// Add a sub directory
a1sub := &api.Album{
Title: "one/sub",
ID: "1sub",
}
albums.add(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one", "two {2}", "two {2a}"},
"one": {"sub"},
}, albums.path)
albums.del(a1)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one", "two {2}", "two {2a}"},
"one": {"sub"},
}, albums.path)
albums.del(a2)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one/sub": a1sub,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one", "two {2a}"},
"one": {"sub"},
}, albums.path)
albums.del(a2a)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one/sub": a1sub,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one"},
"one": {"sub"},
}, albums.path)
albums.del(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{}, albums.byID)
assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
assert.Equal(t, map[string][]string{}, albums.path)
}
func TestAlbumsGet(t *testing.T) {
albums := newAlbums()
a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)
album, ok := albums.get("one")
assert.Equal(t, true, ok)
assert.Equal(t, a1, album)
album, ok = albums.get("notfound")
assert.Equal(t, false, ok)
assert.Nil(t, album)
}
func TestAlbumsGetDirs(t *testing.T) {
albums := newAlbums()
a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)
dirs, ok := albums.getDirs("")
assert.Equal(t, true, ok)
assert.Equal(t, []string{"one"}, dirs)
dirs, ok = albums.getDirs("notfound")
assert.Equal(t, false, ok)
assert.Nil(t, dirs)
}

backend/googlephotos/api/types.go

@@ -1,190 +0,0 @@
package api
import (
"fmt"
"time"
)
// ErrorDetails in the internals of the Error type
type ErrorDetails struct {
Code int `json:"code"`
Message string `json:"message"`
Status string `json:"status"`
}
// Error is returned on errors
type Error struct {
Details ErrorDetails `json:"error"`
}
// Error satisfies error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s (%d %s)", e.Details.Message, e.Details.Code, e.Details.Status)
}
// Album of photos
type Album struct {
ID string `json:"id,omitempty"`
Title string `json:"title"`
ProductURL string `json:"productUrl,omitempty"`
MediaItemsCount string `json:"mediaItemsCount,omitempty"`
CoverPhotoBaseURL string `json:"coverPhotoBaseUrl,omitempty"`
CoverPhotoMediaItemID string `json:"coverPhotoMediaItemId,omitempty"`
IsWriteable bool `json:"isWriteable,omitempty"`
}
// ListAlbums is returned from albums.list and sharedAlbums.list
type ListAlbums struct {
Albums []Album `json:"albums"`
SharedAlbums []Album `json:"sharedAlbums"`
NextPageToken string `json:"nextPageToken"`
}
// CreateAlbum creates an Album
type CreateAlbum struct {
Album *Album `json:"album"`
}
// MediaItem is a photo or video
type MediaItem struct {
ID string `json:"id"`
ProductURL string `json:"productUrl"`
BaseURL string `json:"baseUrl"`
MimeType string `json:"mimeType"`
MediaMetadata struct {
CreationTime time.Time `json:"creationTime"`
Width string `json:"width"`
Height string `json:"height"`
Photo struct {
} `json:"photo"`
} `json:"mediaMetadata"`
Filename string `json:"filename"`
}
// MediaItems is returned from mediaitems.list, mediaitems.search
type MediaItems struct {
MediaItems []MediaItem `json:"mediaItems"`
NextPageToken string `json:"nextPageToken"`
}
// Content categories
// NONE Default content category. This category is ignored when any other category is used in the filter.
// LANDSCAPES Media items containing landscapes.
// RECEIPTS Media items containing receipts.
// CITYSCAPES Media items containing cityscapes.
// LANDMARKS Media items containing landmarks.
// SELFIES Media items that are selfies.
// PEOPLE Media items containing people.
// PETS Media items containing pets.
// WEDDINGS Media items from weddings.
// BIRTHDAYS Media items from birthdays.
// DOCUMENTS Media items containing documents.
// TRAVEL Media items taken during travel.
// ANIMALS Media items containing animals.
// FOOD Media items containing food.
// SPORT Media items from sporting events.
// NIGHT Media items taken at night.
// PERFORMANCES Media items from performances.
// WHITEBOARDS Media items containing whiteboards.
// SCREENSHOTS Media items that are screenshots.
// UTILITY Media items that are considered to be utility. These include, but aren't limited to documents, screenshots, whiteboards etc.
// ARTS Media items containing art.
// CRAFTS Media items containing crafts.
// FASHION Media items related to fashion.
// HOUSES Media items containing houses.
// GARDENS Media items containing gardens.
// FLOWERS Media items containing flowers.
// HOLIDAYS Media items taken of holidays.
// MediaTypes
// ALL_MEDIA Treated as if no filters are applied. All media types are included.
// VIDEO All media items that are considered videos. This also includes movies the user has created using the Google Photos app.
// PHOTO All media items that are considered photos. This includes .bmp, .gif, .ico, .jpg (and other spellings), .tiff, .webp and special photo types such as iOS live photos, Android motion photos, panoramas, photospheres.
// Features
// NONE Treated as if no filters are applied. All features are included.
// FAVORITES Media items that the user has marked as favorites in the Google Photos app.
// Date is used as part of SearchFilter
type Date struct {
Year int `json:"year,omitempty"`
Month int `json:"month,omitempty"`
Day int `json:"day,omitempty"`
}
// DateFilter is used to add date ranges to media item queries
type DateFilter struct {
Dates []Date `json:"dates,omitempty"`
Ranges []struct {
StartDate Date `json:"startDate,omitempty"`
EndDate Date `json:"endDate,omitempty"`
} `json:"ranges,omitempty"`
}
// ContentFilter is used to add content categories to media item queries
type ContentFilter struct {
IncludedContentCategories []string `json:"includedContentCategories,omitempty"`
ExcludedContentCategories []string `json:"excludedContentCategories,omitempty"`
}
// MediaTypeFilter is used to add media types to media item queries
type MediaTypeFilter struct {
MediaTypes []string `json:"mediaTypes,omitempty"`
}
// FeatureFilter is used to add features to media item queries
type FeatureFilter struct {
IncludedFeatures []string `json:"includedFeatures,omitempty"`
}
// Filters combines all the filter types for media item queries
type Filters struct {
DateFilter *DateFilter `json:"dateFilter,omitempty"`
ContentFilter *ContentFilter `json:"contentFilter,omitempty"`
MediaTypeFilter *MediaTypeFilter `json:"mediaTypeFilter,omitempty"`
FeatureFilter *FeatureFilter `json:"featureFilter,omitempty"`
IncludeArchivedMedia *bool `json:"includeArchivedMedia,omitempty"`
ExcludeNonAppCreatedData *bool `json:"excludeNonAppCreatedData,omitempty"`
}
// SearchFilter is used with mediaItems.search
type SearchFilter struct {
AlbumID string `json:"albumId,omitempty"`
PageSize int `json:"pageSize"`
PageToken string `json:"pageToken,omitempty"`
Filters *Filters `json:"filters,omitempty"`
}
// SimpleMediaItem is part of NewMediaItem
type SimpleMediaItem struct {
UploadToken string `json:"uploadToken"`
}
// NewMediaItem is a single media item for upload
type NewMediaItem struct {
Description string `json:"description"`
SimpleMediaItem SimpleMediaItem `json:"simpleMediaItem"`
}
// BatchCreateRequest creates media items from upload tokens
type BatchCreateRequest struct {
AlbumID string `json:"albumId,omitempty"`
NewMediaItems []NewMediaItem `json:"newMediaItems"`
}
// BatchCreateResponse is returned from BatchCreateRequest
type BatchCreateResponse struct {
NewMediaItemResults []struct {
UploadToken string `json:"uploadToken"`
Status struct {
Message string `json:"message"`
Code int `json:"code"`
} `json:"status"`
MediaItem MediaItem `json:"mediaItem"`
} `json:"newMediaItemResults"`
}
// BatchRemoveItems is for removing items from an album
type BatchRemoveItems struct {
MediaItemIds []string `json:"mediaItemIds"`
}
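These structs are plain JSON mappings, so the clearest way to see the wire format is to marshal one. A cut-down, self-contained sketch (types copied from above, trimmed to the date-filter path) of a mediaItems.search body restricted to a single year:

package main

import (
	"encoding/json"
	"fmt"
)

// Cut-down copies of the types above, just enough to show the wire
// format of a search restricted by date.
type Date struct {
	Year  int `json:"year,omitempty"`
	Month int `json:"month,omitempty"`
	Day   int `json:"day,omitempty"`
}

type DateFilter struct {
	Dates []Date `json:"dates,omitempty"`
}

type Filters struct {
	DateFilter *DateFilter `json:"dateFilter,omitempty"`
}

type SearchFilter struct {
	AlbumID   string   `json:"albumId,omitempty"`
	PageSize  int      `json:"pageSize"`
	PageToken string   `json:"pageToken,omitempty"`
	Filters   *Filters `json:"filters,omitempty"`
}

func main() {
	sf := SearchFilter{
		PageSize: 100,
		Filters: &Filters{
			DateFilter: &DateFilter{Dates: []Date{{Year: 2000}}},
		},
	}
	out, _ := json.MarshalIndent(sf, "", "  ")
	fmt.Println(string(out))
	// {"pageSize": 100, "filters": {"dateFilter": {"dates": [{"year": 2000}]}}}
}

The omitempty tags keep unset months and days out of the request, which is why one filter builder can serve the by-year, by-month and by-day directory levels.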

File diff suppressed because it is too large.

backend/googlephotos/googlephotos_test.go

@@ -1,297 +0,0 @@
package googlephotos
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"path"
"testing"
"time"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
// We have two different files here as Google Photos will uniq
// them otherwise which confuses the tests as the filename is
// unexpected.
fileNameAlbum = "rclone-test-image1.jpg"
fileNameUpload = "rclone-test-image2.jpg"
)
func TestIntegration(t *testing.T) {
ctx := context.Background()
fstest.Initialise()
// Create Fs
if *fstest.RemoteName == "" {
*fstest.RemoteName = "TestGooglePhotos:"
}
f, err := fs.NewFs(*fstest.RemoteName)
if err == fs.ErrorNotFoundInConfigFile {
t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
}
require.NoError(t, err)
// Create local Fs pointing at testfiles
localFs, err := fs.NewFs("testfiles")
require.NoError(t, err)
t.Run("CreateAlbum", func(t *testing.T) {
albumName := "album/rclone-test-" + random.String(24)
err = f.Mkdir(ctx, albumName)
require.NoError(t, err)
remote := albumName + "/" + fileNameAlbum
t.Run("PutFile", func(t *testing.T) {
srcObj, err := localFs.NewObject(ctx, fileNameAlbum)
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()
remoteWithID := addFileID(remote, dstObj.(*Object).id)
t.Run("ObjectFs", func(t *testing.T) {
assert.Equal(t, f, dstObj.Fs())
})
t.Run("ObjectString", func(t *testing.T) {
assert.Equal(t, remote, dstObj.String())
assert.Equal(t, "<nil>", (*Object)(nil).String())
})
t.Run("ObjectHash", func(t *testing.T) {
h, err := dstObj.Hash(ctx, hash.MD5)
assert.Equal(t, "", h)
assert.Equal(t, hash.ErrUnsupported, err)
})
t.Run("ObjectSize", func(t *testing.T) {
assert.Equal(t, int64(-1), dstObj.Size())
f.(*Fs).opt.ReadSize = true
defer func() {
f.(*Fs).opt.ReadSize = false
}()
size := dstObj.Size()
assert.True(t, size > 1000, fmt.Sprintf("Size too small %d", size))
})
t.Run("ObjectSetModTime", func(t *testing.T) {
err := dstObj.SetModTime(ctx, time.Now())
assert.Equal(t, fs.ErrorCantSetModTime, err)
})
t.Run("ObjectStorable", func(t *testing.T) {
assert.True(t, dstObj.Storable())
})
t.Run("ObjectOpen", func(t *testing.T) {
in, err := dstObj.Open(ctx)
require.NoError(t, err)
buf, err := ioutil.ReadAll(in)
require.NoError(t, err)
require.NoError(t, in.Close())
assert.True(t, len(buf) > 1000)
contentType := http.DetectContentType(buf[:512])
assert.Equal(t, "image/jpeg", contentType)
})
t.Run("CheckFileInAlbum", func(t *testing.T) {
entries, err := f.List(ctx, albumName)
require.NoError(t, err)
assert.Equal(t, 1, len(entries))
assert.Equal(t, remote, entries[0].Remote())
assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
})
// Check it is there in the date/month/year hierarchy
// 2013-07-13 is the creation date of the folder
checkPresent := func(t *testing.T, objPath string) {
entries, err := f.List(ctx, objPath)
require.NoError(t, err)
found := false
for _, entry := range entries {
leaf := path.Base(entry.Remote())
if leaf == fileNameAlbum || leaf == remoteWithID {
found = true
}
}
assert.True(t, found, fmt.Sprintf("didn't find %q in %q", fileNameAlbum, objPath))
}
t.Run("CheckInByYear", func(t *testing.T) {
checkPresent(t, "media/by-year/2013")
})
t.Run("CheckInByMonth", func(t *testing.T) {
checkPresent(t, "media/by-month/2013/2013-07")
})
t.Run("CheckInByDay", func(t *testing.T) {
checkPresent(t, "media/by-day/2013/2013-07-26")
})
t.Run("NewObject", func(t *testing.T) {
o, err := f.NewObject(ctx, remote)
require.NoError(t, err)
require.Equal(t, remote, o.Remote())
})
t.Run("NewObjectWithID", func(t *testing.T) {
o, err := f.NewObject(ctx, remoteWithID)
require.NoError(t, err)
require.Equal(t, remoteWithID, o.Remote())
})
t.Run("NewFsIsFile", func(t *testing.T) {
fNew, err := fs.NewFs(*fstest.RemoteName + remote)
assert.Equal(t, fs.ErrorIsFile, err)
leaf := path.Base(remote)
o, err := fNew.NewObject(ctx, leaf)
require.NoError(t, err)
require.Equal(t, leaf, o.Remote())
})
t.Run("RemoveFileFromAlbum", func(t *testing.T) {
err = dstObj.Remove(ctx)
require.NoError(t, err)
time.Sleep(time.Second)
// Check album empty
entries, err := f.List(ctx, albumName)
require.NoError(t, err)
assert.Equal(t, 0, len(entries))
})
})
// remove the album
err = f.Rmdir(ctx, albumName)
require.Error(t, err) // FIXME doesn't work yet
})
t.Run("UploadMkdir", func(t *testing.T) {
assert.NoError(t, f.Mkdir(ctx, "upload/dir"))
assert.NoError(t, f.Mkdir(ctx, "upload/dir/subdir"))
t.Run("List", func(t *testing.T) {
entries, err := f.List(ctx, "upload")
require.NoError(t, err)
assert.Equal(t, 1, len(entries))
assert.Equal(t, "upload/dir", entries[0].Remote())
entries, err = f.List(ctx, "upload/dir")
require.NoError(t, err)
assert.Equal(t, 1, len(entries))
assert.Equal(t, "upload/dir/subdir", entries[0].Remote())
})
t.Run("Rmdir", func(t *testing.T) {
assert.NoError(t, f.Rmdir(ctx, "upload/dir/subdir"))
assert.NoError(t, f.Rmdir(ctx, "upload/dir"))
})
t.Run("ListEmpty", func(t *testing.T) {
entries, err := f.List(ctx, "upload")
require.NoError(t, err)
assert.Equal(t, 0, len(entries))
_, err = f.List(ctx, "upload/dir")
assert.Equal(t, fs.ErrorDirNotFound, err)
})
})
t.Run("Upload", func(t *testing.T) {
uploadDir := "upload/dir/subdir"
remote := path.Join(uploadDir, fileNameUpload)
srcObj, err := localFs.NewObject(ctx, fileNameUpload)
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()
remoteWithID := addFileID(remote, dstObj.(*Object).id)
t.Run("List", func(t *testing.T) {
entries, err := f.List(ctx, uploadDir)
require.NoError(t, err)
require.Equal(t, 1, len(entries))
assert.Equal(t, remote, entries[0].Remote())
assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
})
t.Run("NewObject", func(t *testing.T) {
o, err := f.NewObject(ctx, remote)
require.NoError(t, err)
require.Equal(t, remote, o.Remote())
})
t.Run("NewObjectWithID", func(t *testing.T) {
o, err := f.NewObject(ctx, remoteWithID)
require.NoError(t, err)
require.Equal(t, remoteWithID, o.Remote())
})
})
t.Run("Name", func(t *testing.T) {
assert.Equal(t, (*fstest.RemoteName)[:len(*fstest.RemoteName)-1], f.Name())
})
t.Run("Root", func(t *testing.T) {
assert.Equal(t, "", f.Root())
})
t.Run("String", func(t *testing.T) {
assert.Equal(t, `Google Photos path ""`, f.String())
})
t.Run("Features", func(t *testing.T) {
features := f.Features()
assert.False(t, features.CaseInsensitive)
assert.True(t, features.ReadMimeType)
})
t.Run("Precision", func(t *testing.T) {
assert.Equal(t, fs.ModTimeNotSupported, f.Precision())
})
t.Run("Hashes", func(t *testing.T) {
assert.Equal(t, hash.Set(hash.None), f.Hashes())
})
}
func TestAddID(t *testing.T) {
assert.Equal(t, "potato {123}", addID("potato", "123"))
assert.Equal(t, "{123}", addID("", "123"))
}
func TestFileAddID(t *testing.T) {
assert.Equal(t, "potato {123}.txt", addFileID("potato.txt", "123"))
assert.Equal(t, "potato {123}", addFileID("potato", "123"))
assert.Equal(t, "{123}", addFileID("", "123"))
}
func TestFindID(t *testing.T) {
assert.Equal(t, "", findID("potato"))
ID := "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
assert.Equal(t, ID, findID("potato {"+ID+"}.txt"))
ID = ID[1:]
assert.Equal(t, "", findID("potato {"+ID+"}.txt"))
}
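TestFileAddID and TestFindID above pin down behaviour without showing the helpers' bodies. A sketch that satisfies exactly these assertions; the ID alphabet [A-Za-z0-9_-] and the fixed 55-character length are assumptions inferred from the 55-versus-54 character cases in TestFindID:

package main

import (
	"fmt"
	"path"
	"regexp"
	"strings"
)

// addFileID inserts " {ID}" before the extension, so duplicate
// filenames from Google Photos stay unique as rclone remotes.
func addFileID(remote, ID string) string {
	ext := path.Ext(remote)
	base := strings.TrimSuffix(remote, ext)
	if base == "" {
		return "{" + ID + "}" + ext
	}
	return base + " {" + ID + "}" + ext
}

// idRe requires exactly 55 ID characters between the braces, which is
// why the 54-character case in TestFindID returns "".
var idRe = regexp.MustCompile(`\{([A-Za-z0-9_-]{55})\}`)

// findID extracts a previously added ID, or returns "" if none is present.
func findID(remote string) string {
	if m := idRe.FindStringSubmatch(remote); m != nil {
		return m[1]
	}
	return ""
}

func main() {
	fmt.Println(addFileID("potato.txt", "123")) // potato {123}.txt
	fmt.Println(findID("potato") == "")         // true
}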

backend/googlephotos/pattern.go

@@ -1,376 +0,0 @@
// Store the parsing of file patterns
package googlephotos
import (
"context"
"fmt"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
)
// lister describes the subset of the interfaces on Fs needed for the
// file pattern parsing
type lister interface {
listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error)
listAlbums(ctx context.Context, shared bool) (all *albums, err error)
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
dirTime() time.Time
startYear() int
}
// dirPattern describes a single directory pattern
type dirPattern struct {
re string // match for the path
match *regexp.Regexp // compiled match
canUpload bool // true if can upload here
canMkdir bool // true if can make a directory here
isFile bool // true if this is a file
isUpload bool // true if this is the upload directory
// function to turn a match into DirEntries
toEntries func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error)
}
// dirPatterns is a slice of all the directory patterns
type dirPatterns []dirPattern
// patterns describes the layout of the google photos backend file system.
//
// NB no trailing / on paths
var patterns = dirPatterns{
{
re: `^$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
return fs.DirEntries{
fs.NewDir(prefix+"media", f.dirTime()),
fs.NewDir(prefix+"album", f.dirTime()),
fs.NewDir(prefix+"shared-album", f.dirTime()),
fs.NewDir(prefix+"upload", f.dirTime()),
fs.NewDir(prefix+"feature", f.dirTime()),
}, nil
},
},
{
re: `^upload(?:/(.*))?$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
return f.listUploads(ctx, match[0])
},
canUpload: true,
canMkdir: true,
isUpload: true,
},
{
re: `^upload/(.*)$`,
isFile: true,
canUpload: true,
isUpload: true,
},
{
re: `^media$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
return fs.DirEntries{
fs.NewDir(prefix+"all", f.dirTime()),
fs.NewDir(prefix+"by-year", f.dirTime()),
fs.NewDir(prefix+"by-month", f.dirTime()),
fs.NewDir(prefix+"by-day", f.dirTime()),
}, nil
},
},
{
re: `^media/all$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
return f.listDir(ctx, prefix, api.SearchFilter{})
},
},
{
re: `^media/all/([^/]+)$`,
isFile: true,
},
{
re: `^media/by-year$`,
toEntries: years,
},
{
re: `^media/by-year/(\d{4})$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
filter, err := yearMonthDayFilter(ctx, f, match)
if err != nil {
return nil, err
}
return f.listDir(ctx, prefix, filter)
},
},
{
re: `^media/by-year/(\d{4})/([^/]+)$`,
isFile: true,
},
{
re: `^media/by-month$`,
toEntries: years,
},
{
re: `^media/by-month/(\d{4})$`,
toEntries: months,
},
{
re: `^media/by-month/\d{4}/(\d{4})-(\d{2})$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
filter, err := yearMonthDayFilter(ctx, f, match)
if err != nil {
return nil, err
}
return f.listDir(ctx, prefix, filter)
},
},
{
re: `^media/by-month/\d{4}/(\d{4})-(\d{2})/([^/]+)$`,
isFile: true,
},
{
re: `^media/by-day$`,
toEntries: years,
},
{
re: `^media/by-day/(\d{4})$`,
toEntries: days,
},
{
re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
filter, err := yearMonthDayFilter(ctx, f, match)
if err != nil {
return nil, err
}
return f.listDir(ctx, prefix, filter)
},
},
{
re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})/([^/]+)$`,
isFile: true,
},
{
re: `^album$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return albumsToEntries(ctx, f, false, prefix, "")
},
},
{
re: `^album/(.+)$`,
canMkdir: true,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return albumsToEntries(ctx, f, false, prefix, match[1])
},
},
{
re: `^album/(.+?)/([^/]+)$`,
canUpload: true,
isFile: true,
},
{
re: `^shared-album$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return albumsToEntries(ctx, f, true, prefix, "")
},
},
{
re: `^shared-album/(.+)$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return albumsToEntries(ctx, f, true, prefix, match[1])
},
},
{
re: `^shared-album/(.+?)/([^/]+)$`,
isFile: true,
},
{
re: `^feature$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return fs.DirEntries{
fs.NewDir(prefix+"favorites", f.dirTime()),
}, nil
},
},
{
re: `^feature/favorites$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
filter := featureFilter(ctx, f, match)
if err != nil {
return nil, err
}
return f.listDir(ctx, prefix, filter)
},
},
{
re: `^feature/favorites/([^/]+)$`,
isFile: true,
},
}.mustCompile()
// mustCompile compiles the regexps in the dirPatterns
func (ds dirPatterns) mustCompile() dirPatterns {
for i := range ds {
pattern := &ds[i]
pattern.match = regexp.MustCompile(pattern.re)
}
return ds
}
// match finds the path passed in the matching structure and
// returns the parameters and a pointer to the match, or nil.
func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []string, prefix string, pattern *dirPattern) {
itemPath = strings.Trim(itemPath, "/")
absPath := path.Join(root, itemPath)
prefix = strings.Trim(absPath[len(root):], "/")
if prefix != "" {
prefix += "/"
}
for i := range ds {
pattern = &ds[i]
if pattern.isFile != isFile {
continue
}
match = pattern.match.FindStringSubmatch(absPath)
if match != nil {
return
}
}
return nil, "", nil
}
// Return the years from startYear to today
func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
currentYear := f.dirTime().Year()
for year := f.startYear(); year <= currentYear; year++ {
entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime()))
}
return entries, nil
}
// Return the months in a given year
func months(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
year := match[1]
for month := 1; month <= 12; month++ {
entries = append(entries, fs.NewDir(fmt.Sprintf("%s%s-%02d", prefix, year, month), f.dirTime()))
}
return entries, nil
}
// Return the days in a given year
func days(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
year := match[1]
current, err := time.Parse("2006", year)
if err != nil {
return nil, errors.Errorf("bad year %q", match[1])
}
currentYear := current.Year()
for current.Year() == currentYear {
entries = append(entries, fs.NewDir(prefix+current.Format("2006-01-02"), f.dirTime()))
current = current.AddDate(0, 0, 1)
}
return entries, nil
}
// This creates a search filter on year/month/day as provided
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
year, err := strconv.Atoi(match[1])
if err != nil || year < 1000 || year > 3000 {
return sf, errors.Errorf("bad year %q", match[1])
}
sf = api.SearchFilter{
Filters: &api.Filters{
DateFilter: &api.DateFilter{
Dates: []api.Date{
{
Year: year,
},
},
},
},
}
if len(match) >= 3 {
month, err := strconv.Atoi(match[2])
if err != nil || month < 1 || month > 12 {
return sf, errors.Errorf("bad month %q", match[2])
}
sf.Filters.DateFilter.Dates[0].Month = month
}
if len(match) >= 4 {
day, err := strconv.Atoi(match[3])
if err != nil || day < 1 || day > 31 {
return sf, errors.Errorf("bad day %q", match[3])
}
sf.Filters.DateFilter.Dates[0].Day = day
}
return sf, nil
}
// featureFilter creates a filter for the Feature enum
//
// The API only supports one feature, FAVORITES, so hardcode that feature
//
// https://developers.google.com/photos/library/reference/rest/v1/mediaItems/search#FeatureFilter
func featureFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter) {
sf = api.SearchFilter{
Filters: &api.Filters{
FeatureFilter: &api.FeatureFilter{
IncludedFeatures: []string{
"FAVORITES",
},
},
},
}
return sf
}
// Turns an albumPath into entries
//
// These can either be synthetic directory entries if the album path
// is a prefix of another album, or actual files, or a combination of
// the two.
func albumsToEntries(ctx context.Context, f lister, shared bool, prefix string, albumPath string) (entries fs.DirEntries, err error) {
albums, err := f.listAlbums(ctx, shared)
if err != nil {
return nil, err
}
// Put in the directories
dirs, foundAlbumPath := albums.getDirs(albumPath)
if foundAlbumPath {
for _, dir := range dirs {
d := fs.NewDir(prefix+dir, f.dirTime())
dirPath := path.Join(albumPath, dir)
// if this dir is an album add more special stuff
album, ok := albums.get(dirPath)
if ok {
count, err := strconv.ParseInt(album.MediaItemsCount, 10, 64)
if err != nil {
fs.Debugf(f, "Error reading media count: %v", err)
}
d.SetID(album.ID).SetItems(count)
}
entries = append(entries, d)
}
}
// if this is an album then return a filter to list it
album, foundAlbum := albums.get(albumPath)
if foundAlbum {
filter := api.SearchFilter{AlbumID: album.ID}
newEntries, err := f.listDir(ctx, prefix, filter)
if err != nil {
return nil, err
}
entries = append(entries, newEntries...)
}
if !foundAlbumPath && !foundAlbum && albumPath != "" {
return nil, fs.ErrorDirNotFound
}
return entries, nil
}
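The whole file is one regexp-driven dispatch table: match walks the patterns in order and hands the submatches to the first entry that fits, with isFile separating file lookups from directory listings. A stripped-down, runnable sketch of the same shape; the table contents and handler strings are illustrative:

package main

import (
	"fmt"
	"regexp"
)

// pattern pairs a path regexp with a handler, mirroring dirPattern above.
type pattern struct {
	re     *regexp.Regexp
	handle func(match []string) string
}

var table = []pattern{
	{regexp.MustCompile(`^media/by-year/(\d{4})$`), func(m []string) string {
		return "list year " + m[1]
	}},
	{regexp.MustCompile(`^album/(.+)$`), func(m []string) string {
		return "list album " + m[1]
	}},
}

// match returns the result of the first handler whose regexp accepts the path.
func match(path string) string {
	for _, p := range table {
		if m := p.re.FindStringSubmatch(path); m != nil {
			return p.handle(m)
		}
	}
	return "not found"
}

func main() {
	fmt.Println(match("media/by-year/2013"))  // list year 2013
	fmt.Println(match("album/holiday/snaps")) // list album holiday/snaps
}

Order matters: the first matching entry wins, which is why the tests that follow assert on concrete indexes such as &patterns[24].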

backend/googlephotos/pattern_test.go

@@ -1,532 +0,0 @@
package googlephotos
import (
"context"
"fmt"
"testing"
"time"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// time for directories
var startTime = fstest.Time("2019-06-24T15:53:05.999999999Z")
// mock Fs for testing patterns
type testLister struct {
t *testing.T
albums *albums
names []string
uploaded dirtree.DirTree
}
// newTestLister makes a mock for testing
func newTestLister(t *testing.T) *testLister {
return &testLister{
t: t,
albums: newAlbums(),
uploaded: dirtree.New(),
}
}
// mock listDir for testing
func (f *testLister) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
for _, name := range f.names {
entries = append(entries, mockobject.New(prefix+name))
}
return entries, nil
}
// mock listAlbums for testing
func (f *testLister) listAlbums(ctx context.Context, shared bool) (all *albums, err error) {
return f.albums, nil
}
// mock listUploads for testing
func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries, _ = f.uploaded[dir]
return entries, nil
}
// mock dirTime for testing
func (f *testLister) dirTime() time.Time {
return startTime
}
// mock startYear for testing
func (f *testLister) startYear() int {
return 2000
}
func TestPatternMatch(t *testing.T) {
for testNumber, test := range []struct {
// input
root string
itemPath string
isFile bool
// expected output
wantMatch []string
wantPrefix string
wantPattern *dirPattern
}{
{
root: "",
itemPath: "",
isFile: false,
wantMatch: []string{""},
wantPrefix: "",
wantPattern: &patterns[0],
},
{
root: "",
itemPath: "",
isFile: true,
wantMatch: nil,
wantPrefix: "",
wantPattern: nil,
},
{
root: "upload",
itemPath: "",
isFile: false,
wantMatch: []string{"upload", ""},
wantPrefix: "",
wantPattern: &patterns[1],
},
{
root: "upload/dir",
itemPath: "",
isFile: false,
wantMatch: []string{"upload/dir", "dir"},
wantPrefix: "",
wantPattern: &patterns[1],
},
{
root: "upload/file.jpg",
itemPath: "",
isFile: true,
wantMatch: []string{"upload/file.jpg", "file.jpg"},
wantPrefix: "",
wantPattern: &patterns[2],
},
{
root: "media",
itemPath: "",
isFile: false,
wantMatch: []string{"media"},
wantPrefix: "",
wantPattern: &patterns[3],
},
{
root: "",
itemPath: "media",
isFile: false,
wantMatch: []string{"media"},
wantPrefix: "media/",
wantPattern: &patterns[3],
},
{
root: "media/all",
itemPath: "",
isFile: false,
wantMatch: []string{"media/all"},
wantPrefix: "",
wantPattern: &patterns[4],
},
{
root: "media",
itemPath: "all",
isFile: false,
wantMatch: []string{"media/all"},
wantPrefix: "all/",
wantPattern: &patterns[4],
},
{
root: "media/all",
itemPath: "file.jpg",
isFile: true,
wantMatch: []string{"media/all/file.jpg", "file.jpg"},
wantPrefix: "file.jpg/",
wantPattern: &patterns[5],
},
{
root: "",
itemPath: "feature",
isFile: false,
wantMatch: []string{"feature"},
wantPrefix: "feature/",
wantPattern: &patterns[23],
},
{
root: "feature/favorites",
itemPath: "",
isFile: false,
wantMatch: []string{"feature/favorites"},
wantPrefix: "",
wantPattern: &patterns[24],
},
{
root: "feature",
itemPath: "favorites",
isFile: false,
wantMatch: []string{"feature/favorites"},
wantPrefix: "favorites/",
wantPattern: &patterns[24],
},
{
root: "feature/favorites",
itemPath: "file.jpg",
isFile: true,
wantMatch: []string{"feature/favorites/file.jpg", "file.jpg"},
wantPrefix: "file.jpg/",
wantPattern: &patterns[25],
},
} {
t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q,isFile=%v", testNumber, test.root, test.itemPath, test.isFile), func(t *testing.T) {
gotMatch, gotPrefix, gotPattern := patterns.match(test.root, test.itemPath, test.isFile)
assert.Equal(t, test.wantMatch, gotMatch)
assert.Equal(t, test.wantPrefix, gotPrefix)
assert.Equal(t, test.wantPattern, gotPattern)
})
}
}
func TestPatternMatchToEntries(t *testing.T) {
ctx := context.Background()
f := newTestLister(t)
f.names = []string{"file.jpg"}
f.albums.add(&api.Album{
ID: "1",
Title: "sub/one",
})
f.albums.add(&api.Album{
ID: "2",
Title: "sub",
})
f.uploaded.AddEntry(mockobject.New("upload/file1.jpg"))
f.uploaded.AddEntry(mockobject.New("upload/dir/file2.jpg"))
for testNumber, test := range []struct {
// input
root string
itemPath string
// expected output
wantMatch []string
wantPrefix string
remotes []string
}{
{
root: "",
itemPath: "",
wantMatch: []string{""},
wantPrefix: "",
remotes: []string{"media/", "album/", "shared-album/", "upload/"},
},
{
root: "upload",
itemPath: "",
wantMatch: []string{"upload", ""},
wantPrefix: "",
remotes: []string{"upload/file1.jpg", "upload/dir/"},
},
{
root: "upload",
itemPath: "dir",
wantMatch: []string{"upload/dir", "dir"},
wantPrefix: "dir/",
remotes: []string{"upload/dir/file2.jpg"},
},
{
root: "media",
itemPath: "",
wantMatch: []string{"media"},
wantPrefix: "",
remotes: []string{"all/", "by-year/", "by-month/", "by-day/"},
},
{
root: "media/all",
itemPath: "",
wantMatch: []string{"media/all"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "media",
itemPath: "all",
wantMatch: []string{"media/all"},
wantPrefix: "all/",
remotes: []string{"all/file.jpg"},
},
{
root: "media/by-year",
itemPath: "",
wantMatch: []string{"media/by-year"},
wantPrefix: "",
remotes: []string{"2000/", "2001/", "2002/", "2003/"},
},
{
root: "media/by-year/2000",
itemPath: "",
wantMatch: []string{"media/by-year/2000", "2000"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "media/by-month",
itemPath: "",
wantMatch: []string{"media/by-month"},
wantPrefix: "",
remotes: []string{"2000/", "2001/", "2002/", "2003/"},
},
{
root: "media/by-month/2001",
itemPath: "",
wantMatch: []string{"media/by-month/2001", "2001"},
wantPrefix: "",
remotes: []string{"2001-01/", "2001-02/", "2001-03/", "2001-04/"},
},
{
root: "media/by-month/2001/2001-01",
itemPath: "",
wantMatch: []string{"media/by-month/2001/2001-01", "2001", "01"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "media/by-day",
itemPath: "",
wantMatch: []string{"media/by-day"},
wantPrefix: "",
remotes: []string{"2000/", "2001/", "2002/", "2003/"},
},
{
root: "media/by-day/2001",
itemPath: "",
wantMatch: []string{"media/by-day/2001", "2001"},
wantPrefix: "",
remotes: []string{"2001-01-01/", "2001-01-02/", "2001-01-03/", "2001-01-04/"},
},
{
root: "media/by-day/2001/2001-01-02",
itemPath: "",
wantMatch: []string{"media/by-day/2001/2001-01-02", "2001", "01", "02"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "album",
itemPath: "",
wantMatch: []string{"album"},
wantPrefix: "",
remotes: []string{"sub/"},
},
{
root: "album/sub",
itemPath: "",
wantMatch: []string{"album/sub", "sub"},
wantPrefix: "",
remotes: []string{"one/", "file.jpg"},
},
{
root: "album/sub/one",
itemPath: "",
wantMatch: []string{"album/sub/one", "sub/one"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "shared-album",
itemPath: "",
wantMatch: []string{"shared-album"},
wantPrefix: "",
remotes: []string{"sub/"},
},
{
root: "shared-album/sub",
itemPath: "",
wantMatch: []string{"shared-album/sub", "sub"},
wantPrefix: "",
remotes: []string{"one/", "file.jpg"},
},
{
root: "shared-album/sub/one",
itemPath: "",
wantMatch: []string{"shared-album/sub/one", "sub/one"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
} {
t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q", testNumber, test.root, test.itemPath), func(t *testing.T) {
match, prefix, pattern := patterns.match(test.root, test.itemPath, false)
assert.Equal(t, test.wantMatch, match)
assert.Equal(t, test.wantPrefix, prefix)
assert.NotNil(t, pattern)
assert.NotNil(t, pattern.toEntries)
entries, err := pattern.toEntries(ctx, f, prefix, match)
assert.NoError(t, err)
var remotes = []string{}
for _, entry := range entries {
remote := entry.Remote()
if _, isDir := entry.(fs.Directory); isDir {
remote += "/"
}
remotes = append(remotes, remote)
if len(remotes) >= 4 {
break // only test first 4 entries
}
}
assert.Equal(t, test.remotes, remotes)
})
}
}
func TestPatternYears(t *testing.T) {
f := newTestLister(t)
entries, err := years(context.Background(), f, "potato/", nil)
require.NoError(t, err)
year := 2000
for _, entry := range entries {
assert.Equal(t, "potato/"+fmt.Sprint(year), entry.Remote())
year++
}
}
func TestPatternMonths(t *testing.T) {
f := newTestLister(t)
entries, err := months(context.Background(), f, "potato/", []string{"", "2020"})
require.NoError(t, err)
assert.Equal(t, 12, len(entries))
for i, entry := range entries {
assert.Equal(t, fmt.Sprintf("potato/2020-%02d", i+1), entry.Remote())
}
}
func TestPatternDays(t *testing.T) {
f := newTestLister(t)
entries, err := days(context.Background(), f, "potato/", []string{"", "2020"})
require.NoError(t, err)
assert.Equal(t, 366, len(entries))
assert.Equal(t, "potato/2020-01-01", entries[0].Remote())
assert.Equal(t, "potato/2020-12-31", entries[len(entries)-1].Remote())
}
func TestPatternYearMonthDayFilter(t *testing.T) {
ctx := context.Background()
f := newTestLister(t)
// Years
sf, err := yearMonthDayFilter(ctx, f, []string{"", "2000"})
require.NoError(t, err)
assert.Equal(t, api.SearchFilter{
Filters: &api.Filters{
DateFilter: &api.DateFilter{
Dates: []api.Date{
{
Year: 2000,
},
},
},
},
}, sf)
_, err = yearMonthDayFilter(ctx, f, []string{"", "potato"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "999"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "4000"})
require.Error(t, err)
// Months
sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01"})
require.NoError(t, err)
assert.Equal(t, api.SearchFilter{
Filters: &api.Filters{
DateFilter: &api.DateFilter{
Dates: []api.Date{
{
Month: 1,
Year: 2000,
},
},
},
},
}, sf)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "potato"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "0"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "13"})
require.Error(t, err)
// Days
sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "02"})
require.NoError(t, err)
assert.Equal(t, api.SearchFilter{
Filters: &api.Filters{
DateFilter: &api.DateFilter{
Dates: []api.Date{
{
Day: 2,
Month: 1,
Year: 2000,
},
},
},
},
}, sf)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "potato"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "0"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "32"})
require.Error(t, err)
}
func TestPatternAlbumsToEntries(t *testing.T) {
f := newTestLister(t)
ctx := context.Background()
_, err := albumsToEntries(ctx, f, false, "potato/", "sub")
assert.Equal(t, fs.ErrorDirNotFound, err)
f.albums.add(&api.Album{
ID: "1",
Title: "sub/one",
})
entries, err := albumsToEntries(ctx, f, false, "potato/", "sub")
assert.NoError(t, err)
assert.Equal(t, 1, len(entries))
assert.Equal(t, "potato/one", entries[0].Remote())
_, ok := entries[0].(fs.Directory)
assert.Equal(t, true, ok)
f.albums.add(&api.Album{
ID: "1",
Title: "sub",
})
f.names = []string{"file.jpg"}
entries, err = albumsToEntries(ctx, f, false, "potato/", "sub")
assert.NoError(t, err)
assert.Equal(t, 2, len(entries))
assert.Equal(t, "potato/one", entries[0].Remote())
_, ok = entries[0].(fs.Directory)
assert.Equal(t, true, ok)
assert.Equal(t, "potato/file.jpg", entries[1].Remote())
_, ok = entries[1].(fs.Object)
assert.Equal(t, true, ok)
}

backend/googlephotos/testfiles/rclone-test-image1.jpg: binary file not shown (before: 16 KiB; removed in this comparison).

backend/googlephotos/testfiles/rclone-test-image2.jpg: binary file not shown (before: 16 KiB; removed in this comparison).

Some files were not shown because too many files have changed in this diff.