Mirror of https://github.com/rclone/rclone.git, synced 2025-12-26 13:13:31 +00:00

Compare commits


1 commit

Nick Craig-Wood, 77b1eaeffe: drive: implement --drive-untrash flag - FIXME needs docs (2018-07-18 22:35:56 +01:00)

8068 changed files with 5206781 additions and 350308 deletions

appveyor.yml

@@ -2,10 +2,7 @@ version: "{build}"
os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\rclone\rclone
cache:
- '%LocalAppData%\go-build'
clone_folder: c:\gopath\src\github.com\ncw\rclone
environment:
GOPATH: C:\gopath
@@ -16,7 +13,7 @@ environment:
PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
PATH: '%PATHCC64%'
RCLONE_CONFIG_PASS:
secure: sq9CPBbwaeKJv+yd24U44neORYPQVy6jsjnQptC+5yk=
secure: HbzxSy9zQ8NYWN9NNPf6ALQO9Q0mwRNqwehsLcOEHy0=
install:
- choco install winfsp -y
@@ -46,4 +43,4 @@ artifacts:
- path: build/*-v*.zip
deploy_script:
- IF "%APPVEYOR_REPO_NAME%" == "rclone/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
- IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload

.circleci/config.yml

@@ -1,4 +1,3 @@
---
version: 2
jobs:
@@ -6,7 +5,7 @@ jobs:
build:
machine: true
working_directory: ~/.go_workspace/src/github.com/rclone/rclone
working_directory: ~/.go_workspace/src/github.com/ncw/rclone
steps:
- checkout
@@ -14,10 +13,10 @@ jobs:
- run:
name: Cross-compile rclone
command: |
docker pull rclone/xgo-cgofuse
docker pull billziss/xgo-cgofuse
go get -v github.com/karalabe/xgo
xgo \
--image=rclone/xgo-cgofuse \
--image=billziss/xgo-cgofuse \
--targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-tags cmount \
.
@@ -30,21 +29,6 @@ jobs:
command: |
mkdir -p /tmp/rclone.dist
cp -R rclone-* /tmp/rclone.dist
mkdir build
cp -R rclone-* build/
- run:
name: Build rclone
command: |
go version
go build
- run:
name: Upload artifacts
command: |
if [[ $CIRCLE_PULL_REQUEST != "" ]]; then
make circleci_upload
fi
- store_artifacts:
path: /tmp/rclone.dist

.gitattributes (vendored, 7 lines changed)

@@ -1,7 +0,0 @@
# Ignore generated files in GitHub language statistics and diffs
/MANUAL.* linguist-generated=true
/rclone.1 linguist-generated=true
# Don't fiddle with the line endings of test data
**/testdata/** -text
**/test/** -text

.github/ISSUE_TEMPLATE.md

@@ -1,31 +0,0 @@
<!--
Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
https://forum.rclone.org/
instead of filing an issue for a quick response.
If you are reporting a bug or asking for a new feature then please use one of the templates here:
https://github.com/rclone/rclone/issues/new
otherwise fill in the form below.
Thank you
The Rclone Developers
-->
#### Output of `rclone version`
#### Describe the issue

.github/ISSUE_TEMPLATE/Bug.md

@@ -1,50 +0,0 @@
---
name: Bug report
about: Report a problem with rclone
---
<!--
Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
https://forum.rclone.org/
instead of filing an issue for a quick response.
If you think you might have found a bug, please can you try to replicate it with the latest beta?
https://beta.rclone.org/
If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
Thank you
The Rclone Developers
-->
#### What is the problem you are having with rclone?
#### What is your rclone version (output from `rclone version`)
#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
#### Which cloud storage system are you using? (eg Google Drive)
#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)

.github/ISSUE_TEMPLATE/Feature.md

@@ -1,36 +0,0 @@
---
name: Feature request
about: Suggest a new feature or enhancement for rclone
---
<!--
Welcome :-)
So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
Here is a checklist of things to do:
1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum first: https://forum.rclone.org/
3. Make a feature request issue (this is the right place!).
4. Be prepared to get involved making the feature :-)
Looking forward to your great idea!
The Rclone Developers
-->
#### What is your current rclone version (output from `rclone version`)?
#### What problem are you are trying to solve?
#### How do you think rclone should be changed to solve that?

.github/PULL_REQUEST_TEMPLATE.md

@@ -1,29 +0,0 @@
<!--
Thank you very much for contributing code or documentation to rclone! Please
fill out the following questions to make it easier for us to review your
changes.
You do not need to check all the boxes below all at once, feel free to take
your time and add more commits. If you're done and ready for review, please
check the last box.
-->
#### What is the purpose of this change?
<!--
Describe the changes here
-->
#### Was the change discussed in an issue or in the forum before?
<!--
Link issues and relevant forum posts here.
-->
#### Checklist
- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
- [ ] I have added tests for all changes in this PR if appropriate.
- [ ] I have added documentation for the changes if appropriate.
- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).
- [ ] I'm done, this Pull Request is ready for review :-)

.gitignore (vendored, 3 lines changed)

@@ -5,6 +5,3 @@ build
docs/public
rclone.iml
.idea
.history
*.test
*.log

.golangci.yml

@@ -1,26 +0,0 @@
# golangci-lint configuration options
linters:
enable:
- deadcode
- errcheck
- goimports
- golint
- ineffassign
- structcheck
- varcheck
- govet
- unconvert
#- prealloc
#- maligned
disable-all: true
issues:
# Enable some lints excluded by default
exclude-use-default: false
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-per-linter: 0
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0

.gometalinter.json (new file, 14 lines)

@@ -0,0 +1,14 @@
{
"Enable": [
"deadcode",
"errcheck",
"goimports",
"golint",
"ineffassign",
"structcheck",
"varcheck",
"vet"
],
"EnableGC": true,
"Vendor": true
}

.travis.yml

@@ -1,128 +1,50 @@
---
language: go
sudo: required
dist: xenial
dist: trusty
os:
- linux
go_import_path: github.com/rclone/rclone
- linux
go:
- 1.7.6
- 1.8.7
- 1.9.3
- "1.10.1"
- tip
before_install:
- git fetch --unshallow --tags
- |
if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
fi
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
brew update
brew tap caskroom/cask
brew cask install osxfuse
fi
if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then
choco install -y winfsp zip make
cd ../.. # fix crlf in git checkout
mv $TRAVIS_REPO_SLUG _old
git config --global core.autocrlf false
git clone _old $TRAVIS_REPO_SLUG
cd $TRAVIS_REPO_SLUG
fi
- if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
- if [[ $TRAVIS_OS_NAME == osx ]]; then brew update && brew tap caskroom/cask && brew cask install osxfuse ; fi
install:
- make vars
- git fetch --unshallow --tags
- make vars
- make build_dep
script:
- make check
- make quicktest
- make compile_all
env:
global:
- GOTAGS=cmount
- GOMAXPROCS=8 # workaround for cmd/mount tests locking up - see #3154
- GO111MODULE=off
- GITHUB_USER=ncw
- secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
- secure: AMjrMAksDy3QwqGqnvtUg8FL/GNVgNqTqhntLF9HSU0njHhX6YurGGnfKdD9vNHlajPQOewvmBjwNLcDWGn2WObdvmh9Ohep0EmOjZ63kliaRaSSQueSd8y0idfqMQAxep0SObOYbEDVmQh0RCAE9wOVKRaPgw98XvgqWGDq5Tw=
- secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
addons:
apt:
packages:
- fuse
- libfuse-dev
- rpm
- pkg-config
cache:
directories:
- $HOME/.cache/go-build
- fuse
- libfuse-dev
- rpm
- pkg-config
matrix:
allow_failures:
- go: tip
- go: tip
include:
- go: 1.9.x
script:
- make quicktest
- go: 1.10.x
script:
- make quicktest
- go: 1.11.x
script:
- make quicktest
- go: 1.12.x
name: Linux
env:
- GOTAGS=cmount
- BUILD_FLAGS='-include "^linux/"'
- DEPLOY=true
script:
- make build_dep
- make check
- make quicktest
- go: 1.12.x
name: Go Modules / Race
env:
- GO111MODULE=on
- GOPROXY=https://proxy.golang.org
script:
- make quicktest
- make racequicktest
- go: 1.12.x
name: Other OS
env:
- DEPLOY=true
- BUILD_FLAGS='-exclude "^(windows|darwin|linux)/"'
script:
- make
- make compile_all
- go: 1.12.x
name: macOS
os: osx
env:
- GOTAGS= # cmount doesn't work on osx travis for some reason
- BUILD_FLAGS='-include "^darwin/" -cgo'
- DEPLOY=true
cache:
directories:
- $HOME/Library/Caches/go-build
script:
- make
- make quicktest
- make racequicktest
# - os: windows
# name: Windows
# go: 1.12.x
# env:
# - GOTAGS=cmount
# - CPATH='C:\Program Files (x86)\WinFsp\inc\fuse'
# - BUILD_FLAGS='-include "^windows/amd64" -cgo' # 386 doesn't build yet
# #filter_secrets: false # works around a problem with secrets under windows
# cache:
# directories:
# - ${LocalAppData}/go-build
# script:
# - make
# - make quicktest
# - make racequicktest
- go: tip
script:
- make quicktest
- os: osx
go: "1.10.1"
env: GOTAGS=""
deploy:
provider: script
script: make travis_beta
skip_cleanup: true
on:
repo: rclone/rclone
all_branches: true
condition: $TRAVIS_PULL_REQUEST == false && $DEPLOY == true
go: "1.10.1"
condition: $TRAVIS_PULL_REQUEST == false

CONTRIBUTING.md

@@ -21,20 +21,20 @@ with the [latest beta of rclone](https://beta.rclone.org/):
## Submitting a pull request ##
If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub.
like to implement then please submit a pull request via Github.
If it is a big feature then make an issue first so it can be discussed.
You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info.
First in your web browser press the fork button on [rclone's GitHub
page](https://github.com/rclone/rclone).
First in your web browser press the fork button on [rclone's Github
page](https://github.com/ncw/rclone).
Now in your terminal
go get -u github.com/rclone/rclone
cd $GOPATH/src/github.com/rclone/rclone
go get github.com/ncw/rclone
cd $GOPATH/src/github.com/ncw/rclone
git remote rename origin upstream
git remote add origin git@github.com:YOURUSER/rclone.git
@@ -64,31 +64,22 @@ packages which you can install with
Make sure you
* Add [documentation](#writing-documentation) for a new feature.
* Follow the [commit message guidelines](#commit-messages).
* Add [unit tests](#testing) for a new feature
* Add documentation for a new feature (see below for where)
* Add unit tests for a new feature
* squash commits down to one per feature
* rebase to master with `git rebase master`
* rebase to master `git rebase master`
When you are done with that
git push origin my-new-feature
git push origin my-new-feature
Go to the GitHub website and click [Create pull
Go to the Github website and click [Create pull
request](https://help.github.com/articles/creating-a-pull-request/).
Your patch will get reviewed and you might get asked to fix some stuff.
If so, then make the changes in the same branch, squash the commits,
rebase it to master then push it to GitHub with `--force`.
## Enabling CI for your fork ##
The CI config files for rclone have taken care of forks of the project, so you can enable CI for your fork repo easily.
rclone currently uses [Travis CI](https://travis-ci.org/), [AppVeyor](https://ci.appveyor.com/), and
[Circle CI](https://circleci.com/) to build the project. To enable them for your fork, simply go into their
websites, find your fork of rclone, and enable building there.
rebase it to master then push it to Github with `--force`.
## Testing ##
@@ -118,24 +109,17 @@ but they can be run against any of the remotes.
cd fs/sync
go test -v -remote TestDrive:
go test -v -remote TestDrive: -fast-list
go test -v -remote TestDrive: -subdir
cd fs/operations
go test -v -remote TestDrive:
If you want to use the integration test framework to run these tests
all together with an HTML report and test retries then from the
project root:
go install github.com/rclone/rclone/fstest/test_all
test_all -backend drive
If you want to run all the integration tests against all the remotes,
then change into the project root and run
make test
This command is run daily on the integration test server. You can
This command is run daily on the the integration test server. You can
find the results at https://pub.rclone.org/integration-tests/
## Code Organisation ##
@@ -182,21 +166,17 @@ with modules beneath.
* pacer - retries with backoff and paces operations
* readers - a selection of useful io.Readers
* rest - a thin abstraction over net/http for REST
* vendor - 3rd party code managed by `go mod`
* vendor - 3rd party code managed by the dep tool
* vfs - Virtual FileSystem layer for implementing rclone mount and similar
## Writing Documentation ##
If you are adding a new feature then please update the documentation.
If you add a new general flag (not for a backend), then document it in
If you add a new flag, then if it is a general flag, document it in
`docs/content/docs.md` - the flags there are supposed to be in
alphabetical order.
If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field. The first line of this is used
for the flag help, the remainder is shown to the user in `rclone
config` and is added to the docs with `make backenddocs`.
alphabetical order. If it is a remote specific flag, then document it
in `docs/content/remote.md`.
The only documentation you need to edit are the `docs/content/*.md`
files. The MANUAL.*, rclone.1, web site etc are all auto generated
@@ -215,20 +195,14 @@ file.
## Commit messages ##
Please make the first line of your commit message a summary of the
change that a user (not a developer) of rclone would like to read, and
prefix it with the directory of the change followed by a colon. The
changelog gets made by looking at just these first lines so make it
good!
change, and prefix it with the directory of the change followed by a
colon. The changelog gets made by looking at just these first lines
so make it good!
If you have more to say about the commit, then enter a blank line and
carry on the description. Remember to say why the change was needed -
the commit itself shows what was changed.
Writing more is better than less. Comparing the behaviour before the
change to that after the change is very useful. Imagine you are
writing to yourself in 12 months time when you've forgotten everything
about what you just did and you need to get up to speed quickly.
If the change fixes an issue then write `Fixes #1234` in the commit
message. This can be on the subject line if it will fit. If you
don't want to close the associated issue just put `#1234` and the
@@ -255,53 +229,37 @@ Fixes #1498
## Adding a dependency ##
rclone uses the [go
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
support in go1.11 and later to manage its dependencies.
rclone uses the [dep](https://github.com/golang/dep) tool to manage
its dependencies. All code that rclone needs for building is stored
in the `vendor` directory for perfectly reproducible builds.
**NB** you must be using go1.11 or above to add a dependency to
rclone. Rclone will still build with older versions of go, but we use
the `go mod` command for dependencies which is only in go1.11 and
above.
The `vendor` directory is entirely managed by the `dep` tool.
rclone can be built with modules outside of the GOPATH, but for
backwards compatibility with older go versions, rclone also maintains
a `vendor` directory with all the external code rclone needs for
building.
To add a new dependency, run `dep ensure` and `dep` will pull in the
new dependency to the `vendor` directory and update the `Gopkg.lock`
file.
The `vendor` directory is entirely managed by the `go mod` tool, do
not add things manually.
You can add constraints on that package in the `Gopkg.toml` file (see
the `dep` documentation), but don't unless you really need to.
To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency, add it to
`go.mod` and `go.sum` and vendor it for older go versions.
GO111MODULE=on go get github.com/ncw/new_dependency
GO111MODULE=on go mod vendor
You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.
Please check in the changes generated by `go mod` including the
`vendor` directory and `go.mod` and `go.sum` in a single commit
separate from any other code changes with the title "vendor: add
github.com/ncw/new_dependency". Remember to `git add` any new files
in `vendor`.
Please check in the changes generated by `dep` including the `vendor`
directory and `Godep.toml` and `Godep.lock` in a single commit
separate from any other code changes. Watch out for new files in
`vendor`.
## Updating a dependency ##
If you need to update a dependency then run
GO111MODULE=on go get -u github.com/pkg/errors
GO111MODULE=on go mod vendor
dep ensure -update github.com/pkg/errors
Check in in a single commit as above.
## Updating all the dependencies ##
In order to update all the dependencies then run `make update`. This
just uses the go modules to update all the modules to their latest
stable release. Check in the changes in a single commit as above.
just runs `dep ensure -update`. Check in the changes in a single
commit as above.
This should be done early in the release cycle to pick up new versions
of packages in time for them to get some testing.
@@ -350,29 +308,26 @@ Unit tests
Integration tests
* Add your backend to `fstest/test_all/config.yaml`
* Once you've done that then you can use the integration test framework from the project root:
* go install ./...
* test_all -backend remote
Or if you want to run the integration tests manually:
* Add your fs to `fstest/test_all/test_all.go`
* Make sure integration tests pass with
* `cd fs/operations`
* `go test -v -remote TestRemote:`
* `cd fs/sync`
* `go test -v -remote TestRemote:`
* If your remote defines `ListR` check with this also
* If you are making a bucket based remote, then check with this also
* `go test -v -remote TestRemote: -subdir`
* And if your remote defines `ListR` this also
* `go test -v -remote TestRemote: -fast-list`
See the [testing](#testing) section for more information on integration tests.
Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.
* `README.md` - main GitHub page
* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
* `README.md` - main Github page
* `docs/content/remote.md` - main docs page
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/about.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `bin/make_manual.py` - add the page to the `docs` constant
* `cmd/cmd.go` - the main help for rclone
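
For context, the `-remote` flag used in the test commands quoted above (e.g. `go test -v -remote TestDrive:`) is an ordinary Go test flag. A minimal sketch of that pattern, with a hypothetical package and flag wiring rather than rclone's actual fstest code:

```go
package operations_test

import (
	"flag"
	"testing"
)

// remoteName sketches the -remote flag pattern behind commands like
// `go test -v -remote TestDrive:`. Hypothetical, for illustration only.
var remoteName = flag.String("remote", "", "remote to run tests against, e.g. TestDrive:")

func TestRemoteConfigured(t *testing.T) {
	if *remoteName == "" {
		t.Skip("no -remote given, skipping integration test")
	}
	t.Logf("would run integration tests against %q", *remoteName)
}
```

Because `go test` parses flags before running tests, a package-level flag like this is filled in automatically when you run `go test -v -remote TestDrive:` as described above.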

Gopkg.lock (generated, new file, 463 lines)

@@ -0,0 +1,463 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
branch = "master"
name = "bazil.org/fuse"
packages = [
".",
"fs",
"fuseutil"
]
revision = "65cc252bf6691cb3c7014bcb2c8dc29de91e3a7e"
[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata"]
revision = "0fd7230b2a7505833d5f69b75cbd6c9582401479"
version = "v0.23.0"
[[projects]]
name = "github.com/Azure/azure-pipeline-go"
packages = ["pipeline"]
revision = "7571e8eb0876932ab505918ff7ed5107773e5ee2"
version = "0.1.7"
[[projects]]
branch = "master"
name = "github.com/Azure/azure-storage-blob-go"
packages = ["2018-03-28/azblob"]
revision = "eaae161d9d5e07363f04ddb19d84d57efc66d1a1"
[[projects]]
branch = "master"
name = "github.com/Unknwon/goconfig"
packages = ["."]
revision = "ef1e4c783f8f0478bd8bff0edb3dd0bade552599"
[[projects]]
name = "github.com/VividCortex/ewma"
packages = ["."]
revision = "b24eb346a94c3ba12c1da1e564dbac1b498a77ce"
version = "v1.1.1"
[[projects]]
branch = "master"
name = "github.com/a8m/tree"
packages = ["."]
revision = "3cf936ce15d6100c49d9c75f79c220ae7e579599"
[[projects]]
name = "github.com/abbot/go-http-auth"
packages = ["."]
revision = "0ddd408d5d60ea76e320503cc7dd091992dee608"
version = "v0.4.0"
[[projects]]
name = "github.com/aws/aws-sdk-go"
packages = [
"aws",
"aws/awserr",
"aws/awsutil",
"aws/client",
"aws/client/metadata",
"aws/corehandlers",
"aws/credentials",
"aws/credentials/ec2rolecreds",
"aws/credentials/endpointcreds",
"aws/credentials/stscreds",
"aws/csm",
"aws/defaults",
"aws/ec2metadata",
"aws/endpoints",
"aws/request",
"aws/session",
"aws/signer/v4",
"internal/sdkio",
"internal/sdkrand",
"internal/shareddefaults",
"private/protocol",
"private/protocol/eventstream",
"private/protocol/eventstream/eventstreamapi",
"private/protocol/query",
"private/protocol/query/queryutil",
"private/protocol/rest",
"private/protocol/restxml",
"private/protocol/xml/xmlutil",
"service/s3",
"service/s3/s3iface",
"service/s3/s3manager",
"service/sts"
]
revision = "bfc1a07cf158c30c41a3eefba8aae043d0bb5bff"
version = "v1.14.8"
[[projects]]
name = "github.com/billziss-gh/cgofuse"
packages = ["fuse"]
revision = "ea66f9809c71af94522d494d3d617545662ea59d"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/coreos/bbolt"
packages = ["."]
revision = "af9db2027c98c61ecd8e17caa5bd265792b9b9a2"
[[projects]]
name = "github.com/cpuguy83/go-md2man"
packages = ["md2man"]
revision = "20f5889cbdc3c73dbd2862796665e7c465ade7d1"
version = "v1.0.8"
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
name = "github.com/djherbis/times"
packages = ["."]
revision = "95292e44976d1217cf3611dc7c8d9466877d3ed5"
version = "v1.0.1"
[[projects]]
name = "github.com/dropbox/dropbox-sdk-go-unofficial"
packages = [
"dropbox",
"dropbox/async",
"dropbox/common",
"dropbox/file_properties",
"dropbox/files",
"dropbox/seen_state",
"dropbox/sharing",
"dropbox/team_common",
"dropbox/team_policies",
"dropbox/users",
"dropbox/users_common"
]
revision = "7afa861bfde5a348d765522b303b6fbd9d250155"
version = "v4.1.0"
[[projects]]
name = "github.com/go-ini/ini"
packages = ["."]
revision = "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5"
version = "v1.37.0"
[[projects]]
name = "github.com/golang/protobuf"
packages = ["proto"]
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/google/go-querystring"
packages = ["query"]
revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
[[projects]]
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
branch = "master"
name = "github.com/jlaffaye/ftp"
packages = ["."]
revision = "2403248fa8cc9f7909862627aa7337f13f8e0bf1"
[[projects]]
name = "github.com/jmespath/go-jmespath"
packages = ["."]
revision = "0b12d6b5"
[[projects]]
branch = "master"
name = "github.com/kardianos/osext"
packages = ["."]
revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
[[projects]]
name = "github.com/kr/fs"
packages = ["."]
revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
version = "v0.1.0"
[[projects]]
name = "github.com/mattn/go-runewidth"
packages = ["."]
revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
version = "v0.0.2"
[[projects]]
branch = "master"
name = "github.com/ncw/go-acd"
packages = ["."]
revision = "887eb06ab6a255fbf5744b5812788e884078620a"
[[projects]]
name = "github.com/ncw/swift"
packages = ["."]
revision = "b2a7479cf26fa841ff90dd932d0221cb5c50782d"
version = "v1.0.39"
[[projects]]
branch = "master"
name = "github.com/nsf/termbox-go"
packages = ["."]
revision = "5c94acc5e6eb520f1bcd183974e01171cc4c23b3"
[[projects]]
branch = "master"
name = "github.com/okzk/sdnotify"
packages = ["."]
revision = "ed8ca104421a21947710335006107540e3ecb335"
[[projects]]
name = "github.com/patrickmn/go-cache"
packages = ["."]
revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0"
version = "v2.1.0"
[[projects]]
name = "github.com/pengsrc/go-shared"
packages = [
"buffer",
"check",
"convert",
"log",
"reopen"
]
revision = "807ee759d82c84982a89fb3dc875ef884942f1e5"
version = "v0.2.0"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
name = "github.com/pkg/sftp"
packages = ["."]
revision = "57673e38ea946592a59c26592b7e6fbda646975b"
version = "1.8.0"
[[projects]]
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
name = "github.com/rfjakob/eme"
packages = ["."]
revision = "01668ae55fe0b79a483095689043cce3e80260db"
version = "v1.1"
[[projects]]
name = "github.com/russross/blackfriday"
packages = ["."]
revision = "55d61fa8aa702f59229e6cff85793c22e580eaf5"
version = "v1.5.1"
[[projects]]
branch = "master"
name = "github.com/sevlyar/go-daemon"
packages = ["."]
revision = "f9261e73885de99b1647d68bedadf2b9a99ad11f"
[[projects]]
branch = "master"
name = "github.com/skratchdot/open-golang"
packages = ["open"]
revision = "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"
[[projects]]
name = "github.com/spf13/cobra"
packages = [
".",
"doc"
]
revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
version = "v0.0.3"
[[projects]]
name = "github.com/spf13/pflag"
packages = ["."]
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
version = "v1.0.1"
[[projects]]
name = "github.com/stretchr/testify"
packages = [
"assert",
"require"
]
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
version = "v1.2.2"
[[projects]]
branch = "master"
name = "github.com/t3rm1n4l/go-mega"
packages = ["."]
revision = "57978a63bd3f91fa7e188b751a7e7e6dd4e33813"
[[projects]]
branch = "master"
name = "github.com/xanzy/ssh-agent"
packages = ["."]
revision = "ba9c9e33906f58169366275e3450db66139a31a9"
[[projects]]
name = "github.com/yunify/qingstor-sdk-go"
packages = [
".",
"config",
"logger",
"request",
"request/builder",
"request/data",
"request/errors",
"request/signer",
"request/unpacker",
"service",
"utils"
]
revision = "4f9ac88c5fec7350e960aabd0de1f1ede0ad2895"
version = "v2.2.14"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = [
"bcrypt",
"blowfish",
"curve25519",
"ed25519",
"ed25519/internal/edwards25519",
"internal/chacha20",
"internal/subtle",
"nacl/secretbox",
"pbkdf2",
"poly1305",
"salsa20/salsa",
"scrypt",
"ssh",
"ssh/agent",
"ssh/terminal"
]
revision = "027cca12c2d63e3d62b670d901e8a2c95854feec"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = [
"context",
"context/ctxhttp",
"html",
"html/atom",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"publicsuffix",
"webdav",
"webdav/internal/xml",
"websocket"
]
revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196"
[[projects]]
branch = "master"
name = "golang.org/x/oauth2"
packages = [
".",
"google",
"internal",
"jws",
"jwt"
]
revision = "1e0a3fa8ba9a5c9eb35c271780101fdaf1b205d7"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = [
"unix",
"windows"
]
revision = "6c888cc515d3ed83fc103cf1d84468aad274b0a7"
[[projects]]
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable"
]
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
name = "golang.org/x/time"
packages = ["rate"]
revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
[[projects]]
branch = "master"
name = "google.golang.org/api"
packages = [
"drive/v3",
"gensupport",
"googleapi",
"googleapi/internal/uritemplates",
"storage/v1"
]
revision = "2eea9ba0a3d94f6ab46508083e299a00bbbc65f6"
[[projects]]
name = "google.golang.org/appengine"
packages = [
".",
"internal",
"internal/app_identity",
"internal/base",
"internal/datastore",
"internal/log",
"internal/modules",
"internal/remote_api",
"internal/urlfetch",
"urlfetch"
]
revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
version = "v1.1.0"
[[projects]]
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "670cdb55138aa1394b4c8f87345e9be9c8105248edda4be7176dddee2a4f5d26"
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml (new file, 15 lines)

@@ -0,0 +1,15 @@
# pin this to master to pull in the macOS changes
# can likely remove for 1.43
[[override]]
branch = "master"
name = "github.com/sevlyar/go-daemon"
# pin this to master to pull in the fix for linux/mips
# can likely remove for 1.43
[[override]]
branch = "master"
name = "github.com/coreos/bbolt"
[[constraint]]
branch = "master"
name = "github.com/Azure/azure-storage-blob-go"

ISSUE_TEMPLATE.md (new file, 43 lines)

@@ -0,0 +1,43 @@
<!--
Hi!
We understand you are having a problem with rclone or have an idea for an improvement - we want to help you with that!
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum
https://forum.rclone.org/
instead of filing an issue. We'll reply quickly and it won't increase our massive issue backlog.
If you think you might have found a bug, please can you try to replicate it with the latest beta?
https://beta.rclone.org/
If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
If you have an idea for an improvement, then please search the old issues first and if you don't find your idea, make a new issue.
Thanks
The Rclone Developers
-->
#### What is the problem you are having with rclone?
#### What is your rclone version (eg output from `rclone -V`)
#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
#### Which cloud storage system are you using? (eg Google Drive)
#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)

MAINTAINERS.md

@@ -1,17 +1,12 @@
# Maintainers guide for rclone #
Current active maintainers of rclone are:
Current active maintainers of rclone are
| Name | GitHub ID | Specific Responsibilities |
| :--------------- | :---------- | :-------------------------- |
| Nick Craig-Wood | @ncw | overall project health |
| Stefan Breunig | @breunigs | |
| Ishuah Kariuki | @ishuah | |
| Remus Bunduc | @remusb | cache backend |
| Fabian Möller | @B4dM4n | |
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud & yandex backends |
* Nick Craig-Wood @ncw
* Stefan Breunig @breunigs
* Ishuah Kariuki @ishuah
* Remus Bunduc @remusb - cache subsystem maintainer
* Fabian Möller @B4dM4n
**This is a work in progress Draft**
@@ -51,7 +46,7 @@ The milestones have these meanings:
* Help wanted - blue sky stuff that might get moved up, or someone could help with
* Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
Tickets [with no milestone](https://github.com/ncw/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
## Closing Tickets ##
@@ -61,7 +56,7 @@ Close tickets as soon as you can - make sure they are tagged with a release. Po
Try to process pull requests promptly!
Merging pull requests on GitHub itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
Merging pull requests on Github itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.

MANUAL.html: 12613 lines changed (file diff suppressed because it is too large)

MANUAL.md: 13406 lines changed (file diff suppressed because it is too large)

MANUAL.txt: 13698 lines changed (file diff suppressed because it is too large)

Makefile (124 lines changed)

@@ -1,9 +1,5 @@
SHELL = bash
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(shell git rev-parse --abbrev-ref HEAD))
LAST_TAG := $(shell git describe --tags --abbrev=0)
ifeq ($(BRANCH),$(LAST_TAG))
BRANCH := master
endif
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
TAG_BRANCH := -$(BRANCH)
BRANCH_PATH := branch/
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
@@ -11,30 +7,26 @@ ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
BRANCH_PATH :=
endif
TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
ifneq ($(TAG),$(LAST_TAG))
TAG := $(TAG)-beta
endif
LAST_TAG := $(shell git describe --tags --abbrev=0)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
ifdef BETA_SUBDIR
BETA_SUBDIR := /$(BETA_SUBDIR)
endif
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
# Run full tests if go >= go1.9
FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 9)')
BETA_PATH := $(BRANCH_PATH)$(TAG)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
# Pass in GOTAGS=xyz on the make command line to set build tags
ifdef GOTAGS
BUILDTAGS=-tags "$(GOTAGS)"
LINTTAGS=--build-tags "$(GOTAGS)"
endif
.PHONY: rclone vars version
rclone:
touch fs/version.go
go install -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
go install -v --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
cp -av `go env GOPATH`/bin/rclone .
vars:
@@ -44,6 +36,7 @@ vars:
@echo LAST_TAG="'$(LAST_TAG)'"
@echo NEW_TAG="'$(NEW_TAG)'"
@echo GO_VERSION="'$(GO_VERSION)'"
@echo FULL_TESTS="'$(FULL_TESTS)'"
@echo BETA_URL="'$(BETA_URL)'"
version:
@@ -51,26 +44,47 @@ version:
# Full suite of integration tests
test: rclone
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
-test_all 2>&1 | tee test_all.log
@echo "Written logs in test_all.log"
go install github.com/ncw/rclone/fstest/test_all
-go test -v -count 1 $(BUILDTAGS) $(GO_FILES) 2>&1 | tee test.log
-test_all github.com/ncw/rclone/fs/operations github.com/ncw/rclone/fs/sync 2>&1 | tee fs/test_all.log
@echo "Written logs in test.log and fs/test_all.log"
# Quick test
quicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
racequicktest:
ifdef FULL_TESTS
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
endif
# Do source code quality checks
check: rclone
@echo "-- START CODE QUALITY REPORT -------------------------------"
@golangci-lint run $(LINTTAGS) ./...
@echo "-- END CODE QUALITY REPORT ---------------------------------"
ifdef FULL_TESTS
go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
errcheck $(BUILDTAGS) ./...
find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
go list ./... | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
else
@echo Skipping source quality tests as version of go too old
endif
gometalinter_install:
go get -u github.com/alecthomas/gometalinter
gometalinter --install --update
# We aren't using gometalinter as the default linter yet because
# 1. it doesn't support build tags: https://github.com/alecthomas/gometalinter/issues/275
# 2. can't get -printfuncs working with the vet linter
gometalinter:
gometalinter ./...
# Get the build dependencies
build_dep:
go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
ifdef FULL_TESTS
go get -u github.com/kisielk/errcheck
go get -u golang.org/x/tools/cmd/goimports
go get -u github.com/golang/lint/golint
go get -u github.com/tools/godep
endif
# Get the release dependencies
release_dep:
@@ -79,16 +93,15 @@ release_dep:
# Update dependencies
update:
GO111MODULE=on go get -u ./...
GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor
go get -u github.com/golang/dep/cmd/dep
dep ensure -update -v
doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
doc: rclone.1 MANUAL.html MANUAL.txt
rclone.1: MANUAL.md
pandoc -s --from markdown --to man MANUAL.md -o rclone.1
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs
./bin/make_manual.py
MANUAL.html: MANUAL.md
@@ -98,10 +111,7 @@ MANUAL.txt: MANUAL.md
pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
commanddocs: rclone
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
backenddocs: rclone bin/make_backend_docs.py
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
rclone gendocs docs/content/commands/
rcdocs: rclone
bin/make_rc_docs.sh
@@ -136,8 +146,8 @@ check_sign:
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
upload:
rclone -P copy build/ memstore:downloads-rclone-org/$(TAG)
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"'
rclone -v copy --exclude '*current*' build/ memstore:downloads-rclone-org/$(TAG)
rclone -v copy --include '*current*' --include version.txt build/ memstore:downloads-rclone-org
upload_github:
./bin/upload-github $(TAG)
@@ -146,15 +156,19 @@ cross: doc
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
beta:
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)β
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)β
@echo Beta release ready at https://pub.rclone.org/$(TAG)%CE%B2/
log_since_last_release:
git log $(LAST_TAG)..
compile_all:
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
ifdef FULL_TESTS
go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)β
else
@echo Skipping compile all as version of go too old
endif
appveyor_upload:
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
@@ -163,28 +177,29 @@ ifndef BRANCH_PATH
endif
@echo Beta release ready at $(BETA_URL)
circleci_upload:
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifndef BRANCH_PATH
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
BUILD_FLAGS := -exclude "^(windows|darwin)/"
ifeq ($(TRAVIS_OS_NAME),osx)
BUILD_FLAGS := -include "^darwin/" -cgo
endif
@echo Beta release ready at $(BETA_URL)/testbuilds
travis_beta:
ifeq (linux,$(filter linux,$(subst Linux,linux,$(TRAVIS_OS_NAME) $(AGENT_OS))))
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
ifeq ($(TRAVIS_OS_NAME),linux)
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
endif
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)β
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)
# Fetch the binary builds from travis and appveyor
fetch_binaries:
rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
# Fetch the windows builds from appveyor
fetch_windows:
rclone -v copy --include 'rclone-v*-windows-*.zip' $(BETA_UPLOAD) build/
-#cp -av build/rclone-v*-windows-386.zip build/rclone-current-windows-386.zip
-#cp -av build/rclone-v*-windows-amd64.zip build/rclone-current-windows-amd64.zip
md5sum build/rclone-*-windows-*.zip | sort
serve: website
cd docs && hugo server -v -w
@@ -195,10 +210,10 @@ tag: doc
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html
git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG)
bin/make_changelog.py $(LAST_TAG) $(NEW_TAG) > docs/content/changelog.md.new
mv docs/content/changelog.md.new docs/content/changelog.md
@echo "Edit the new changelog in docs/content/changelog.md"
@echo "Then commit all the changes"
@echo " * $(NEW_TAG) -" `date -I` >> docs/content/changelog.md
@git log $(LAST_TAG)..$(NEW_TAG) --oneline >> docs/content/changelog.md
@echo "Then commit the changes"
@echo git commit -m \"Version $(NEW_TAG)\" -a -v
@echo "And finally run make retag before make cross etc"
@@ -211,3 +226,4 @@ startdev:
winzip:
zip -9 rclone-$(TAG).zip rclone.exe
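
As an aside, the NEW_TAG recipe in this Makefile (both the old `sprintf("v%.2f")` and new `sprintf("v%.2f.0")` variants) bumps the version by adding 0.01 to the numeric part of the last tag. A minimal Go sketch of the old rule, for illustration only:

```go
package main

import "fmt"

// nextTag mimics the Makefile's NEW_TAG perl one-liner: strip the
// leading "v", add 0.01, and reformat with two decimal places.
func nextTag(lastTag string) string {
	var v float64
	fmt.Sscanf(lastTag, "v%f", &v)
	return fmt.Sprintf("v%.2f", v+0.01)
}

func main() {
	fmt.Println(nextTag("v1.42")) // v1.43
}
```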

README.md (122 lines changed)

@@ -1,103 +1,61 @@
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
[![Logo](https://rclone.org/img/rclone-120x120.png)](https://rclone.org/)
[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
[Download](https://rclone.org/downloads/) |
[Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/)
[G+](https://google.com/+RcloneOrg)
[![Build Status](https://travis-ci.org/rclone/rclone.svg?branch=master)](https://travis-ci.org/rclone/rclone)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/rclone/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/rclone/rclone)
[![Build Status](https://dev.azure.com/rclone/rclone/_apis/build/status/rclone.rclone?branchName=master)](https://dev.azure.com/rclone/rclone/_build/latest?definitionId=2&branchName=master)
[![CircleCI](https://circleci.com/gh/rclone/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/rclone/rclone/tree/master)
[![Go Report Card](https://goreportcard.com/badge/github.com/rclone/rclone)](https://goreportcard.com/report/github.com/rclone/rclone)
[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)
[![Build Status](https://travis-ci.org/ncw/rclone.svg?branch=master)](https://travis-ci.org/ncw/rclone)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/ncw/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/ncw/rclone)
[![CircleCI](https://circleci.com/gh/ncw/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/ncw/rclone/tree/master)
[![GoDoc](https://godoc.org/github.com/ncw/rclone?status.svg)](https://godoc.org/github.com/ncw/rclone)
# Rclone
Rclone is a command line program to sync files and directories to and from
Rclone *("rsync for cloud storage")* is a command line program to sync files and directories to and from different cloud storage providers.
* Amazon Drive ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 / Dreamhost / Ceph / Minio / Wasabi
* Backblaze B2
* Box
* Dropbox
* FTP
* Google Cloud Storage
* Google Drive
* HTTP
* Hubic
* Mega
* Microsoft Azure Blob Storage
* Microsoft OneDrive
* OpenDrive
* Openstack Swift / Rackspace cloud files / Memset Memstore / OVH / Oracle Cloud Storage
* pCloud
* QingStor
* SFTP
* Webdav / Owncloud / Nextcloud
* Yandex Disk
* The local filesystem
## Storage providers
Features
* 1Fichier [:page_facing_up:](https://rclone.org/ficher/)
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
## Features
* MD5/SHA-1 hashes checked at all times for file integrity
* MD5/SHA1 hashes checked at all times for file integrity
* Timestamps preserved on files
* Partial syncs supported on a whole file basis
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, e.g. two different cloud accounts
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Multi-threaded downloads to local disk
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
* Copy mode to just copy new/changed files
* Sync (one way) mode to make a directory identical
* Check mode to check for file hash equality
* Can sync to and from network, eg two different cloud accounts
* Optional encryption (Crypt)
* Optional FUSE mount
## Installation & documentation
See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.
Please see the [rclone website](https://rclone.org/) for:
* [Installation](https://rclone.org/install/)
* [Documentation & configuration](https://rclone.org/docs/)
* [Changelog](https://rclone.org/changelog/)
* [FAQ](https://rclone.org/faq/)
* [Storage providers](https://rclone.org/overview/)
* [Forum](https://forum.rclone.org/)
* ...and more
## Downloads
* https://rclone.org/downloads/
* https://rclone.org/
License
-------
This is free software under the terms of MIT the license (check the
[COPYING file](/COPYING) included in this package).
COPYING file included in this package).

RELEASE.md

@@ -11,11 +11,16 @@ Making a release
* edit docs/content/changelog.md
* make doc
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* git commit -a -v -m "Version v1.XX"
* make retag
* make release_dep
* # Set the GOPATH for a current stable go compiler
* make cross
* git checkout docs/content/commands # to undo date changes in commands
* git push --tags origin master
* # Wait for the appveyor and travis builds to complete then...
* make fetch_binaries
* git push --tags origin master:stable # update the stable branch for packager.io
* # Wait for the appveyor and travis builds to complete then fetch the windows binaries from appveyor
* make fetch_windows
* make tarball
* make sign_upload
* make check_sign
@@ -26,46 +31,11 @@ Making a release
* # announce with forum post, twitter post, G+ post
Early in the next release cycle update the vendored dependencies
* Review any pinned packages in go.mod and remove if possible
* GO111MODULE=on go get -u github.com/spf13/cobra@master
* Review any pinned packages in Gopkg.toml and remove if possible
* make update
* git status
* git add new files
* carry forward any patches to vendor stuff
* git commit -a -v
If `make update` fails with errors like this:
```
# github.com/cpuguy83/go-md2man/md2man
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
```
Can be fixed with
* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
* GO111MODULE=on go mod tidy
* GO111MODULE=on go mod vendor
Making a point release. If rclone needs a point release due to some
horrendous bug, then
* git branch v1.XX v1.XX-fixes
* git cherry-pick any fixes
* Test (see above)
* make NEW_TAG=v1.XX.1 tag
* edit docs/content/changelog.md
* make TAG=v1.43.1 doc
* git commit -a -v -m "Version v1.XX.1"
* git tag -d -v1.XX.1
* git tag -s -m "Version v1.XX.1" v1.XX.1
* git push --tags -u origin v1.XX-fixes
* make BRANCH_PATH= TAG=v1.43.1 fetch_binaries
* make TAG=v1.43.1 tarball
* make TAG=v1.43.1 sign_upload
* make TAG=v1.43.1 check_sign
* make TAG=v1.43.1 upload
* make TAG=v1.43.1 upload_website
* make TAG=v1.43.1 upload_github
* NB this overwrites the current beta so after the release, rebuild the last travis build
* Announce!
Make the version number be just in a file?

azure-pipelines.yml

@@ -1,239 +0,0 @@
---
# Azure pipelines build for rclone
# Parts stolen shamelessly from all round the Internet, especially Caddy
# -*- compile-command: "yamllint -f parsable azure-pipelines.yml" -*-
trigger:
branches:
include:
- '*'
tags:
include:
- '*'
variables:
GOROOT: $(gorootDir)/go
GOPATH: $(system.defaultWorkingDirectory)/gopath
GOCACHE: $(system.defaultWorkingDirectory)/gocache
GOBIN: $(GOPATH)/bin
modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)'
GO111MODULE: 'off'
GOTAGS: cmount
GO_LATEST: false
CPATH: ''
GO_INSTALL_ARCH: amd64
strategy:
matrix:
linux:
imageName: ubuntu-16.04
gorootDir: /usr/local
GO_VERSION: latest
GOTAGS: cmount
BUILD_FLAGS: '-include "^linux/"'
MAKE_CHECK: true
MAKE_QUICKTEST: true
DEPLOY: true
mac:
imageName: macos-10.13
gorootDir: /usr/local
GO_VERSION: latest
GOTAGS: "" # cmount doesn't work on osx travis for some reason
BUILD_FLAGS: '-include "^darwin/" -cgo'
MAKE_QUICKTEST: true
MAKE_RACEQUICKTEST: true
DEPLOY: true
windows_amd64:
imageName: windows-2019
gorootDir: C:\
GO_VERSION: latest
BUILD_FLAGS: '-include "^windows/amd64" -cgo'
MAKE_QUICKTEST: true
DEPLOY: true
windows_386:
imageName: windows-2019
gorootDir: C:\
GO_VERSION: latest
GO_INSTALL_ARCH: 386
BUILD_FLAGS: '-include "^windows/386" -cgo'
MAKE_QUICKTEST: true
DEPLOY: true
other_os:
imageName: ubuntu-16.04
gorootDir: /usr/local
GO_VERSION: latest
BUILD_FLAGS: '-exclude "^(windows|darwin|linux)/"'
MAKE_COMPILE_ALL: true
DEPLOY: true
modules_race:
imageName: ubuntu-16.04
gorootDir: /usr/local
GO_VERSION: latest
GO111MODULE: on
GOPROXY: https://proxy.golang.org
MAKE_QUICKTEST: true
MAKE_RACEQUICKTEST: true
go1.9:
imageName: ubuntu-16.04
gorootDir: /usr/local
GOCACHE: '' # build caching only came in go1.10
GO_VERSION: go1.9.7
MAKE_QUICKTEST: true
go1.10:
imageName: ubuntu-16.04
gorootDir: /usr/local
GO_VERSION: go1.10.8
MAKE_QUICKTEST: true
go1.11:
imageName: ubuntu-16.04
gorootDir: /usr/local
GO_VERSION: go1.11.12
MAKE_QUICKTEST: true
pool:
vmImage: $(imageName)
steps:
- bash: |
latestGo=$(curl "https://golang.org/VERSION?m=text")
echo "##vso[task.setvariable variable=GO_VERSION]$latestGo"
echo "##vso[task.setvariable variable=GO_LATEST]true"
echo "Latest Go version: $latestGo"
condition: eq( variables['GO_VERSION'], 'latest' )
continueOnError: false
displayName: "Get latest Go version"
- bash: |
sudo rm -f $(which go)
echo '##vso[task.prependpath]$(GOBIN)'
echo '##vso[task.prependpath]$(GOROOT)/bin'
mkdir -p '$(modulePath)'
shopt -s extglob
shopt -s dotglob
mv !(gopath) '$(modulePath)'
continueOnError: false
displayName: Remove old Go, set GOBIN/GOROOT, and move project into GOPATH
- task: CacheBeta@0
inputs:
key: go-build-cache | "$(Agent.JobName)"
path: $(GOCACHE)
continueOnError: true
displayName: Cache go build
condition: ne( variables['GOCACHE'], '' )
# Install Libraries (varies by platform)
- bash: |
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get install fuse libfuse-dev rpm pkg-config
condition: eq( variables['Agent.OS'], 'Linux' )
continueOnError: false
displayName: Install Libraries on Linux
- bash: |
brew update
brew tap caskroom/cask
brew cask install osxfuse
condition: eq( variables['Agent.OS'], 'Darwin' )
continueOnError: false
displayName: Install Libraries on macOS
- powershell: |
$ProgressPreference = 'SilentlyContinue'
choco install -y winfsp zip
Write-Host "##vso[task.setvariable variable=CPATH]C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
if ($env:GO_INSTALL_ARCH -eq "386") {
choco install -y mingw --forcex86 --force
Write-Host "##vso[task.prependpath]C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
}
# Copy mingw32-make.exe to make.exe so the same command line
# can be used on Windows as on macOS and Linux
$path = (get-command mingw32-make.exe).Path
Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
condition: eq( variables['Agent.OS'], 'Windows_NT' )
continueOnError: false
displayName: Install Libraries on Windows
# Install Go (this varies by platform)
- bash: |
wget "https://dl.google.com/go/$(GO_VERSION).linux-$(GO_INSTALL_ARCH).tar.gz"
sudo mkdir $(gorootDir)
sudo chown ${USER}:${USER} $(gorootDir)
tar -C $(gorootDir) -xzf "$(GO_VERSION).linux-$(GO_INSTALL_ARCH).tar.gz"
condition: eq( variables['Agent.OS'], 'Linux' )
continueOnError: false
displayName: Install Go on Linux
- bash: |
wget "https://dl.google.com/go/$(GO_VERSION).darwin-$(GO_INSTALL_ARCH).tar.gz"
sudo tar -C $(gorootDir) -xzf "$(GO_VERSION).darwin-$(GO_INSTALL_ARCH).tar.gz"
condition: eq( variables['Agent.OS'], 'Darwin' )
continueOnError: false
displayName: Install Go on macOS
- powershell: |
$ProgressPreference = 'SilentlyContinue'
Write-Host "Downloading Go $(GO_VERSION) for $(GO_INSTALL_ARCH)"
(New-Object System.Net.WebClient).DownloadFile("https://dl.google.com/go/$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip", "$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip")
Write-Host "Extracting Go"
Expand-Archive "$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip" -DestinationPath "$(gorootDir)"
condition: eq( variables['Agent.OS'], 'Windows_NT' )
continueOnError: false
displayName: Install Go on Windows
# Display environment for debugging
- bash: |
printf "Using go at: $(which go)\n"
printf "Go version: $(go version)\n"
printf "\n\nGo environment:\n\n"
go env
printf "\n\nRclone environment:\n\n"
make vars
printf "\n\nSystem environment:\n\n"
env
workingDirectory: '$(modulePath)'
displayName: Print Go version and environment
# Run Tests
- bash: |
make
make quicktest
workingDirectory: '$(modulePath)'
displayName: Run tests
condition: eq( variables['MAKE_QUICKTEST'], 'true' )
- bash: |
make racequicktest
workingDirectory: '$(modulePath)'
displayName: Race test
condition: eq( variables['MAKE_RACEQUICKTEST'], 'true' )
- bash: |
make build_dep
make check
workingDirectory: '$(modulePath)'
displayName: Code quality test
condition: eq( variables['MAKE_CHECK'], 'true' )
- bash: |
make
make compile_all
workingDirectory: '$(modulePath)'
displayName: Compile all architectures test
condition: eq( variables['MAKE_COMPILE_ALL'], 'true' )
- bash: |
make travis_beta
env:
RCLONE_CONFIG_PASS: $(RCLONE_CONFIG_PASS)
BETA_SUBDIR: 'azure_pipelines' # FIXME remove when removing travis/appveyor
workingDirectory: '$(modulePath)'
displayName: Deploy built binaries
condition: and( eq( variables['DEPLOY'], 'true' ), ne( variables['Build.Reason'], 'PullRequest' ) )
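
A note on the "Get latest Go version" step above: GO_VERSION: latest is resolved at run time by fetching https://golang.org/VERSION?m=text. For anyone who wants to replicate that lookup outside the pipeline, here is a minimal Go sketch of the same call (the endpoint comes from the YAML above; everything else is illustrative):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Same endpoint the pipeline queries with curl.
	resp, err := http.Get("https://golang.org/VERSION?m=text")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	version, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("latest Go version: %s\n", version) // e.g. go1.12.7
}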

backend/alias/alias.go

@@ -2,19 +2,20 @@ package alias
import (
"errors"
"path"
"path/filepath"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
)
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "alias",
Description: "Alias for an existing remote",
Description: "Alias for a existing remote",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
@@ -30,7 +31,7 @@ type Options struct {
Remote string `config:"remote"`
}
// NewFs constructs an Fs from the path.
// NewFs contstructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -46,9 +47,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if strings.HasPrefix(opt.Remote, name+":") {
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
}
fsInfo, configName, fsPath, config, err := fs.ConfigFs(opt.Remote)
_, configName, fsPath, err := fs.ParseRemote(opt.Remote)
if err != nil {
return nil, err
}
return fsInfo.NewFs(configName, fspath.JoinRootPath(fsPath, root), config)
root = path.Join(fsPath, filepath.ToSlash(root))
if configName == "local" {
return fs.NewFs(root)
}
return fs.NewFs(configName + ":" + root)
}
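
For reference, the hand-rolled path handling in the NewFs variant above (path.Join plus filepath.ToSlash, with a special case for the local backend) reduces to a few lines. A standalone sketch, with made-up remote names:

package main

import (
	"fmt"
	"path"
	"path/filepath"
)

// aliasTarget mirrors the logic above: join the configured remote's
// path with the alias root, then dispatch by remote name.
func aliasTarget(configName, fsPath, root string) string {
	joined := path.Join(fsPath, filepath.ToSlash(root))
	if configName == "local" {
		return joined
	}
	return configName + ":" + joined
}

func main() {
	fmt.Println(aliasTarget("mydrive", "backup", "photos/2018")) // mydrive:backup/photos/2018
	fmt.Println(aliasTarget("local", "/tmp/data", "photos"))     // /tmp/data/photos
}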

backend/alias/alias_internal_test.go

@@ -1,16 +1,15 @@
package alias
import (
"context"
"fmt"
"path"
"path/filepath"
"sort"
"testing"
_ "github.com/rclone/rclone/backend/local" // pull in test backend
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
_ "github.com/ncw/rclone/backend/local" // pull in test backend
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/stretchr/testify/require"
)
@@ -70,7 +69,7 @@ func TestNewFS(t *testing.T) {
prepare(t, remoteRoot)
f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
require.NoError(t, err, what)
gotEntries, err := f.List(context.Background(), test.fsList)
gotEntries, err := f.List(test.fsList)
require.NoError(t, err, what)
sort.Sort(gotEntries)
@@ -81,7 +80,7 @@ func TestNewFS(t *testing.T) {
wantEntry := test.entries[i]
require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
require.Equal(t, wantEntry.size, gotEntry.Size(), what)
require.Equal(t, wantEntry.size, int64(gotEntry.Size()), what)
_, isDir := gotEntry.(fs.Directory)
require.Equal(t, wantEntry.isDir, isDir, what)
}

backend/all/all.go

@@ -2,35 +2,28 @@ package all
import (
// Active file systems
_ "github.com/rclone/rclone/backend/alias"
_ "github.com/rclone/rclone/backend/amazonclouddrive"
_ "github.com/rclone/rclone/backend/azureblob"
_ "github.com/rclone/rclone/backend/b2"
_ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/dropbox"
_ "github.com/rclone/rclone/backend/fichier"
_ "github.com/rclone/rclone/backend/ftp"
_ "github.com/rclone/rclone/backend/googlecloudstorage"
_ "github.com/rclone/rclone/backend/googlephotos"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic"
_ "github.com/rclone/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/koofr"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/mega"
_ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/webdav"
_ "github.com/rclone/rclone/backend/yandex"
_ "github.com/ncw/rclone/backend/alias"
_ "github.com/ncw/rclone/backend/amazonclouddrive"
_ "github.com/ncw/rclone/backend/azureblob"
_ "github.com/ncw/rclone/backend/b2"
_ "github.com/ncw/rclone/backend/box"
_ "github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/crypt"
_ "github.com/ncw/rclone/backend/drive"
_ "github.com/ncw/rclone/backend/dropbox"
_ "github.com/ncw/rclone/backend/ftp"
_ "github.com/ncw/rclone/backend/googlecloudstorage"
_ "github.com/ncw/rclone/backend/http"
_ "github.com/ncw/rclone/backend/hubic"
_ "github.com/ncw/rclone/backend/local"
_ "github.com/ncw/rclone/backend/mega"
_ "github.com/ncw/rclone/backend/onedrive"
_ "github.com/ncw/rclone/backend/opendrive"
_ "github.com/ncw/rclone/backend/pcloud"
_ "github.com/ncw/rclone/backend/qingstor"
_ "github.com/ncw/rclone/backend/s3"
_ "github.com/ncw/rclone/backend/sftp"
_ "github.com/ncw/rclone/backend/swift"
_ "github.com/ncw/rclone/backend/webdav"
_ "github.com/ncw/rclone/backend/yandex"
)
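
All of the entries above are blank imports: each backend package registers itself from init() (the fs.RegInfo literal in the alias diff earlier is one such registration), so importing for side effects is all this file needs to do. A toy version of the pattern, with made-up registry names:

package main

import "fmt"

// registry and Register stand in for rclone's fs.Register machinery;
// they are illustrative, not the real API.
var registry = map[string]string{}

func Register(name, description string) { registry[name] = description }

// In rclone each backend package has an init() like this one.
func init() { Register("alias", "Alias for an existing remote") }

func main() {
	fmt.Println(registry["alias"]) // registered as a side effect of init()
}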

backend/amazonclouddrive/amazonclouddrive.go

@@ -12,7 +12,6 @@ we ignore assets completely!
*/
import (
"context"
"encoding/json"
"fmt"
"io"
@@ -22,18 +21,19 @@ import (
"strings"
"time"
acd "github.com/ncw/go-acd"
"github.com/ncw/go-acd"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2"
)
@@ -97,42 +97,13 @@ func init() {
Hide: fs.OptionHideBoth,
Advanced: true,
}, {
Name: "upload_wait_per_gb",
Help: `Additional time per GB to wait after a failed complete upload to see if it appears.
Sometimes Amazon Drive gives an error when a file has been fully
uploaded but the file appears anyway after a little while. This
happens sometimes for files over 1GB in size and nearly every time for
files bigger than 10GB. This parameter controls the time rclone waits
for the file to appear.
The default value for this parameter is 3 minutes per GB, so by
default it will wait 3 minutes for every GB uploaded to see if the
file appears.
You can disable this feature by setting it to 0. This may cause
conflict errors as rclone retries the failed upload but the file will
most likely appear correctly eventually.
These values were determined empirically by observing lots of uploads
of big files for a range of file sizes.
Upload with the "-v" flag to see more info about what rclone is doing
in this situation.`,
Name: "upload_wait_per_gb",
Help: "Additional time per GB to wait after a failed complete upload to see if it appears.",
Default: fs.Duration(180 * time.Second),
Advanced: true,
}, {
Name: "templink_threshold",
Help: `Files >= this size will be downloaded via their tempLink.
Files this size or more will be downloaded via their "tempLink". This
is to work around a problem with Amazon Drive which blocks downloads
of files bigger than about 10GB. The default for this is 9GB which
shouldn't need to be changed.
To download files above this threshold, rclone requests a "tempLink"
which downloads the file through a temporary URL directly from the
underlying S3 storage.`,
Name: "templink_threshold",
Help: "Files >= this size will be downloaded via their tempLink.",
Default: defaultTempLinkThreshold,
Advanced: true,
}},
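
The long help text above pins down the default as 3 minutes of extra wait per GB uploaded, scaled linearly. A back-of-envelope sketch of that arithmetic (not rclone's exact code):

package main

import (
	"fmt"
	"time"
)

// uploadWait scales the per-GB wait by the uploaded size.
func uploadWait(sizeBytes int64, waitPerGB time.Duration) time.Duration {
	const gb = 1 << 30
	return time.Duration(float64(waitPerGB) * float64(sizeBytes) / gb)
}

func main() {
	fmt.Println(uploadWait(10<<30, 180*time.Second)) // 30m0s for a 10GB file
}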
@@ -155,7 +126,7 @@ type Fs struct {
noAuthClient *http.Client // unauthenticated http client
root string // the path we are working on
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
pacer *pacer.Pacer // pacer for API calls
trueRootID string // ID of true root directory
tokenRenewer *oauthutil.Renew // renew the token on expiry
}
@@ -247,7 +218,6 @@ func filterRequest(req *http.Request) {
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -265,7 +235,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Amazon Drive")
log.Fatalf("Failed to configure Amazon Drive: %v", err)
}
c := acd.NewClient(oAuthClient)
@@ -274,7 +244,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
c: c,
pacer: fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
noAuthClient: fshttp.NewClient(fs.Config),
}
f.features = (&fs.Features{
@@ -309,20 +279,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.dirCache = dircache.New(root, f.trueRootID, f)
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
err = f.dirCache.FindRoot(false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
tempF.root = newRoot
newF := *f
newF.dirCache = dircache.New(newRoot, f.trueRootID, &newF)
newF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false)
err = newF.dirCache.FindRoot(false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.newObjectWithInfo(ctx, remote, nil)
_, err := newF.newObjectWithInfo(remote, nil)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
@@ -330,13 +300,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
return nil, err
}
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
return &newF, fs.ErrorIsFile
}
return f, nil
}
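
The tempF handling above, and the issue 2182 comment explaining it, guard against a subtle Go pitfall: f.features was filled with callbacks bound to *f, so returning a copied struct would leave them reading stale fields. A minimal demonstration of the failure mode and the in-place fix:

package main

import "fmt"

type Fs struct {
	root string
	list func() string
}

func main() {
	f := &Fs{root: "old"}
	f.list = func() string { return f.root } // closure captures the pointer f
	tmp := *f                                // shallow copy, as the old NewFs did
	tmp.root = "new"
	fmt.Println(tmp.list()) // "old": the callback still reads through f
	f.root = tmp.root       // the fix: update f in place instead
	fmt.Println(f.list())   // "new"
}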
@@ -354,7 +319,7 @@ func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Node) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -363,7 +328,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Nod
// Set info but not meta
o.info = info
} else {
err := o.readMetaData(ctx) // reads info and meta, returning an error
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
return nil, err
}
@@ -373,12 +338,12 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Nod
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
folder := acd.FolderFromId(pathID, f.c.Nodes)
var resp *http.Response
@@ -405,7 +370,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
}
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
//fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
folder := acd.FolderFromId(pathID, f.c.Nodes)
var resp *http.Response
@@ -503,12 +468,12 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
directoryID, err := f.dirCache.FindDir(dir, false)
if err != nil {
return nil, err
}
@@ -526,7 +491,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
d := fs.NewDir(remote, when).SetID(*node.Id)
entries = append(entries, d)
case fileKind:
o, err := f.newObjectWithInfo(ctx, remote, node)
o, err := f.newObjectWithInfo(remote, node)
if err != nil {
iErr = err
return true
@@ -570,7 +535,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// At the end of large uploads. The speculation is that the timeout
// is waiting for the sha1 hashing to complete and the file may well
// be properly uploaded.
func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
// Return if no error - all is well
if inErr == nil {
return false, inInfo, inErr
@@ -610,7 +575,7 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader,
fs.Debugf(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
remote := src.Remote()
for i := 1; i <= retries; i++ {
o, err := f.NewObject(ctx, remote)
o, err := f.NewObject(remote)
if err == fs.ErrorObjectNotFound {
fs.Debugf(src, "Object not found - waiting (%d/%d)", i, retries)
} else if err != nil {
@@ -636,7 +601,7 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader,
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
size := src.Size()
// Temporary Object under construction
@@ -645,17 +610,17 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
remote: remote,
}
// Check if object already exists
err := o.readMetaData(ctx)
err := o.readMetaData()
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
return o, o.Update(in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
default:
return nil, err
}
// If not create it
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, true)
if err != nil {
return nil, err
}
@@ -671,7 +636,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
info, resp, err = folder.Put(in, leaf)
f.tokenRenewer.Stop()
var ok bool
ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
ok, info, err = f.checkUpload(resp, in, src, info, err, time.Since(start))
if ok {
return false, nil
}
@@ -685,13 +650,13 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, true)
func (f *Fs) Mkdir(dir string) error {
err := f.dirCache.FindRoot(true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
_, err = f.dirCache.FindDir(dir, true)
}
return err
}
@@ -705,7 +670,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
// go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$'
srcObj, ok := src.(*Object)
if !ok {
@@ -714,15 +679,15 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// create the destination directory if necessary
err := f.dirCache.FindRoot(ctx, true)
err := f.dirCache.FindRoot(true)
if err != nil {
return nil, err
}
srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(srcObj.remote, false)
if err != nil {
return nil, err
}
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, remote, true)
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(remote, true)
if err != nil {
return nil, err
}
@@ -738,12 +703,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
srcErr, dstErr error
)
for i := 1; i <= fs.Config.LowLevelRetries; i++ {
_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
_, srcErr = srcObj.fs.NewObject(srcObj.remote) // try reading the object
if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
// exit if error on source
return nil, srcErr
}
dstObj, dstErr = f.NewObject(ctx, remote)
dstObj, dstErr = f.NewObject(remote)
if dstErr != nil && dstErr != fs.ErrorObjectNotFound {
// exit if error on dst
return nil, dstErr
@@ -772,7 +737,7 @@ func (f *Fs) DirCacheFlush() {
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(src, "DirMove error: not same remote type")
@@ -788,14 +753,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// find the root src directory
err = srcFs.dirCache.FindRoot(ctx, false)
err = srcFs.dirCache.FindRoot(false)
if err != nil {
return err
}
// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true)
err = f.dirCache.FindRoot(true)
if err != nil {
return err
}
@@ -810,14 +775,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if dstRemote == "" {
findPath = f.root
}
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, findPath, true)
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(findPath, true)
if err != nil {
return err
}
// Check destination does not exist
if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
_, err = f.dirCache.FindDir(dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
@@ -833,7 +798,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if srcRemote == "" {
srcDirectoryID, err = srcFs.dirCache.RootParentID()
} else {
_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false)
_, srcDirectoryID, err = srcFs.dirCache.FindPath(findPath, false)
}
if err != nil {
return err
@@ -841,7 +806,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
srcLeaf, _ := dircache.SplitPath(srcPath)
// Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
if err != nil {
return err
}
@@ -874,17 +839,17 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
func (f *Fs) purgeCheck(dir string, check bool) error {
root := path.Join(f.root, dir)
if root == "" {
return errors.New("can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(ctx, false)
err := dc.FindRoot(false)
if err != nil {
return err
}
rootID, err := dc.FindDir(ctx, dir, false)
rootID, err := dc.FindDir(dir, false)
if err != nil {
return err
}
@@ -933,8 +898,8 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
func (f *Fs) Rmdir(dir string) error {
return f.purgeCheck(dir, true)
}
// Precision return the precision of this Fs
@@ -956,7 +921,7 @@ func (f *Fs) Hashes() hash.Set {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
//func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
//func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
// srcObj, ok := src.(*Object)
// if !ok {
// fs.Debugf(src, "Can't copy - not same remote type")
@@ -967,7 +932,7 @@ func (f *Fs) Hashes() hash.Set {
// if err != nil {
// return nil, err
// }
// return f.NewObject(ctx, remote), nil
// return f.NewObject(remote), nil
//}
// Purge deletes all the files and the container
@@ -975,8 +940,8 @@ func (f *Fs) Hashes() hash.Set {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
func (f *Fs) Purge() error {
return f.purgeCheck("", false)
}
// ------------------------------------------------------------
@@ -1000,7 +965,7 @@ func (o *Object) Remote() string {
}
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
@@ -1023,11 +988,11 @@ func (o *Object) Size() int64 {
// it also sets the info
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (o *Object) readMetaData(ctx context.Context) (err error) {
func (o *Object) readMetaData() (err error) {
if o.info != nil {
return nil
}
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(o.remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return fs.ErrorObjectNotFound
@@ -1056,8 +1021,8 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx)
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Debugf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -1071,7 +1036,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
func (o *Object) SetModTime(modTime time.Time) error {
// FIXME not implemented
return fs.ErrorCantSetModTime
}
@@ -1082,7 +1047,7 @@ func (o *Object) Storable() bool {
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
if bigObject {
fs.Debugf(o, "Downloading large object via tempLink")
@@ -1094,7 +1059,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if !bigObject {
in, resp, err = file.OpenHeaders(headers)
} else {
in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
in, resp, err = file.OpenTempURLHeaders(rest.ClientWithHeaderReset(o.fs.noAuthClient, headers), headers)
}
return o.fs.shouldRetry(resp, err)
})
@@ -1104,7 +1069,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
file := acd.File{Node: o.info}
var info *acd.File
var resp *http.Response
@@ -1115,7 +1080,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
info, resp, err = file.Overwrite(in)
o.fs.tokenRenewer.Stop()
var ok bool
ok, info, err = o.fs.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
ok, info, err = o.fs.checkUpload(resp, in, src, info, err, time.Since(start))
if ok {
return false, nil
}
@@ -1140,7 +1105,7 @@ func (f *Fs) removeNode(info *acd.Node) error {
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
func (o *Object) Remove() error {
return o.fs.removeNode(o.info)
}
@@ -1262,7 +1227,7 @@ OnConflict:
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
func (o *Object) MimeType() string {
if o.info.ContentProperties != nil && o.info.ContentProperties.ContentType != nil {
return *o.info.ContentProperties.ContentType
}
@@ -1275,38 +1240,24 @@ func (o *Object) MimeType(ctx context.Context) string {
// Automatically restarts itself in case of unexpected behaviour of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
checkpoint := f.opt.Checkpoint
quit := make(chan bool)
go func() {
var ticker *time.Ticker
var tickerC <-chan time.Time
for {
checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
fs.Debugf(f, "Unable to save checkpoint: %v", err)
}
select {
case pollInterval, ok := <-pollIntervalChan:
if !ok {
if ticker != nil {
ticker.Stop()
}
return
}
if pollInterval == 0 {
if ticker != nil {
ticker.Stop()
ticker, tickerC = nil, nil
}
} else {
ticker = time.NewTicker(pollInterval)
tickerC = ticker.C
}
case <-tickerC:
checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
fs.Debugf(f, "Unable to save checkpoint: %v", err)
}
case <-quit:
return
case <-time.After(pollInterval):
}
}
}()
return quit
}
func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string {
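
The channel-driven ChangeNotify in the diff above manages its own ticker: receiving on a nil channel blocks forever, so clearing tickerC pauses polling until a new interval arrives. A condensed, runnable sketch of that loop (work stands in for changeNotifyRunner):

package main

import (
	"fmt"
	"time"
)

func poll(pollIntervalChan <-chan time.Duration, work func()) {
	var ticker *time.Ticker
	var tickerC <-chan time.Time
	for {
		select {
		case interval, ok := <-pollIntervalChan:
			if !ok {
				if ticker != nil {
					ticker.Stop()
				}
				return
			}
			if ticker != nil {
				ticker.Stop()
				ticker, tickerC = nil, nil // nil channel: polling paused
			}
			if interval > 0 {
				ticker = time.NewTicker(interval)
				tickerC = ticker.C
			}
		case <-tickerC:
			work()
		}
	}
}

func main() {
	intervals := make(chan time.Duration)
	done := make(chan struct{})
	go func() { poll(intervals, func() { fmt.Println("poll remote for changes") }); close(done) }()
	intervals <- 10 * time.Millisecond
	time.Sleep(35 * time.Millisecond)
	close(intervals) // closing the channel stops the loop
	<-done
}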

backend/amazonclouddrive/amazonclouddrive_test.go

@@ -7,9 +7,9 @@ package amazonclouddrive_test
import (
"testing"
"github.com/rclone/rclone/backend/amazonclouddrive"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/backend/amazonclouddrive"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote

backend/azureblob/azureblob.go (file diff suppressed because it is too large)

backend/azureblob/azureblob_internal_test.go

@@ -1,18 +0,0 @@
// +build !plan9,!solaris
package azureblob
import (
"testing"
"github.com/stretchr/testify/assert"
)
func (f *Fs) InternalTest(t *testing.T) {
// Check first feature flags are set on this
// remote
enabled := f.Features().SetTier
assert.True(t, enabled)
enabled = f.Features().GetTier
assert.True(t, enabled)
}

backend/azureblob/azureblob_test.go

@@ -1,37 +1,20 @@
// Test AzureBlob filesystem interface
// +build !plan9,!solaris
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
package azureblob
package azureblob_test
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/backend/azureblob"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MaxChunkSize: maxChunkSize,
},
RemoteName: "TestAzureBlob:",
NilObject: (*azureblob.Object)(nil),
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)

backend/azureblob/azureblob_unsupported.go

@@ -1,6 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9 solaris
// +build freebsd netbsd openbsd plan9 solaris !go1.8
package azureblob

backend/b2/api/types.go

@@ -7,7 +7,7 @@ import (
"strings"
"time"
"github.com/rclone/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fserrors"
)
// Error describes a B2 error response
@@ -17,12 +17,12 @@ type Error struct {
Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
}
// Error satisfies the error interface
// Error statisfies the error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
}
// Fatal satisfies the Fatal interface
// Fatal statisfies the Fatal interface
//
// It indicates which errors should be treated as fatal
func (e *Error) Fatal() bool {
@@ -31,6 +31,11 @@ func (e *Error) Fatal() bool {
var _ fserrors.Fataler = (*Error)(nil)
// Account describes a B2 account
type Account struct {
ID string `json:"accountId"` // The identifier for the account.
}
// Bucket describes a B2 bucket
type Bucket struct {
ID string `json:"bucketId"`
@@ -69,7 +74,7 @@ const versionFormat = "-v2006-01-02-150405.000"
func (t Timestamp) AddVersion(remote string) string {
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
s := time.Time(t).Format(versionFormat)
s := (time.Time)(t).Format(versionFormat)
// Replace the '.' with a '-'
s = strings.Replace(s, ".", "-", -1)
return base + s + ext
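
To make the versioned-name scheme concrete, AddVersion's core from the hunk above runs fine on its own:

package main

import (
	"fmt"
	"path"
	"strings"
	"time"
)

const versionFormat = "-v2006-01-02-150405.000"

// addVersion splices the formatted timestamp in before the extension,
// replacing the '.' so the suffix stays filename-friendly.
func addVersion(remote string, t time.Time) string {
	ext := path.Ext(remote)
	base := remote[:len(remote)-len(ext)]
	s := t.Format(versionFormat)
	s = strings.Replace(s, ".", "-", -1)
	return base + s + ext
}

func main() {
	t := time.Date(2018, 7, 18, 22, 35, 56, 0, time.UTC)
	fmt.Println(addVersion("file.txt", t)) // file-v2018-07-18-223556-000.txt
}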
@@ -100,22 +105,22 @@ func RemoveVersion(remote string) (t Timestamp, newRemote string) {
return Timestamp(newT), base[:versionStart] + ext
}
// IsZero returns true if the timestamp is uninitialized
// IsZero returns true if the timestamp is unitialised
func (t Timestamp) IsZero() bool {
return time.Time(t).IsZero()
return (time.Time)(t).IsZero()
}
// Equal compares two timestamps
//
// If either is zero then it returns false
func (t Timestamp) Equal(s Timestamp) bool {
if time.Time(t).IsZero() {
if (time.Time)(t).IsZero() {
return false
}
if time.Time(s).IsZero() {
if (time.Time)(s).IsZero() {
return false
}
return time.Time(t).Equal(time.Time(s))
return (time.Time)(t).Equal((time.Time)(s))
}
// File is info about a file
@@ -132,27 +137,10 @@ type File struct {
// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
AccountID string `json:"accountId"` // The identifier for the account.
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
} `json:"allowed"`
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
MinimumPartSize int `json:"minimumPartSize"` // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead.
RecommendedPartSize int `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance.
}
// ListBucketsRequest is parameters for b2_list_buckets call
type ListBucketsRequest struct {
AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId,omitempty"` // When specified, the result will be a list containing just this bucket.
BucketName string `json:"bucketName,omitempty"` // When specified, the result will be a list containing just this bucket.
BucketTypes []string `json:"bucketTypes,omitempty"` // If present, B2 will use it as a filter for bucket types returned in the list buckets response.
AccountID string `json:"accountId"` // The identifier for the account.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
}
// ListBucketsResponse is as returned from the b2_list_buckets call
@@ -189,21 +177,6 @@ type GetUploadURLResponse struct {
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
}
// GetDownloadAuthorizationRequest is passed to b2_get_download_authorization
type GetDownloadAuthorizationRequest struct {
BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to.
FileNamePrefix string `json:"fileNamePrefix"` // The file name prefix of files the download authorization token will allow access to.
ValidDurationInSeconds int64 `json:"validDurationInSeconds"` // The number of seconds before the authorization token will expire. The minimum value is 1 second. The maximum value is 604800 which is one week in seconds.
B2ContentDisposition string `json:"b2ContentDisposition,omitempty"` // optional - If this is present, download requests using the returned authorization must include the same value for b2ContentDisposition.
}
// GetDownloadAuthorizationResponse is received from b2_get_download_authorization
type GetDownloadAuthorizationResponse struct {
BucketID string `json:"bucketId"` // The unique ID of the bucket.
FileNamePrefix string `json:"fileNamePrefix"` // The file name prefix of files the download authorization token will allow access to.
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when downloading files, see b2_download_file_by_name.
}
// FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
type FileInfo struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
@@ -326,14 +299,3 @@ type CancelLargeFileResponse struct {
AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId"` // The unique ID of the bucket.
}
// CopyFileRequest is as passed to b2_copy_file
type CopyFileRequest struct {
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
Name string `json:"fileName"` // The name of the new file being created.
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
MetadataDirective string `json:"metadataDirective,omitempty"` // The strategy for how to populate metadata for the new file: COPY or REPLACE
ContentType string `json:"contentType,omitempty"` // The MIME type of the content of the file (REPLACE only)
Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only)
DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
}
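
For a feel of the wire format, marshalling a cut-down copy of the CopyFileRequest above yields the JSON body sent to b2_copy_file (the source file ID is a placeholder, and optional fields are omitted):

package main

import (
	"encoding/json"
	"fmt"
)

// CopyFileRequest is trimmed to a few fields from the full definition above.
type CopyFileRequest struct {
	SourceID string `json:"sourceFileId"`
	Name     string `json:"fileName"`
	Range    string `json:"range,omitempty"`
}

func main() {
	body, _ := json.Marshal(CopyFileRequest{
		SourceID: "4_zexample_fileid", // placeholder, not a real B2 ID
		Name:     "copied/file.txt",
	})
	fmt.Println(string(body)) // {"sourceFileId":"4_zexample_fileid","fileName":"copied/file.txt"}
}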

backend/b2/api/types_test.go

@@ -4,8 +4,8 @@ import (
"testing"
"time"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fstest"
"github.com/ncw/rclone/backend/b2/api"
"github.com/ncw/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

backend/b2/b2.go (file diff suppressed because it is too large)

backend/b2/b2_internal_test.go

@@ -4,7 +4,7 @@ import (
"testing"
"time"
"github.com/rclone/rclone/fstest"
"github.com/ncw/rclone/fstest"
)
// Test b2 string encoding

backend/b2/b2_test.go

@@ -1,34 +1,17 @@
// Test B2 filesystem interface
package b2
package b2_test
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/backend/b2"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestB2:",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
NeedMultipleChunks: true,
},
NilObject: (*b2.Object)(nil),
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)
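
The var _ fstests.SetUploadChunkSizer = (*Fs)(nil) lines above are the standard Go idiom for a compile-time interface check. In miniature, with simplified types standing in for fs.SizeSuffix:

package main

type SetUploadChunkSizer interface {
	SetUploadChunkSize(int64) (int64, error)
}

type Fs struct{ chunkSize int64 }

// SetUploadChunkSize swaps in a new chunk size and returns the old one.
func (f *Fs) SetUploadChunkSize(cs int64) (int64, error) {
	old := f.chunkSize
	f.chunkSize = cs
	return old, nil
}

// Compilation fails here if *Fs ever stops satisfying the interface.
var _ SetUploadChunkSizer = (*Fs)(nil)

func main() {}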

View File

@@ -6,7 +6,6 @@ package b2
import (
"bytes"
"context"
"crypto/sha1"
"encoding/hex"
"fmt"
@@ -15,12 +14,12 @@ import (
"strings"
"sync"
"github.com/ncw/rclone/backend/b2/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
)
type hashAppendingReader struct {
@@ -81,7 +80,7 @@ type largeUpload struct {
}
// newLargeUpload starts an upload of object o from in with metadata in src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
remote := o.remote
size := src.Size()
parts := int64(0)
@@ -99,29 +98,26 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
sha1SliceSize = parts
}
modTime := src.ModTime(ctx)
modTime := src.ModTime()
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
}
bucket, bucketPath := o.split()
bucketID, err := f.getBucketID(bucket)
bucketID, err := f.getBucketID()
if err != nil {
return nil, err
}
var request = api.StartLargeFileRequest{
BucketID: bucketID,
Name: bucketPath,
ContentType: fs.MimeType(ctx, src),
Name: o.fs.root + remote,
ContentType: fs.MimeType(src),
Info: map[string]string{
timeKey: timeString(modTime),
},
}
// Set the SHA1 if known
if !o.fs.opt.DisableCheckSum {
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
}
if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
}
var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) {

backend/box/api/types.go

@@ -45,7 +45,7 @@ type Error struct {
RequestID string `json:"request_id"`
}
// Error returns a string for the error and satisfies the error interface
// Error returns a string for the error and statistifes the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("Error %q (%d)", e.Code, e.Status)
if e.Message != "" {
@@ -57,11 +57,11 @@ func (e *Error) Error() string {
return out
}
// Check Error satisfies the error interface
// Check Error statisfies the error interface
var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status"
// Types of things in Item
const (
@@ -86,10 +86,6 @@ type Item struct {
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
SharedLink struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
} `json:"shared_link"`
}
// ModTime returns the modification time of the item
@@ -149,14 +145,6 @@ type CopyFile struct {
Parent Parent `json:"parent"`
}
// CreateSharedLink is the request for Public Link
type CreateSharedLink struct {
SharedLink struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
} `json:"shared_link"`
}
// UploadSessionRequest is uses in Create Upload Session
type UploadSessionRequest struct {
FolderID string `json:"folder_id,omitempty"` // don't pass for update
@@ -184,8 +172,8 @@ type UploadSessionResponse struct {
// Part defines the return from upload part call which are passed to commit upload also
type Part struct {
PartID string `json:"part_id"`
Offset int64 `json:"offset"`
Size int64 `json:"size"`
Offset int `json:"offset"`
Size int `json:"size"`
Sha1 string `json:"sha1"`
}

backend/box/box.go

@@ -10,7 +10,6 @@ package box
// FIXME box can copy a directory
import (
"context"
"fmt"
"io"
"log"
@@ -21,19 +20,19 @@ import (
"strings"
"time"
"github.com/ncw/rclone/backend/box/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)
@@ -86,22 +85,16 @@ func init() {
Help: "Box App Client Secret\nLeave blank normally.",
}, {
Name: "upload_cutoff",
Help: "Cutoff for switching to multipart upload (>= 50MB).",
Help: "Cutoff for switching to multipart upload.",
Default: fs.SizeSuffix(defaultUploadCutoff),
Advanced: true,
}, {
Name: "commit_retries",
Help: "Max number of times to try committing a multipart file.",
Default: 100,
Advanced: true,
}},
})
}
// Options defines the configuration for this backend
type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CommitRetries int `config:"commit_retries"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
}
// Fs represents a remote box
@@ -112,7 +105,7 @@ type Fs struct {
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
pacer *pacer.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
uploadToken *pacer.TokenDispenser // control concurrency
}
@@ -127,7 +120,6 @@ type Object struct {
size int64 // size of the object
modTime time.Time // modification time of the object
id string // ID of the object
publicLink string // Public Link for the object
sha1 string // SHA-1 of the object content
}
@@ -172,13 +164,13 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
authRetry := false
authRety := false
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
authRetry = true
authRety = true
fs.Debugf(nil, "Should retry: %v", err)
}
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// substitute reserved characters for box
@@ -194,9 +186,9 @@ func restoreReservedChars(x string) string {
}
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, err error) {
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
leaf, directoryID, err := f.dirCache.FindRootAndPath(path, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
@@ -239,7 +231,6 @@ func errorHandler(resp *http.Response) error {
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -254,7 +245,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root = parsePath(root)
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
log.Fatalf("Failed to configure Box: %v", err)
}
f := &Fs{
@@ -262,7 +253,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
}
f.features = (&fs.Features{
@@ -273,7 +264,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
_, err := f.readMetaDataForPath("")
return err
})
@@ -281,20 +272,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.dirCache = dircache.New(root, rootID, f)
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
err = f.dirCache.FindRoot(false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.root = newRoot
newF := *f
newF.dirCache = dircache.New(newRoot, rootID, &newF)
newF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false)
err = newF.dirCache.FindRoot(false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.newObjectWithInfo(ctx, remote, nil)
_, err := newF.newObjectWithInfo(remote, nil)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
@@ -302,14 +293,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
return nil, err
}
f.features.Fill(&tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
return &newF, fs.ErrorIsFile
}
return f, nil
}
@@ -325,7 +310,7 @@ func (f *Fs) rootSlash() string {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -335,7 +320,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Ite
// Set info
err = o.setMetaData(info)
} else {
err = o.readMetaData(ctx) // reads info and meta, returning an error
err = o.readMetaData() // reads info and meta, returning an error
}
if err != nil {
return nil, err
@@ -345,12 +330,12 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Ite
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf {
@@ -370,7 +355,7 @@ func fieldsValue() url.Values {
}
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
var resp *http.Response
var info *api.Item
@@ -469,12 +454,12 @@ OUTER:
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
directoryID, err := f.dirCache.FindDir(dir, false)
if err != nil {
return nil, err
}
@@ -488,7 +473,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// FIXME more info from dir?
entries = append(entries, d)
} else if info.Type == api.ItemTypeFile {
o, err := f.newObjectWithInfo(ctx, remote, info)
o, err := f.newObjectWithInfo(remote, info)
if err != nil {
iErr = err
return true
@@ -512,9 +497,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Returns the object, leaf, directoryID and error
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
// Create the directory for the object if it doesn't exist
leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
leaf, directoryID, err = f.dirCache.FindRootAndPath(remote, true)
if err != nil {
return
}
@@ -531,22 +516,22 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
exisitingObj, err := f.newObjectWithInfo(src.Remote(), nil)
switch err {
case nil:
return existingObj, existingObj.Update(ctx, in, src, options...)
return exisitingObj, exisitingObj.Update(in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src)
return f.PutUnchecked(in, src)
default:
return nil, err
}
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// PutUnchecked the object into the container
@@ -556,26 +541,26 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
size := src.Size()
modTime := src.ModTime(ctx)
modTime := src.ModTime()
o, _, _, err := f.createObject(ctx, remote, modTime, size)
o, _, _, err := f.createObject(remote, modTime, size)
if err != nil {
return nil, err
}
return o, o.Update(ctx, in, src, options...)
return o, o.Update(in, src, options...)
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, true)
func (f *Fs) Mkdir(dir string) error {
err := f.dirCache.FindRoot(true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
_, err = f.dirCache.FindDir(dir, true)
}
return err
}
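Mkdir leans on the directory cache, which resolves remote paths to backend directory IDs and creates intermediate levels when asked. A sketch of those two calls packaged as a helper, assuming the dircache API exactly as invoked above (ensureDirID is an illustrative name; dir must be non-empty):

// ensureDirID resolves a non-empty directory path to its backend ID,
// creating missing levels on the way down, as Mkdir does above.
func ensureDirID(f *Fs, dir string) (string, error) {
    if err := f.dirCache.FindRoot(true); err != nil {
        return "", err
    }
    return f.dirCache.FindDir(dir, true)
}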
@@ -595,17 +580,17 @@ func (f *Fs) deleteObject(id string) error {
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
func (f *Fs) purgeCheck(dir string, check bool) error {
root := path.Join(f.root, dir)
if root == "" {
return errors.New("can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(ctx, false)
err := dc.FindRoot(false)
if err != nil {
return err
}
rootID, err := dc.FindDir(ctx, dir, false)
rootID, err := dc.FindDir(dir, false)
if err != nil {
return err
}
@@ -635,8 +620,8 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
func (f *Fs) Rmdir(dir string) error {
return f.purgeCheck(dir, true)
}
// Precision returns the precision of this Fs
@@ -653,13 +638,13 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
err := srcObj.readMetaData(ctx)
err := srcObj.readMetaData()
if err != nil {
return nil, err
}
@@ -671,7 +656,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
if err != nil {
return nil, err
}
@@ -683,7 +668,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
Parameters: fieldsValue(),
}
replacedLeaf := replaceReservedChars(leaf)
copyFile := api.CopyFile{
copy := api.CopyFile{
Name: replacedLeaf,
Parent: api.Parent{
ID: directoryID,
@@ -692,7 +677,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var resp *http.Response
var info *api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &copyFile, &info)
resp, err = f.srv.CallJSON(&opts, &copy, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -710,8 +695,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
func (f *Fs) Purge() error {
return f.purgeCheck("", false)
}
// move a file or folder
@@ -748,7 +733,7 @@ func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err e
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
@@ -756,7 +741,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
if err != nil {
return nil, err
}
@@ -782,7 +767,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -798,14 +783,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// find the root src directory
err := srcFs.dirCache.FindRoot(ctx, false)
err := srcFs.dirCache.FindRoot(false)
if err != nil {
return err
}
// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true)
err = f.dirCache.FindRoot(true)
if err != nil {
return err
}
@@ -821,14 +806,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if dstRemote == "" {
findPath = f.root
}
leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
leaf, directoryID, err = f.dirCache.FindPath(findPath, true)
if err != nil {
return err
}
// Check destination does not exist
if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
_, err = f.dirCache.FindDir(dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
@@ -839,7 +824,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
if err != nil {
return err
}
@@ -853,46 +838,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return nil
}
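The destination check inside DirMove treats fs.ErrorDirNotFound as the success case, which is easy to misread. The same semantics restructured as a switch, as a sketch only (checkDstAbsent is an illustrative name):

// checkDstAbsent returns nil only when dstRemote does not exist yet.
func checkDstAbsent(f *Fs, dstRemote string) error {
    _, err := f.dirCache.FindDir(dstRemote, false)
    switch err {
    case fs.ErrorDirNotFound:
        return nil // free to move here
    case nil:
        return fs.ErrorDirExists // destination already present
    default:
        return err
    }
}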
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
id, err := f.dirCache.FindDir(ctx, remote, false)
var opts rest.Opts
if err == nil {
fs.Debugf(f, "attempting to share directory '%s'", remote)
opts = rest.Opts{
Method: "PUT",
Path: "/folders/" + id,
Parameters: fieldsValue(),
}
} else {
fs.Debugf(f, "attempting to share single file '%s'", remote)
o, err := f.NewObject(ctx, remote)
if err != nil {
return "", err
}
if o.(*Object).publicLink != "" {
return o.(*Object).publicLink, nil
}
opts = rest.Opts{
Method: "PUT",
Path: "/files/" + o.(*Object).id,
Parameters: fieldsValue(),
}
}
shareLink := api.CreateSharedLink{}
var info api.Item
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &shareLink, &info)
return shouldRetry(resp, err)
})
return info.SharedLink.URL, err
}
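Callers normally reach PublicLink through the optional-features table rather than the concrete backend type, failing gracefully when a backend lacks it. A hedged sketch, assuming the pre-context fs.Features field with signature func(remote string) (string, error); sharedLink is an illustrative name:

// sharedLink asks any backend for a public link, or reports that the
// backend does not implement fs.PublicLinker.
func sharedLink(f fs.Fs, remote string) (string, error) {
    do := f.Features().PublicLink
    if do == nil {
        return "", errors.New("PublicLink not supported by this backend")
    }
    return do(remote)
}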
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
@@ -930,7 +875,7 @@ func (o *Object) srvPath() string {
}
// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.SHA1 {
return "", hash.ErrUnsupported
}
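For reference, the lowercase hex contract of Hash is the same encoding the standard library produces; a self-contained check:

package main

import (
    "crypto/sha1"
    "encoding/hex"
    "fmt"
)

func main() {
    sum := sha1.Sum([]byte("hello"))
    // prints aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d: lowercase hex, as Hash returns
    fmt.Println(hex.EncodeToString(sum[:]))
}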
@@ -939,7 +884,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
err := o.readMetaData(context.TODO())
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return 0
@@ -957,18 +902,17 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
o.sha1 = info.SHA1
o.modTime = info.ModTime()
o.id = info.ID
o.publicLink = info.SharedLink.URL
return nil
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
func (o *Object) readMetaData() (err error) {
if o.hasMetaData {
return nil
}
info, err := o.fs.readMetaDataForPath(ctx, o.remote)
info, err := o.fs.readMetaDataForPath(o.remote)
if err != nil {
if apiErr, ok := err.(*api.Error); ok {
if apiErr.Code == "not_found" || apiErr.Code == "trashed" {
@@ -985,8 +929,8 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx)
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -995,7 +939,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
}
// setModTime sets the modification time of the local fs object
func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) {
func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
opts := rest.Opts{
Method: "PUT",
Path: "/files/" + o.id,
@@ -1013,8 +957,8 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
info, err := o.setModTime(ctx, modTime)
func (o *Object) SetModTime(modTime time.Time) error {
info, err := o.setModTime(modTime)
if err != nil {
return err
}
@@ -1027,7 +971,7 @@ func (o *Object) Storable() bool {
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.id == "" {
return nil, errors.New("can't download - no id")
}
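Open takes fs.OpenOption values such as fs.SeekOption, which the cache tests further down use for ranged reads. A sketch against the pre-context API shown here (readRange is an illustrative helper):

// readRange reads size bytes starting at offset from an object.
func readRange(o fs.Object, offset, size int64) ([]byte, error) {
    in, err := o.Open(&fs.SeekOption{Offset: offset})
    if err != nil {
        return nil, err
    }
    defer func() { _ = in.Close() }()
    buf := make([]byte, size)
    // io.ReadFull returns io.ErrUnexpectedEOF if the object is shorter
    _, err = io.ReadFull(in, buf)
    return buf, err
}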
@@ -1064,8 +1008,8 @@ func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Tim
var resp *http.Response
var result api.FolderItems
opts := rest.Opts{
Method: "POST",
Body: in,
Method: "POST",
Body: in,
MultipartMetadataName: "attributes",
MultipartContentName: "contents",
MultipartFileName: upload.Name,
@@ -1095,16 +1039,16 @@ func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Tim
// If existing is set then it updates the object rather than creating a new one
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
size := src.Size()
modTime := src.ModTime(ctx)
modTime := src.ModTime()
remote := o.Remote()
// Create the directory for the object if it doesn't exist
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(remote, true)
if err != nil {
return err
}
@@ -1119,7 +1063,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
func (o *Object) Remove() error {
return o.fs.deleteObject(o.id)
}
@@ -1137,7 +1081,6 @@ var (
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)
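The var block above is Go's compile-time interface assertion idiom: assigning a typed nil to a blank variable of the interface type makes the build fail the moment *Fs or *Object stops satisfying one of the listed interfaces. A minimal standalone illustration:

package main

import "io"

type file struct{}

func (file) Read(p []byte) (int, error) { return 0, io.EOF }

// Compilation breaks here if *file ever loses the Read method.
var _ io.Reader = (*file)(nil)

func main() {}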

View File

@@ -4,8 +4,8 @@ package box_test
import (
"testing"
"github.com/rclone/rclone/backend/box"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/backend/box"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote

View File

@@ -14,11 +14,11 @@ import (
"sync"
"time"
"github.com/ncw/rclone/backend/box/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/rest"
)
// createUploadSession creates an upload session for the object
@@ -96,9 +96,7 @@ func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.T
request.Attributes.ContentCreatedAt = api.Time(modTime)
var body []byte
var resp *http.Response
// For discussion of this value see:
// https://github.com/rclone/rclone/issues/2054
maxTries := o.fs.opt.CommitRetries
maxTries := fs.Config.LowLevelRetries
const defaultDelay = 10
var tries int
outer:
@@ -112,7 +110,7 @@ outer:
return shouldRetry(resp, err)
})
delay := defaultDelay
var why string
why := "unknown"
if err != nil {
// Sometimes we get 400 Error with
// parts_mismatch immediately after uploading
@@ -211,8 +209,8 @@ outer:
}
reqSize := remaining
if reqSize >= chunkSize {
reqSize = chunkSize
if reqSize >= int64(chunkSize) {
reqSize = int64(chunkSize)
}
// Make a block of memory
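The reqSize clamp above is the final-chunk case of a standard chunked-upload loop; for context, a sketch of the whole loop (uploadChunk is a placeholder for the actual transfer):

// uploadInChunks sends remaining bytes in pieces of at most chunkSize.
func uploadInChunks(remaining, chunkSize int64, uploadChunk func(n int64) error) error {
    for remaining > 0 {
        reqSize := remaining
        if reqSize >= chunkSize {
            reqSize = chunkSize // full chunk; only the last may be smaller
        }
        if err := uploadChunk(reqSize); err != nil {
            return err
        }
        remaining -= reqSize
    }
    return nil
}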

620
backend/cache/cache.go vendored

File diff suppressed because it is too large

View File

@@ -4,10 +4,6 @@ package cache_test
import (
"bytes"
"context"
"encoding/base64"
goflag "flag"
"fmt"
"io"
"io/ioutil"
"log"
@@ -16,27 +12,34 @@ import (
"path"
"path/filepath"
"runtime"
"runtime/debug"
"strconv"
"strings"
"testing"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/cache"
"github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
"github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/stretchr/testify/assert"
"encoding/base64"
goflag "flag"
"fmt"
"runtime/debug"
"encoding/json"
"net/http"
"github.com/ncw/rclone/backend/cache"
"github.com/ncw/rclone/backend/crypt"
_ "github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/object"
"github.com/ncw/rclone/fs/rc"
"github.com/ncw/rclone/fs/rc/rcflags"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/vfs"
"github.com/ncw/rclone/vfs/vfsflags"
"github.com/stretchr/testify/require"
)
@@ -122,7 +125,7 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {
require.NoError(t, err)
listRootInner, err := runInstance.list(t, rootFs, innerFolder)
require.NoError(t, err)
listInner, err := rootFs2.List(context.Background(), "")
listInner, err := rootFs2.List("")
require.NoError(t, err)
require.Len(t, listRoot, 1)
@@ -140,10 +143,10 @@ func TestInternalVfsCache(t *testing.T) {
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "test")
err := rootFs.Mkdir("test")
require.NoError(t, err)
runInstance.writeObjectString(t, rootFs, "test/second", "content")
_, err = rootFs.List(context.Background(), "test")
_, err = rootFs.List("test")
require.NoError(t, err)
testReader := runInstance.randomReader(t, testSize)
@@ -268,7 +271,7 @@ func TestInternalObjNotFound(t *testing.T) {
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
obj, err := rootFs.NewObject(context.Background(), "404")
obj, err := rootFs.NewObject("404")
require.Error(t, err)
require.Nil(t, obj)
}
@@ -356,8 +359,8 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testData2, err = base64.StdEncoding.DecodeString(cryptedText2Base64)
require.NoError(t, err)
} else {
testData1 = []byte(random.String(100))
testData2 = []byte(random.String(200))
testData1 = []byte(fstest.RandomString(100))
testData2 = []byte(fstest.RandomString(200))
}
// write the object
@@ -389,10 +392,10 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
// write the object
o := runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
require.Equal(t, o.Size(), testSize)
require.Equal(t, o.Size(), int64(testSize))
time.Sleep(time.Second * 3)
checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, int64(testSize), false)
require.NoError(t, err)
require.Equal(t, int64(len(checkSample)), o.Size())
@@ -447,7 +450,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
require.NoError(t, err)
log.Printf("original size: %v", originalSize)
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
expectedSize := int64(len([]byte("test content")))
var data2 []byte
@@ -459,7 +462,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
data2 = []byte("test content")
}
objInfo := object.NewStaticObjectInfo(runInstance.encryptRemoteIfNeeded(t, "data.bin"), time.Now(), int64(len(data2)), true, nil, cfs.UnWrap())
err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
err = o.Update(bytes.NewReader(data2), objInfo)
require.NoError(t, err)
require.Equal(t, int64(len(data2)), o.Size())
log.Printf("updated size: %v", len(data2))
@@ -505,9 +508,9 @@ func TestInternalMoveWithNotify(t *testing.T) {
} else {
testData = []byte("test content")
}
_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test"))
_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/one"))
_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/second"))
_ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test"))
_ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/one"))
_ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/second"))
srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)
// list in mount
@@ -517,7 +520,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
require.NoError(t, err)
// move file
_, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
_, err = cfs.UnWrap().Features().Move(srcObj, dstName)
require.NoError(t, err)
err = runInstance.retryBlock(func() error {
@@ -591,9 +594,9 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
} else {
testData = []byte("test content")
}
err = rootFs.Mkdir(context.Background(), "test")
err = rootFs.Mkdir("test")
require.NoError(t, err)
err = rootFs.Mkdir(context.Background(), "test/one")
err = rootFs.Mkdir("test/one")
require.NoError(t, err)
srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)
@@ -610,7 +613,7 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
require.False(t, found)
// move file
_, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
_, err = cfs.UnWrap().Features().Move(srcObj, dstName)
require.NoError(t, err)
err = runInstance.retryBlock(func() error {
@@ -672,28 +675,28 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
// update in the wrapped fs
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
wrappedTime := time.Now().Add(-1 * time.Hour)
err = o.SetModTime(context.Background(), wrappedTime)
err = o.SetModTime(wrappedTime)
require.NoError(t, err)
// get a new instance from the cache
co, err := rootFs.NewObject(context.Background(), "data.bin")
co, err := rootFs.NewObject("data.bin")
require.NoError(t, err)
require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
cfs.DirCacheFlush() // flush the cache
// get a new instance from the cache
co, err = rootFs.NewObject(context.Background(), "data.bin")
co, err = rootFs.NewObject("data.bin")
require.NoError(t, err)
require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
}
func TestInternalChangeSeenAfterRc(t *testing.T) {
cacheExpire := rc.Calls.Get("cache/expire")
assert.NotNil(t, cacheExpire)
rcflags.Opt.Enabled = true
rc.Start(&rcflags.Opt)
id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
@@ -715,44 +718,50 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
// update in the wrapped fs
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
wrappedTime := time.Now().Add(-1 * time.Hour)
err = o.SetModTime(context.Background(), wrappedTime)
err = o.SetModTime(wrappedTime)
require.NoError(t, err)
// get a new instance from the cache
co, err := rootFs.NewObject(context.Background(), "data.bin")
co, err := rootFs.NewObject("data.bin")
require.NoError(t, err)
require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
// Call the rc function
m, err := cacheExpire.Fn(context.Background(), rc.Params{"remote": "data.bin"})
m := make(map[string]string)
res, err := http.Post(fmt.Sprintf("http://localhost:5572/cache/expire?remote=%s", "data.bin"), "application/json; charset=utf-8", strings.NewReader(""))
require.NoError(t, err)
defer func() {
_ = res.Body.Close()
}()
_ = json.NewDecoder(res.Body).Decode(&m)
require.Contains(t, m, "status")
require.Contains(t, m, "message")
require.Equal(t, "ok", m["status"])
require.Contains(t, m["message"], "cached file cleared")
// get a new instance from the cache
co, err = rootFs.NewObject(context.Background(), "data.bin")
require.NoError(t, err)
require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
_, err = runInstance.list(t, rootFs, "")
co, err = rootFs.NewObject("data.bin")
require.NoError(t, err)
require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
li1, err := runInstance.list(t, rootFs, "")
// create some rand test data
testData2 := randStringBytes(int(chunkSize))
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)
// list should have 1 item only
li1, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
li1, err = runInstance.list(t, rootFs, "")
require.Len(t, li1, 1)
// Call the rc function
m, err = cacheExpire.Fn(context.Background(), rc.Params{"remote": "/"})
m = make(map[string]string)
res2, err := http.Post("http://localhost:5572/cache/expire?remote=/", "application/json; charset=utf-8", strings.NewReader(""))
require.NoError(t, err)
defer func() {
_ = res2.Body.Close()
}()
_ = json.NewDecoder(res2.Body).Decode(&m)
require.Contains(t, m, "status")
require.Contains(t, m, "message")
require.Equal(t, "ok", m["status"])
@@ -760,7 +769,6 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
// list should have 2 items now
li2, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, li2, 2)
}
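The old side of this test exercises the cache through rclone's remote-control HTTP API instead of calling the rc function in-process. A hedged sketch of that call shape, assuming an rc server listening on localhost:5572 exactly as the test does (expireCached is an illustrative name):

import (
    "encoding/json"
    "net/http"
    "net/url"
    "strings"
)

// expireCached posts to the cache/expire rc endpoint and decodes the
// JSON reply, mirroring the raw http.Post calls in the test body above.
func expireCached(remote string) (map[string]string, error) {
    endpoint := "http://localhost:5572/cache/expire?remote=" + url.QueryEscape(remote)
    resp, err := http.Post(endpoint, "application/json; charset=utf-8", strings.NewReader(""))
    if err != nil {
        return nil, err
    }
    defer func() { _ = resp.Body.Close() }()
    m := make(map[string]string)
    err = json.NewDecoder(resp.Body).Decode(&m)
    return m, err // expect m["status"] == "ok" on success
}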
@@ -796,7 +804,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
// create some rand test data
testData := randStringBytes(int(int64(totalChunks-1)*chunkSize + chunkSize/2))
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
o, err := cfs.NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
o, err := cfs.NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
co, ok := o.(*cache.Object)
require.True(t, ok)
@@ -835,7 +843,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
require.NoError(t, err)
require.Len(t, l, 1)
err = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/third"))
err = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/third"))
require.NoError(t, err)
l, err = runInstance.list(t, rootFs, "test")
@@ -870,14 +878,14 @@ func TestInternalBug2117(t *testing.T) {
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
err = cfs.UnWrap().Mkdir(context.Background(), "test")
err = cfs.UnWrap().Mkdir("test")
require.NoError(t, err)
for i := 1; i <= 4; i++ {
err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d", i))
err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d", i))
require.NoError(t, err)
for j := 1; j <= 4; j++ {
err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d/dir%d", i, j))
err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d/dir%d", i, j))
require.NoError(t, err)
runInstance.writeObjectString(t, cfs.UnWrap(), fmt.Sprintf("test/dir%d/dir%d/test.txt", i, j), "test")
@@ -1082,10 +1090,10 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
if purge {
_ = f.Features().Purge(context.Background())
_ = f.Features().Purge()
require.NoError(t, err)
}
err = f.Mkdir(context.Background(), "")
err = f.Mkdir("")
require.NoError(t, err)
if r.useMount && !r.isMounted {
r.mountFs(t, f)
@@ -1099,7 +1107,7 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
r.unmountFs(t, f)
}
err := f.Features().Purge(context.Background())
err := f.Features().Purge()
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
@@ -1201,7 +1209,7 @@ func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.Read
func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
in := bytes.NewReader(data)
_ = r.writeObjectReader(t, f, remote, in)
o, err := f.NewObject(context.Background(), remote)
o, err := f.NewObject(remote)
require.NoError(t, err)
require.Equal(t, int64(len(data)), o.Size())
return o
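writeObjectBytes builds a static ObjectInfo and round-trips through Put; the same pattern works as a small standalone helper against the pre-context API (putBytes is an illustrative name):

// putBytes uploads data to remote on f, describing it with a static
// ObjectInfo exactly as the test helper above does.
func putBytes(f fs.Fs, remote string, data []byte) (fs.Object, error) {
    info := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data)), true, nil, f)
    return f.Put(bytes.NewReader(data), info)
}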
@@ -1210,7 +1218,7 @@ func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte
func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Reader) fs.Object {
modTime := time.Now()
objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
obj, err := f.Put(context.Background(), in, objInfo)
obj, err := f.Put(in, objInfo)
require.NoError(t, err)
if r.useMount {
r.vfs.WaitForWriters(10 * time.Second)
@@ -1230,18 +1238,18 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b
err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600)
require.NoError(t, err)
r.vfs.WaitForWriters(10 * time.Second)
obj, err = f.NewObject(context.Background(), remote)
obj, err = f.NewObject(remote)
} else {
in1 := bytes.NewReader(data1)
in2 := bytes.NewReader(data2)
objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
obj, err = f.Put(context.Background(), in1, objInfo1)
obj, err = f.Put(in1, objInfo1)
require.NoError(t, err)
obj, err = f.NewObject(context.Background(), remote)
obj, err = f.NewObject(remote)
require.NoError(t, err)
err = obj.Update(context.Background(), in2, objInfo2)
err = obj.Update(in2, objInfo2)
}
require.NoError(t, err)
@@ -1270,7 +1278,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
return checkSample, err
}
} else {
co, err := f.NewObject(context.Background(), remote)
co, err := f.NewObject(remote)
if err != nil {
return checkSample, err
}
@@ -1285,7 +1293,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLengthCheck bool) []byte {
size := end - offset
checkSample := make([]byte, size)
reader, err := o.Open(context.Background(), &fs.SeekOption{Offset: offset})
reader, err := o.Open(&fs.SeekOption{Offset: offset})
require.NoError(t, err)
totalRead, err := io.ReadFull(reader, checkSample)
if (err == io.EOF || err == io.ErrUnexpectedEOF) && noLengthCheck {
@@ -1302,7 +1310,7 @@ func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
if r.useMount {
err = os.Mkdir(path.Join(r.mntDir, remote), 0700)
} else {
err = f.Mkdir(context.Background(), remote)
err = f.Mkdir(remote)
}
require.NoError(t, err)
}
@@ -1314,11 +1322,11 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
err = os.Remove(path.Join(r.mntDir, remote))
} else {
var obj fs.Object
obj, err = f.NewObject(context.Background(), remote)
obj, err = f.NewObject(remote)
if err != nil {
err = f.Rmdir(context.Background(), remote)
err = f.Rmdir(remote)
} else {
err = obj.Remove(context.Background())
err = obj.Remove()
}
}
@@ -1336,7 +1344,7 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
}
} else {
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
list, err = f.List(remote)
for _, ll := range list {
l = append(l, ll)
}
@@ -1355,7 +1363,7 @@ func (r *run) listPath(t *testing.T, f fs.Fs, remote string) []string {
}
} else {
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
list, err = f.List(remote)
for _, ll := range list {
l = append(l, ll.Remote())
}
@@ -1395,7 +1403,7 @@ func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
}
r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().DirMove != nil {
err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst)
err = rootFs.Features().DirMove(rootFs, src, dst)
if err != nil {
return err
}
@@ -1417,11 +1425,11 @@ func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
}
r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().Move != nil {
obj1, err := rootFs.NewObject(context.Background(), src)
obj1, err := rootFs.NewObject(src)
if err != nil {
return err
}
_, err = rootFs.Features().Move(context.Background(), obj1, dst)
_, err = rootFs.Features().Move(obj1, dst)
if err != nil {
return err
}
@@ -1443,11 +1451,11 @@ func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
}
r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().Copy != nil {
obj, err := rootFs.NewObject(context.Background(), src)
obj, err := rootFs.NewObject(src)
if err != nil {
return err
}
_, err = rootFs.Features().Copy(context.Background(), obj, dst)
_, err = rootFs.Features().Copy(obj, dst)
if err != nil {
return err
}
@@ -1469,11 +1477,11 @@ func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error)
}
return fi.ModTime(), nil
}
obj1, err := rootFs.NewObject(context.Background(), src)
obj1, err := rootFs.NewObject(src)
if err != nil {
return time.Time{}, err
}
return obj1.ModTime(context.Background()), nil
return obj1.ModTime(), nil
}
func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
@@ -1486,7 +1494,7 @@ func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
}
return fi.Size(), nil
}
obj1, err := rootFs.NewObject(context.Background(), src)
obj1, err := rootFs.NewObject(src)
if err != nil {
return int64(0), err
}
@@ -1497,8 +1505,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
var err error
if r.useMount {
var f *os.File
f, err = os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
f, err := os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return err
}
@@ -1508,15 +1515,14 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
}()
_, err = f.WriteString(data + append)
} else {
var obj1 fs.Object
obj1, err = rootFs.NewObject(context.Background(), src)
obj1, err := rootFs.NewObject(src)
if err != nil {
return err
}
data1 := []byte(data + append)
r := bytes.NewReader(data1)
objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
err = obj1.Update(context.Background(), r, objInfo1)
err = obj1.Update(r, objInfo1)
}
return err
@@ -1641,13 +1647,15 @@ func (r *run) getCacheFs(f fs.Fs) (*cache.Fs, error) {
cfs, ok := f.(*cache.Fs)
if ok {
return cfs, nil
}
if f.Features().UnWrap != nil {
cfs, ok := f.Features().UnWrap().(*cache.Fs)
if ok {
return cfs, nil
} else {
if f.Features().UnWrap != nil {
cfs, ok := f.Features().UnWrap().(*cache.Fs)
if ok {
return cfs, nil
}
}
}
return nil, errors.New("didn't find a cache fs")
}

View File

@@ -9,9 +9,9 @@ import (
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/rclone/rclone/cmd/mount"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/ncw/rclone/cmd/mount"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/stretchr/testify/require"
)

View File

@@ -9,10 +9,10 @@ import (
"time"
"github.com/billziss-gh/cgofuse/fuse"
"github.com/ncw/rclone/cmd/cmount"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/cmount"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/require"
)

View File

@@ -7,17 +7,15 @@ package cache_test
import (
"testing"
"github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "MergeDirs", "OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
})
}

View File

@@ -3,8 +3,6 @@
package cache_test
import (
"context"
"fmt"
"math/rand"
"os"
"path"
@@ -12,9 +10,11 @@ import (
"testing"
"time"
"github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/drive"
"github.com/rclone/rclone/fs"
"fmt"
"github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/fs"
"github.com/stretchr/testify/require"
)
@@ -86,11 +86,11 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one")
err := rootFs.Mkdir("one")
require.NoError(t, err)
err = rootFs.Mkdir(context.Background(), "one/test")
err = rootFs.Mkdir("one/test")
require.NoError(t, err)
err = rootFs.Mkdir(context.Background(), "second")
err = rootFs.Mkdir("second")
require.NoError(t, err)
// create some rand test data
@@ -123,11 +123,11 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one")
err := rootFs.Mkdir("one")
require.NoError(t, err)
err = rootFs.Mkdir(context.Background(), "one/test")
err = rootFs.Mkdir("one/test")
require.NoError(t, err)
err = rootFs.Mkdir(context.Background(), "second")
err = rootFs.Mkdir("second")
require.NoError(t, err)
// create some rand test data
@@ -166,7 +166,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "test")
err := rootFs.Mkdir("test")
require.NoError(t, err)
minSize := 5242880
maxSize := 10485760
@@ -234,9 +234,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one")
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject(context.Background(), "second/one")
_, err = rootFs.NewObject("second/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -257,7 +257,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
require.Contains(t, err.Error(), "directory not empty")
_, err = rootFs.NewObject(context.Background(), "test/one")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -271,9 +271,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
if err != errNotSupported {
require.NoError(t, err)
// try to read from it
_, err = rootFs.NewObject(context.Background(), "test/one")
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject(context.Background(), "test/second")
_, err = rootFs.NewObject("test/second")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
@@ -290,9 +290,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/third")
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
@@ -307,7 +307,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
// test Remove -- allowed
err = runInstance.rm(t, rootFs, "test/one")
require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one")
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -319,7 +319,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
require.NoError(t, err)
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
require.NoError(t, err)
obj2, err := rootFs.NewObject(context.Background(), "test/one")
obj2, err := rootFs.NewObject("test/one")
require.NoError(t, err)
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
require.Equal(t, "one content updated", string(data2))
@@ -367,7 +367,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.Error(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -379,7 +379,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
// test Rmdir
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -390,9 +390,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
if err != errNotSupported {
require.Error(t, err)
// try to read from it
_, err = rootFs.NewObject(context.Background(), "test/one")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/second")
_, err = rootFs.NewObject("test/second")
require.Error(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -405,9 +405,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/third")
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
@@ -422,7 +422,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
// test Remove
err = runInstance.rm(t, rootFs, "test/one")
require.Error(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))

455
backend/cache/cache_upload_test.go.orig vendored Normal file
View File

@@ -0,0 +1,455 @@
// +build !plan9
package cache_test
import (
"math/rand"
"os"
"path"
"strconv"
"testing"
"time"
"fmt"
"github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/fs"
"github.com/stretchr/testify/require"
)
func TestInternalUploadTempDirCreated(t *testing.T) {
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id)})
defer runInstance.cleanupFs(t, rootFs, boltDb)
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
require.NoError(t, err)
}
func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltDb *cache.Persistent) {
// create some rand test data
testSize := int64(524288000)
testReader := runInstance.randomReader(t, testSize)
bu := runInstance.listenForBackgroundUpload(t, rootFs, "one")
runInstance.writeRemoteReader(t, rootFs, "one", testReader)
// validate that it exists in temp fs
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.NoError(t, err)
if runInstance.rootIsCrypt {
require.Equal(t, int64(524416032), ti.Size())
} else {
require.Equal(t, testSize, ti.Size())
}
de1, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, de1, 1)
runInstance.completeBackgroundUpload(t, "one", bu)
// check if it was removed from temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.True(t, os.IsNotExist(err))
// check if it can be read
data2, err := runInstance.readDataFromRemote(t, rootFs, "one", 0, int64(1024), false)
require.NoError(t, err)
require.Len(t, data2, 1024)
}
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "0s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
func TestInternalUploadMoveExistingFile(t *testing.T) {
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("one")
require.NoError(t, err)
err = rootFs.Mkdir("one/test")
require.NoError(t, err)
err = rootFs.Mkdir("second")
require.NoError(t, err)
// create some rand test data
testSize := int64(10485760)
testReader := runInstance.randomReader(t, testSize)
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
de1, err := runInstance.list(t, rootFs, "one/test")
require.NoError(t, err)
require.Len(t, de1, 1)
time.Sleep(time.Second * 5)
//_ = os.Remove(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
//require.NoError(t, err)
err = runInstance.dirMove(t, rootFs, "one/test", "second/test")
require.NoError(t, err)
// check if it can be read
de1, err = runInstance.list(t, rootFs, "second/test")
require.NoError(t, err)
require.Len(t, de1, 1)
}
func TestInternalUploadTempPathCleaned(t *testing.T) {
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("one")
require.NoError(t, err)
err = rootFs.Mkdir("one/test")
require.NoError(t, err)
err = rootFs.Mkdir("second")
require.NoError(t, err)
// create some rand test data
testSize := int64(1048576)
testReader := runInstance.randomReader(t, testSize)
testReader2 := runInstance.randomReader(t, testSize)
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
runInstance.writeObjectReader(t, rootFs, "second/data.bin", testReader2)
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
require.True(t, os.IsNotExist(err))
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.True(t, os.IsNotExist(err))
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second")))
require.False(t, os.IsNotExist(err))
runInstance.completeAllBackgroundUploads(t, rootFs, "second/data.bin")
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/data.bin")))
require.True(t, os.IsNotExist(err))
de1, err := runInstance.list(t, rootFs, "one/test")
require.NoError(t, err)
require.Len(t, de1, 1)
// check if it can be read
de1, err = runInstance.list(t, rootFs, "second")
require.NoError(t, err)
require.Len(t, de1, 1)
}
func TestInternalUploadQueueMoreFiles(t *testing.T) {
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("test")
require.NoError(t, err)
minSize := 5242880
maxSize := 10485760
totalFiles := 10
rand.Seed(time.Now().Unix())
lastFile := ""
for i := 0; i < totalFiles; i++ {
size := int64(rand.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin"
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
// validate that it exists in temp fs
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, remote)))
require.NoError(t, err)
require.Equal(t, size, runInstance.cleanSize(t, ti.Size()))
if runInstance.wrappedIsExternal && i < totalFiles-1 {
time.Sleep(time.Second * 3)
}
lastFile = remote
}
// check if cache lists all files, likely temp upload didn't finish yet
de1, err := runInstance.list(t, rootFs, "test")
require.NoError(t, err)
require.Len(t, de1, totalFiles)
// wait for background uploader to do its thing
runInstance.completeAllBackgroundUploads(t, rootFs, lastFile)
// retry until we have no more temp files and fail if they don't go down to 0
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test")))
require.True(t, os.IsNotExist(err))
// check if cache lists all files
de1, err = runInstance.list(t, rootFs, "test")
require.NoError(t, err)
require.Len(t, de1, totalFiles)
}
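The size expression in the loop above draws uniformly from the half-open range [minSize, maxSize), since rand.Intn never returns its argument; spelled out as a helper (randSize is an illustrative name):

// randSize returns a pseudo-random size in [minSize, maxSize).
func randSize(minSize, maxSize int) int64 {
    return int64(rand.Intn(maxSize-minSize) + minSize)
}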
func TestInternalUploadTempFileOperations(t *testing.T) {
id := "tiutfo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()
// create some rand test data
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// check if it can be read
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data1)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
// test DirMove - allowed
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject("second/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
require.NoError(t, err)
_, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
require.Error(t, err)
var started bool
started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one")))
require.NoError(t, err)
require.False(t, started)
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
}
// test Rmdir - allowed
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
require.Contains(t, err.Error(), "directory not empty")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
require.False(t, started)
require.NoError(t, err)
// test Move/Rename -- allowed
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
if err != errNotSupported {
require.NoError(t, err)
// try to read from it
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject("test/second")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
require.NoError(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
}
// test Copy -- allowed
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
require.NoError(t, err)
}
// test Remove -- allowed
err = runInstance.rm(t, rootFs, "test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// test Update -- allowed
firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
require.NoError(t, err)
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
require.NoError(t, err)
obj2, err := rootFs.NewObject("test/one")
require.NoError(t, err)
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
require.Equal(t, "one content updated", string(data2))
tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
if runInstance.rootIsCrypt {
require.Equal(t, int64(67), tmpInfo.Size())
} else {
require.Equal(t, int64(len(data2)), tmpInfo.Size())
}
// test SetModTime -- allowed
secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
require.NoError(t, err)
require.NotEqual(t, secondModTime, firstModTime)
require.NotEqual(t, time.Time{}, firstModTime)
require.NotEqual(t, time.Time{}, secondModTime)
}
func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()
// create some rand test data
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// check if it can be read
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data1)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
err = boltDb.SetPendingUploadToStarted(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "test/one")))
require.NoError(t, err)
// test DirMove
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
require.Error(t, err)
}
// test Rmdir
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
// test Move/Rename
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
if err != errNotSupported {
require.Error(t, err)
// try to read from it
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/second")
require.Error(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
require.Error(t, err)
}
// test Copy -- allowed
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
require.NoError(t, err)
}
// test Remove
err = runInstance.rm(t, rootFs, "test/one")
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// test Update - this seems to work. Why? FIXME
//firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
//require.NoError(t, err)
//err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated", func() {
// data2 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len("one content updated")), true)
// require.Equal(t, "one content", string(data2))
//
// tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
// require.NoError(t, err)
// if runInstance.rootIsCrypt {
// require.Equal(t, int64(67), tmpInfo.Size())
// } else {
// require.Equal(t, int64(len(data2)), tmpInfo.Size())
// }
//})
//require.Error(t, err)
// test SetModTime -- seems to work cause of previous
//secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
//require.NoError(t, err)
//require.Equal(t, secondModTime, firstModTime)
//require.NotEqual(t, time.Time{}, firstModTime)
//require.NotEqual(t, time.Time{}, secondModTime)
}
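
A note on the pattern both tests above exercise: writes are staged under a local temp directory (cache-tmp-upload-path) and tracked in a bolt queue, and only once SetPendingUploadToStarted marks the entry does the backend refuse operations that would invalidate the in-flight upload. The os.Stat probes against runInstance.tmpUploadDir are simply presence checks on that staging area. Below is a minimal standalone sketch of the same check, assuming a staging layout of tmpdir/id/remote; the tempPath and isStaged helpers are hypothetical, not the backend's API:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// tempPath mirrors a remote path into the local staging directory.
func tempPath(tmpUploadDir, id, remote string) string {
	return filepath.Join(tmpUploadDir, id, remote)
}

// isStaged reports whether a remote still has a staged copy waiting
// to be uploaded, the condition the os.Stat asserts above verify.
func isStaged(tmpUploadDir, id, remote string) bool {
	_, err := os.Stat(tempPath(tmpUploadDir, id, remote))
	return err == nil
}

func main() {
	fmt.Println(isStaged(os.TempDir(), "tiuufo", "test/one"))
}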

backend/cache/cache_upload_test.go.rej

@@ -0,0 +1,12 @@
--- cache_upload_test.go
+++ cache_upload_test.go
@@ -1500,9 +1469,6 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
}
r.tempFiles = nil
debug.FreeOSMemory()
- for k, v := range r.runDefaultFlagMap {
- _ = flag.Set(k, v)
- }
}
func (r *run) randomBytes(t *testing.T, size int64) []byte {
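
For context: a .rej file holds hunks that patch or git apply could not place, and the one above records a cleanup loop that restored every command-line flag to its saved default after a test run (r.runDefaultFlagMap). A minimal sketch of that save-and-restore idiom with the standard flag package, assuming nothing about rclone's own flag handling:

package main

import (
	"flag"
	"fmt"
)

var verbose = flag.Bool("verbose", false, "enable verbose output")

func main() {
	flag.Parse()

	// Snapshot current values so they can be restored later, the same
	// shape as the runDefaultFlagMap loop in the rejected hunk.
	saved := map[string]string{}
	flag.VisitAll(func(f *flag.Flag) { saved[f.Name] = f.Value.String() })

	_ = flag.Set("verbose", "true")
	fmt.Println("during:", *verbose)

	// Restore the snapshot, mirroring `_ = flag.Set(k, v)` above.
	for k, v := range saved {
		_ = flag.Set(k, v)
	}
	fmt.Println("after:", *verbose)
}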


@@ -3,16 +3,16 @@
package cache
import (
"context"
"path"
"time"
"github.com/rclone/rclone/fs"
"path"
"github.com/ncw/rclone/fs"
)
// Directory is a generic dir that stores basic information about it
type Directory struct {
Directory fs.Directory `json:"-"` // can be nil
fs.Directory `json:"-"`
CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory
@@ -56,7 +56,7 @@ func ShallowDirectory(f *Fs, remote string) *Directory {
}
// DirectoryFromOriginal builds one from a generic fs.Directory
func DirectoryFromOriginal(ctx context.Context, f *Fs, d fs.Directory) *Directory {
func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
var cd *Directory
fullRemote := path.Join(f.Root(), d.Remote())
@@ -68,7 +68,7 @@ func DirectoryFromOriginal(ctx context.Context, f *Fs, d fs.Directory) *Director
CacheFs: f,
Name: name,
Dir: dir,
CacheModTime: d.ModTime(ctx).UnixNano(),
CacheModTime: d.ModTime().UnixNano(),
CacheSize: d.Size(),
CacheItems: d.Items(),
CacheType: "Directory",
@@ -111,7 +111,7 @@ func (d *Directory) parentRemote() string {
}
// ModTime returns the cached ModTime
func (d *Directory) ModTime(ctx context.Context) time.Time {
func (d *Directory) ModTime() time.Time {
return time.Unix(0, d.CacheModTime)
}
@@ -125,14 +125,6 @@ func (d *Directory) Items() int64 {
return d.CacheItems
}
// ID returns the ID of the cached directory if known
func (d *Directory) ID() string {
if d.Directory == nil {
return ""
}
return d.Directory.ID()
}
var (
_ fs.Directory = (*Directory)(nil)
)
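
The `var ( _ fs.Directory = (*Directory)(nil) )` line this diff ends on is Go's standard compile-time interface check: assigning a typed nil to a blank variable of the interface type forces the compiler to verify the method set, at zero runtime cost. A self-contained sketch of the idiom (Speaker and Dog are made-up names):

package main

import "fmt"

// Speaker stands in for an interface such as fs.Directory.
type Speaker interface{ Speak() string }

type Dog struct{}

func (Dog) Speak() string { return "woof" }

// Compile-time proof that *Dog satisfies Speaker; this line fails to
// build, rather than failing at runtime, if a method goes missing.
var _ Speaker = (*Dog)(nil)

func main() { fmt.Println(Dog{}.Speak()) }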


@@ -3,18 +3,18 @@
package cache
import (
"context"
"fmt"
"io"
"path"
"runtime"
"strings"
"sync"
"time"
"path"
"runtime"
"strings"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
)
var uploaderMap = make(map[string]*backgroundWriter)
@@ -41,7 +41,6 @@ func initBackgroundUploader(fs *Fs) (*backgroundWriter, error) {
// Handle is managing the read/write/seek operations on an open handle
type Handle struct {
ctx context.Context
cachedObject *Object
cfs *Fs
memory *Memory
@@ -50,19 +49,17 @@ type Handle struct {
offset int64
seenOffsets map[int64]bool
mu sync.Mutex
workersWg sync.WaitGroup
confirmReading chan bool
workers int
maxWorkerID int
UseMemory bool
closed bool
reading bool
UseMemory bool
workers []*worker
closed bool
reading bool
}
// NewObjectHandle returns a new Handle for an existing Object
func NewObjectHandle(ctx context.Context, o *Object, cfs *Fs) *Handle {
func NewObjectHandle(o *Object, cfs *Fs) *Handle {
r := &Handle{
ctx: ctx,
cachedObject: o,
cfs: cfs,
offset: 0,
@@ -98,7 +95,7 @@ func (r *Handle) String() string {
// startReadWorkers will start the worker pool
func (r *Handle) startReadWorkers() {
if r.workers > 0 {
if r.hasAtLeastOneWorker() {
return
}
totalWorkers := r.cacheFs().opt.TotalWorkers
@@ -120,27 +117,26 @@ func (r *Handle) startReadWorkers() {
// scaleOutWorkers will increase the worker pool count by the provided amount
func (r *Handle) scaleWorkers(desired int) {
current := r.workers
current := len(r.workers)
if current == desired {
return
}
if current > desired {
// scale in gracefully
for r.workers > desired {
for i := 0; i < current-desired; i++ {
r.preloadQueue <- -1
r.workers--
}
} else {
// scale out
for r.workers < desired {
for i := 0; i < desired-current; i++ {
w := &worker{
r: r,
id: r.maxWorkerID,
ch: r.preloadQueue,
id: current + i,
}
r.workersWg.Add(1)
r.workers++
r.maxWorkerID++
go w.run()
r.workers = append(r.workers, w)
}
}
// ignore first scale out from 0
@@ -152,7 +148,7 @@ func (r *Handle) scaleWorkers(desired int) {
func (r *Handle) confirmExternalReading() {
// if we have a max value of workers
// then we skip this step
if r.workers > 1 ||
if len(r.workers) > 1 ||
!r.cacheFs().plexConnector.isConfigured() {
return
}
@@ -182,7 +178,7 @@ func (r *Handle) queueOffset(offset int64) {
}
}
for i := 0; i < r.workers; i++ {
for i := 0; i < len(r.workers); i++ {
o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
if o < 0 || o >= r.cachedObject.Size() {
continue
@@ -197,6 +193,16 @@ func (r *Handle) queueOffset(offset int64) {
}
}
func (r *Handle) hasAtLeastOneWorker() bool {
oneWorker := false
for i := 0; i < len(r.workers); i++ {
if r.workers[i].isRunning() {
oneWorker = true
}
}
return oneWorker
}
// getChunk is called by the FS to retrieve a specific chunk of known start and size from where it can find it
// it can be from transient or persistent cache
// it will also build the chunk from the cache's specific chunk boundaries and build the final desired chunk in a buffer
@@ -237,7 +243,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
// not found in ram or
// the worker didn't managed to download the chunk in time so we abort and close the stream
if err != nil || len(data) == 0 || !found {
if r.workers == 0 {
if !r.hasAtLeastOneWorker() {
fs.Errorf(r, "out of workers")
return nil, io.ErrUnexpectedEOF
}
@@ -298,7 +304,14 @@ func (r *Handle) Close() error {
close(r.preloadQueue)
r.closed = true
// wait for workers to complete their jobs before returning
r.workersWg.Wait()
waitCount := 3
for i := 0; i < len(r.workers); i++ {
waitIdx := 0
for r.workers[i].isRunning() && waitIdx < waitCount {
time.Sleep(time.Second)
waitIdx++
}
}
r.memory.db.Flush()
fs.Debugf(r, "cache reader closed %v", r.offset)
@@ -335,9 +348,12 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
}
type worker struct {
r *Handle
rc io.ReadCloser
id int
r *Handle
ch <-chan int64
rc io.ReadCloser
id int
running bool
mu sync.Mutex
}
// String is a representation of this worker
@@ -354,7 +370,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
r := w.rc
if w.rc == nil {
r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
return w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1})
return w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
})
if err != nil {
return nil, err
@@ -364,7 +380,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
if !closeOpen {
if do, ok := r.(fs.RangeSeeker); ok {
_, err = do.RangeSeek(w.r.ctx, offset, io.SeekStart, end-offset)
_, err = do.RangeSeek(offset, io.SeekStart, end-offset)
return r, err
} else if do, ok := r.(io.Seeker); ok {
_, err = do.Seek(offset, io.SeekStart)
@@ -374,7 +390,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
_ = w.rc.Close()
return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
r, err = w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1})
r, err = w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
if err != nil {
return nil, err
}
@@ -382,19 +398,33 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
})
}
func (w *worker) isRunning() bool {
w.mu.Lock()
defer w.mu.Unlock()
return w.running
}
func (w *worker) setRunning(f bool) {
w.mu.Lock()
defer w.mu.Unlock()
w.running = f
}
// run is the main loop for the worker which receives offsets to preload
func (w *worker) run() {
var err error
var data []byte
defer w.setRunning(false)
defer func() {
if w.rc != nil {
_ = w.rc.Close()
w.setRunning(false)
}
w.r.workersWg.Done()
}()
for {
chunkStart, open := <-w.r.preloadQueue
chunkStart, open := <-w.ch
w.setRunning(true)
if chunkStart < 0 || !open {
break
}
@@ -452,7 +482,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
// we seem to be getting only errors so we abort
if err != nil {
fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
err = w.r.cachedObject.refreshFromSource(w.r.ctx, true)
err = w.r.cachedObject.refreshFromSource(true)
if err != nil {
fs.Errorf(w, "%v", err)
}
@@ -465,7 +495,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
sourceRead, err = io.ReadFull(w.rc, data)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
err = w.r.cachedObject.refreshFromSource(w.r.ctx, true)
err = w.r.cachedObject.refreshFromSource(true)
if err != nil {
fs.Errorf(w, "%v", err)
}
@@ -591,7 +621,7 @@ func (b *backgroundWriter) run() {
remote := b.fs.cleanRootFromPath(absPath)
b.notify(remote, BackgroundUploadStarted, nil)
fs.Infof(remote, "background upload: started upload")
err = operations.MoveFile(context.TODO(), b.fs.UnWrap(), b.fs.tempFs, remote, remote)
err = operations.MoveFile(b.fs.UnWrap(), b.fs.tempFs, remote, remote)
if err != nil {
b.notify(remote, BackgroundUploadError, err)
_ = b.fs.cache.rollbackPendingUpload(absPath)
@@ -601,14 +631,14 @@ func (b *backgroundWriter) run() {
// clean empty dirs up to root
thisDir := cleanPath(path.Dir(remote))
for thisDir != "" {
thisList, err := b.fs.tempFs.List(context.TODO(), thisDir)
thisList, err := b.fs.tempFs.List(thisDir)
if err != nil {
break
}
if len(thisList) > 0 {
break
}
err = b.fs.tempFs.Rmdir(context.TODO(), thisDir)
err = b.fs.tempFs.Rmdir(thisDir)
fs.Debugf(thisDir, "cleaned from temp path")
if err != nil {
break
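
The scaleWorkers hunk above is the heart of this change: the newer side keeps a plain counter plus a WaitGroup and retires workers by pushing a -1 sentinel into the shared preload queue, while the older side tracked a []*worker slice and polled isRunning. A runnable toy of the sentinel scheme; pool and its fields are illustrative names, not the cache backend's types:

package main

import (
	"fmt"
	"sync"
)

type pool struct {
	queue   chan int64
	wg      sync.WaitGroup
	workers int
}

// scale grows or shrinks the pool; a -1 on the queue makes exactly
// one worker exit, which is how scale-in stays graceful.
func (p *pool) scale(desired int) {
	for p.workers > desired { // scale in
		p.queue <- -1
		p.workers--
	}
	for p.workers < desired { // scale out
		p.wg.Add(1)
		p.workers++
		go func(id int) {
			defer p.wg.Done()
			for off := range p.queue {
				if off < 0 {
					return // sentinel: this worker retires
				}
				fmt.Printf("worker %d preloads offset %d\n", id, off)
			}
		}(p.workers)
	}
}

func main() {
	p := &pool{queue: make(chan int64)}
	p.scale(2)
	p.queue <- 4096
	p.scale(0)
	p.wg.Wait()
}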


@@ -3,16 +3,15 @@
package cache
import (
"context"
"io"
"path"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers"
)
const (
@@ -69,7 +68,7 @@ func NewObject(f *Fs, remote string) *Object {
}
// ObjectFromOriginal builds one from a generic fs.Object
func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object {
func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
var co *Object
fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
dir, name := path.Split(fullRemote)
@@ -93,13 +92,13 @@ func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object {
CacheType: cacheType,
CacheTs: time.Now(),
}
co.updateData(ctx, o)
co.updateData(o)
return co
}
func (o *Object) updateData(ctx context.Context, source fs.Object) {
func (o *Object) updateData(source fs.Object) {
o.Object = source
o.CacheModTime = source.ModTime(ctx).UnixNano()
o.CacheModTime = source.ModTime().UnixNano()
o.CacheSize = source.Size()
o.CacheStorable = source.Storable()
o.CacheTs = time.Now()
@@ -131,20 +130,20 @@ func (o *Object) abs() string {
}
// ModTime returns the cached ModTime
func (o *Object) ModTime(ctx context.Context) time.Time {
_ = o.refresh(ctx)
func (o *Object) ModTime() time.Time {
_ = o.refresh()
return time.Unix(0, o.CacheModTime)
}
// Size returns the cached Size
func (o *Object) Size() int64 {
_ = o.refresh(context.TODO())
_ = o.refresh()
return o.CacheSize
}
// Storable returns the cached Storable
func (o *Object) Storable() bool {
_ = o.refresh(context.TODO())
_ = o.refresh()
return o.CacheStorable
}
@@ -152,18 +151,18 @@ func (o *Object) Storable() bool {
// all these conditions must be true to ignore a refresh
// 1. cache ts didn't expire yet
// 2. is not pending a notification from the wrapped fs
func (o *Object) refresh(ctx context.Context) error {
func (o *Object) refresh() error {
isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
if !isExpired && !isNotified {
return nil
}
return o.refreshFromSource(ctx, true)
return o.refreshFromSource(true)
}
// refreshFromSource requests the original FS for the object in case it comes from a cached entry
func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
func (o *Object) refreshFromSource(force bool) error {
o.refreshMutex.Lock()
defer o.refreshMutex.Unlock()
var err error
@@ -173,29 +172,29 @@ func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
return nil
}
if o.isTempFile() {
liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
liveObject, err = o.ParentFs.NewObject(o.Remote())
err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
} else {
liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
liveObject, err = o.CacheFs.Fs.NewObject(o.Remote())
err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
}
if err != nil {
fs.Errorf(o, "error refreshing object in : %v", err)
return err
}
o.updateData(ctx, liveObject)
o.updateData(liveObject)
o.persist()
return nil
}
// SetModTime sets the ModTime of this object
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
if err := o.refreshFromSource(ctx, false); err != nil {
func (o *Object) SetModTime(t time.Time) error {
if err := o.refreshFromSource(false); err != nil {
return err
}
err := o.Object.SetModTime(ctx, t)
err := o.Object.SetModTime(t)
if err != nil {
return err
}
@@ -208,19 +207,13 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
}
// Open is used to request a specific part of the file using fs.RangeOption
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
var err error
if o.Object == nil {
err = o.refreshFromSource(ctx, true)
} else {
err = o.refresh(ctx)
}
if err != nil {
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
if err := o.refreshFromSource(true); err != nil {
return nil, err
}
cacheReader := NewObjectHandle(ctx, o, o.CacheFs)
var err error
cacheReader := NewObjectHandle(o, o.CacheFs)
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
@@ -239,8 +232,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
}
// Update will change the object data
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if err := o.refreshFromSource(ctx, false); err != nil {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if err := o.refreshFromSource(false); err != nil {
return err
}
// pause background uploads if active
@@ -255,7 +248,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "updating object contents with size %v", src.Size())
// FIXME use reliable upload
err := o.Object.Update(ctx, in, src, options...)
err := o.Object.Update(in, src, options...)
if err != nil {
fs.Errorf(o, "error updating source: %v", err)
return err
@@ -266,7 +259,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// advertise to ChangeNotify if wrapped doesn't do that
o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)
o.CacheModTime = src.ModTime(ctx).UnixNano()
o.CacheModTime = src.ModTime().UnixNano()
o.CacheSize = src.Size()
o.CacheHashes = make(map[hash.Type]string)
o.CacheTs = time.Now()
@@ -276,8 +269,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Remove deletes the object from both the cache and the source
func (o *Object) Remove(ctx context.Context) error {
if err := o.refreshFromSource(ctx, false); err != nil {
func (o *Object) Remove() error {
if err := o.refreshFromSource(false); err != nil {
return err
}
// pause background uploads if active
@@ -289,7 +282,7 @@ func (o *Object) Remove(ctx context.Context) error {
return errors.Errorf("%v is currently uploading, can't delete", o)
}
}
err := o.Object.Remove(ctx)
err := o.Object.Remove()
if err != nil {
return err
}
@@ -307,8 +300,8 @@ func (o *Object) Remove(ctx context.Context) error {
// Hash requests a hash of the object and stores in the cache
// since it might or might not be called, this is lazy loaded
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
_ = o.refresh(ctx)
func (o *Object) Hash(ht hash.Type) (string, error) {
_ = o.refresh()
if o.CacheHashes == nil {
o.CacheHashes = make(map[hash.Type]string)
}
@@ -317,10 +310,10 @@ func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
if found {
return cachedHash, nil
}
if err := o.refreshFromSource(ctx, false); err != nil {
if err := o.refreshFromSource(false); err != nil {
return "", err
}
liveHash, err := o.Object.Hash(ctx, ht)
liveHash, err := o.Object.Hash(ht)
if err != nil {
return "", err
}
@@ -360,13 +353,6 @@ func (o *Object) tempFileStartedUpload() bool {
return started
}
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
return o.Object
}
var (
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.Object = (*Object)(nil)
)
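
Object.refresh above gates source lookups on two conditions: the cached entry's age against info_age, and whether a change notification is pending for the remote. A tiny sketch of that freshness gate in isolation; entry and its fields are invented for illustration:

package main

import (
	"fmt"
	"time"
)

type entry struct {
	cachedAt time.Time
	infoAge  time.Duration
	notified bool
}

// refresh only goes back to the source once the cache timestamp has
// expired or a notification arrived, as in Object.refresh above.
func (e *entry) refresh(fetch func()) {
	expired := time.Now().After(e.cachedAt.Add(e.infoAge))
	if !expired && !e.notified {
		return // still fresh: serve from cache
	}
	fetch()
	e.cachedAt = time.Now()
	e.notified = false
}

func main() {
	e := &entry{cachedAt: time.Now(), infoAge: time.Hour}
	e.refresh(func() { fmt.Println("hit source") }) // fresh: no call
	e.notified = true
	e.refresh(func() { fmt.Println("hit source") }) // notified: calls
}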

backend/cache/plex.go

@@ -3,19 +3,20 @@
package cache
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"sync"
"time"
cache "github.com/patrickmn/go-cache"
"github.com/rclone/rclone/fs"
"sync"
"bytes"
"io/ioutil"
"github.com/ncw/rclone/fs"
"github.com/patrickmn/go-cache"
"golang.org/x/net/websocket"
)
@@ -53,7 +54,6 @@ type plexConnector struct {
username string
password string
token string
insecure bool
f *Fs
mu sync.Mutex
running bool
@@ -63,7 +63,7 @@ type plexConnector struct {
}
// newPlexConnector connects to a Plex server and generates a token
func newPlexConnector(f *Fs, plexURL, username, password string, insecure bool, saveToken func(string)) (*plexConnector, error) {
func newPlexConnector(f *Fs, plexURL, username, password string, saveToken func(string)) (*plexConnector, error) {
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
if err != nil {
return nil, err
@@ -75,7 +75,6 @@ func newPlexConnector(f *Fs, plexURL, username, password string, insecure bool,
username: username,
password: password,
token: "",
insecure: insecure,
stateCache: cache.New(time.Hour, time.Minute),
saveToken: saveToken,
}
@@ -84,7 +83,7 @@ func newPlexConnector(f *Fs, plexURL, username, password string, insecure bool,
}
// newPlexConnector connects to a Plex server and generates a token
func newPlexConnectorWithToken(f *Fs, plexURL, token string, insecure bool) (*plexConnector, error) {
func newPlexConnectorWithToken(f *Fs, plexURL, token string) (*plexConnector, error) {
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
if err != nil {
return nil, err
@@ -94,7 +93,6 @@ func newPlexConnectorWithToken(f *Fs, plexURL, token string, insecure bool) (*pl
f: f,
url: u,
token: token,
insecure: insecure,
stateCache: cache.New(time.Hour, time.Minute),
}
pc.listenWebsocket()
@@ -109,26 +107,14 @@ func (p *plexConnector) closeWebsocket() {
p.running = false
}
func (p *plexConnector) websocketDial() (*websocket.Conn, error) {
u := strings.TrimRight(strings.Replace(strings.Replace(
p.url.String(), "http://", "ws://", 1), "https://", "wss://", 1), "/")
url := fmt.Sprintf(defPlexNotificationURL, u, p.token)
config, err := websocket.NewConfig(url, "http://localhost")
if err != nil {
return nil, err
}
if p.insecure {
config.TlsConfig = &tls.Config{InsecureSkipVerify: true}
}
return websocket.DialConfig(config)
}
func (p *plexConnector) listenWebsocket() {
p.runningMu.Lock()
defer p.runningMu.Unlock()
conn, err := p.websocketDial()
u := strings.Replace(p.url.String(), "http://", "ws://", 1)
u = strings.Replace(u, "https://", "wss://", 1)
conn, err := websocket.Dial(fmt.Sprintf(defPlexNotificationURL, strings.TrimRight(u, "/"), p.token),
"", "http://localhost")
if err != nil {
fs.Errorf("plex", "%v", err)
return
@@ -224,9 +210,7 @@ func (p *plexConnector) authenticate() error {
}
p.token = token
if p.token != "" {
if p.saveToken != nil {
p.saveToken(p.token)
}
p.saveToken(p.token)
fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String())
}
p.listenWebsocket()
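
The rewritten websocketDial builds a websocket.Config first so a TLS config can be attached when the new insecure flag is set, instead of calling websocket.Dial directly. A standalone sketch of that golang.org/x/net/websocket pattern; the URL below is a placeholder, not a real Plex endpoint:

package main

import (
	"crypto/tls"
	"log"

	"golang.org/x/net/websocket"
)

// dial opens a websocket, optionally skipping certificate checks,
// mirroring the insecure handling introduced in the diff above.
func dial(url string, insecure bool) (*websocket.Conn, error) {
	config, err := websocket.NewConfig(url, "http://localhost")
	if err != nil {
		return nil, err
	}
	if insecure {
		config.TlsConfig = &tls.Config{InsecureSkipVerify: true}
	}
	return websocket.DialConfig(config)
}

func main() {
	conn, err := dial("wss://plex.example.com/:/websockets/notifications", true)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}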


@@ -7,9 +7,9 @@ import (
"strings"
"time"
cache "github.com/patrickmn/go-cache"
"github.com/ncw/rclone/fs"
"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
// Memory is a wrapper of transient storage for a go-cache store


@@ -3,23 +3,25 @@
package cache
import (
"time"
"bytes"
"context"
"encoding/binary"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
"sync"
"time"
"io/ioutil"
"fmt"
bolt "github.com/coreos/bbolt"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/walk"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
)
// Constants
@@ -399,7 +401,7 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
}
err = bucket.Put([]byte(cachedObject.Name), encoded)
err = bucket.Put([]byte(cachedObject.Name), []byte(encoded))
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
}
@@ -810,7 +812,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
}
err = bucket.Put([]byte(destPath), encoded)
err = bucket.Put([]byte(destPath), []byte(encoded))
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
}
@@ -1015,7 +1017,7 @@ func (b *Persistent) SetPendingUploadToStarted(remote string) error {
}
// ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error {
func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
return b.db.Update(func(tx *bolt.Tx) error {
_ = tx.DeleteBucket([]byte(tempBucket))
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
@@ -1024,7 +1026,7 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
}
var queuedEntries []fs.Object
err = walk.ListR(ctx, cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
err = walk.Walk(cacheFs.tempFs, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
for _, o := range entries {
if oo, ok := o.(fs.Object); ok {
queuedEntries = append(queuedEntries, oo)
@@ -1050,7 +1052,7 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
}
err = bucket.Put([]byte(destPath), encoded)
err = bucket.Put([]byte(destPath), []byte(encoded))
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
}


@@ -2,7 +2,6 @@ package crypt
import (
"bytes"
"context"
"crypto/aes"
gocipher "crypto/cipher"
"crypto/rand"
@@ -14,13 +13,15 @@ import (
"sync"
"unicode/utf8"
"github.com/ncw/rclone/backend/crypt/pkcs7"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rfjakob/eme"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/scrypt"
"github.com/rfjakob/eme"
)
// Constants
@@ -42,7 +43,6 @@ var (
ErrorBadDecryptControlChar = errors.New("bad decryption - contains control chars")
ErrorNotAMultipleOfBlocksize = errors.New("not a multiple of blocksize")
ErrorTooShortAfterDecode = errors.New("too short after base32 decode")
ErrorTooLongAfterDecode = errors.New("too long after base32 decode")
ErrorEncryptedFileTooShort = errors.New("file is too short to be encrypted")
ErrorEncryptedFileBadHeader = errors.New("file has truncated block header")
ErrorEncryptedBadMagic = errors.New("not an encrypted file - bad magic string")
@@ -69,7 +69,7 @@ type ReadSeekCloser interface {
}
// OpenRangeSeek opens the file handle at the offset with the limit given
type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error)
type OpenRangeSeek func(offset, limit int64) (io.ReadCloser, error)
// Cipher is used to swap out the encryption implementations
type Cipher interface {
@@ -86,7 +86,7 @@ type Cipher interface {
// DecryptData
DecryptData(io.ReadCloser) (io.ReadCloser, error)
// DecryptDataSeek decrypt at a given position
DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
// EncryptedSize calculates the size of the data when encrypted
EncryptedSize(int64) int64
// DecryptedSize calculates the size of the data when decrypted
@@ -286,9 +286,6 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
// not possible if decodeFilename() working correctly
return "", ErrorTooShortAfterDecode
}
if len(rawCiphertext) > 2048 {
return "", ErrorTooLongAfterDecode
}
paddedPlaintext := eme.Transform(c.block, c.nameTweak[:], rawCiphertext, eme.DirectionDecrypt)
plaintext, err := pkcs7.Unpad(nameCipherBlockSize, paddedPlaintext)
if err != nil {
@@ -464,7 +461,7 @@ func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
if int(newRune) < base {
newRune += 256
}
_, _ = result.WriteRune(newRune)
_, _ = result.WriteRune(rune(newRune))
default:
_, _ = result.WriteRune(runeValue)
@@ -749,29 +746,29 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
if !bytes.Equal(readBuf[:fileMagicSize], fileMagicBytes) {
return nil, fh.finishAndClose(ErrorEncryptedBadMagic)
}
// retrieve the nonce
// retreive the nonce
fh.nonce.fromBuf(readBuf[fileMagicSize:])
fh.initialNonce = fh.nonce
return fh, nil
}
// newDecrypterSeek creates a new file handle decrypting on the fly
func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
var rc io.ReadCloser
doRangeSeek := false
setLimit := false
// Open initially with no seek
if offset == 0 && limit < 0 {
// If no offset or limit then open whole file
rc, err = open(ctx, 0, -1)
rc, err = open(0, -1)
} else if offset == 0 {
// If no offset open the header + limit worth of the file
_, underlyingLimit, _, _ := calculateUnderlying(offset, limit)
rc, err = open(ctx, 0, int64(fileHeaderSize)+underlyingLimit)
rc, err = open(0, int64(fileHeaderSize)+underlyingLimit)
setLimit = true
} else {
// Otherwise just read the header to start with
rc, err = open(ctx, 0, int64(fileHeaderSize))
rc, err = open(0, int64(fileHeaderSize))
doRangeSeek = true
}
if err != nil {
@@ -784,7 +781,7 @@ func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
}
fh.open = open // will be called by fh.RangeSeek
if doRangeSeek {
_, err = fh.RangeSeek(ctx, offset, io.SeekStart, limit)
_, err = fh.RangeSeek(offset, io.SeekStart, limit)
if err != nil {
_ = fh.Close()
return nil, err
@@ -904,7 +901,7 @@ func calculateUnderlying(offset, limit int64) (underlyingOffset, underlyingLimit
// limiting the total length to limit.
//
// RangeSeek with a limit of < 0 is equivalent to a regular Seek.
func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, limit int64) (int64, error) {
func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, error) {
fh.mu.Lock()
defer fh.mu.Unlock()
@@ -931,7 +928,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
// Can we seek underlying stream directly?
if do, ok := fh.rc.(fs.RangeSeeker); ok {
// Seek underlying stream directly
_, err := do.RangeSeek(ctx, underlyingOffset, 0, underlyingLimit)
_, err := do.RangeSeek(underlyingOffset, 0, underlyingLimit)
if err != nil {
return 0, fh.finish(err)
}
@@ -941,7 +938,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
fh.rc = nil
// Re-open the underlying object with the offset given
rc, err := fh.open(ctx, underlyingOffset, underlyingLimit)
rc, err := fh.open(underlyingOffset, underlyingLimit)
if err != nil {
return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
}
@@ -970,7 +967,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
// Seek implements the io.Seeker interface
func (fh *decrypter) Seek(offset int64, whence int) (int64, error) {
return fh.RangeSeek(context.TODO(), offset, whence, -1)
return fh.RangeSeek(offset, whence, -1)
}
// finish sets the final error and tidies up
@@ -1044,8 +1041,8 @@ func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
// The open function must return a ReadCloser opened to the offset supplied
//
// You must use this form of DecryptData if you might want to Seek the file handle
func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
out, err := c.newDecrypterSeek(ctx, open, offset, limit)
func (c *cipher) DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
out, err := c.newDecrypterSeek(open, offset, limit)
if err != nil {
return nil, err
}
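
The OpenRangeSeek signature this diff keeps touching is the contract that makes seekable decryption work: when RangeSeek cannot reposition the current stream, the decrypter calls open again with a fresh underlying offset and limit. A toy implementation of the callback's older, context-free shape over an in-memory buffer; openBytes is invented for the example:

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

// openRangeSeek mirrors the callback in the diff: return a reader
// positioned at offset, truncated to limit bytes when limit >= 0.
type openRangeSeek func(offset, limit int64) (io.ReadCloser, error)

func openBytes(data []byte) openRangeSeek {
	return func(offset, limit int64) (io.ReadCloser, error) {
		r := bytes.NewReader(data[offset:])
		if limit < 0 {
			return ioutil.NopCloser(r), nil
		}
		return ioutil.NopCloser(io.LimitReader(r, limit)), nil
	}
}

func main() {
	open := openBytes([]byte("hello, rclone"))
	rc, _ := open(7, 6) // reopen at offset 7, read at most 6 bytes
	b, _ := ioutil.ReadAll(rc)
	fmt.Printf("%s\n", b) // rclone
	_ = rc.Close()
}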


@@ -2,7 +2,6 @@ package crypt
import (
"bytes"
"context"
"encoding/base32"
"fmt"
"io"
@@ -10,8 +9,8 @@ import (
"strings"
"testing"
"github.com/ncw/rclone/backend/crypt/pkcs7"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -25,7 +24,7 @@ func TestNewNameEncryptionMode(t *testing.T) {
{"off", NameEncryptionOff, ""},
{"standard", NameEncryptionStandard, ""},
{"obfuscate", NameEncryptionObfuscated, ""},
{"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
{"potato", NameEncryptionMode(0), "Unknown file name encryption mode \"potato\""},
} {
actual, actualErr := NewNameEncryptionMode(test.in)
assert.Equal(t, actual, test.expected)
@@ -195,10 +194,6 @@ func TestEncryptSegment(t *testing.T) {
func TestDecryptSegment(t *testing.T) {
// We've tested the forwards above, now concentrate on the errors
longName := make([]byte, 3328)
for i := range longName {
longName[i] = 'a'
}
c, _ := newCipher(NameEncryptionStandard, "", "", true)
for _, test := range []struct {
in string
@@ -206,7 +201,6 @@ func TestDecryptSegment(t *testing.T) {
}{
{"64=", ErrorBadBase32Encoding},
{"!", base32.CorruptInputError(0)},
{string(longName), ErrorTooLongAfterDecode},
{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
@@ -966,7 +960,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
// Open stream with a seek of underlyingOffset
var reader io.ReadCloser
open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
open := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
end := len(ciphertext)
if underlyingLimit >= 0 {
end = int(underlyingOffset + underlyingLimit)
@@ -1007,7 +1001,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
if offset+limit > len(plaintext) {
continue
}
rc, err := c.DecryptDataSeek(context.Background(), open, int64(offset), int64(limit))
rc, err := c.DecryptDataSeek(open, int64(offset), int64(limit))
assert.NoError(t, err)
check(rc, offset, limit)
@@ -1015,14 +1009,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
}
// Try decoding it with a single open and lots of seeks
fh, err := c.DecryptDataSeek(context.Background(), open, 0, -1)
fh, err := c.DecryptDataSeek(open, 0, -1)
assert.NoError(t, err)
for _, offset := range trials {
for _, limit := range limits {
if offset+limit > len(plaintext) {
continue
}
_, err := fh.RangeSeek(context.Background(), int64(offset), io.SeekStart, int64(limit))
_, err := fh.RangeSeek(int64(offset), io.SeekStart, int64(limit))
assert.NoError(t, err)
check(fh, offset, limit)
@@ -1073,7 +1067,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
} {
what := fmt.Sprintf("offset = %d, limit = %d", test.offset, test.limit)
callCount := 0
testOpen := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
testOpen := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
switch callCount {
case 0:
assert.Equal(t, int64(0), underlyingOffset, what)
@@ -1085,11 +1079,11 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
t.Errorf("Too many calls %d for %s", callCount+1, what)
}
callCount++
return open(ctx, underlyingOffset, underlyingLimit)
return open(underlyingOffset, underlyingLimit)
}
fh, err := c.DecryptDataSeek(context.Background(), testOpen, 0, -1)
fh, err := c.DecryptDataSeek(testOpen, 0, -1)
assert.NoError(t, err)
gotOffset, err := fh.RangeSeek(context.Background(), test.offset, io.SeekStart, test.limit)
gotOffset, err := fh.RangeSeek(test.offset, io.SeekStart, test.limit)
assert.NoError(t, err)
assert.Equal(t, gotOffset, test.offset)
}


@@ -2,20 +2,18 @@
package crypt
import (
"context"
"fmt"
"io"
"path"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
)
// Globals
@@ -68,16 +66,8 @@ func init() {
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
IsPassword: true,
}, {
Name: "show_mapping",
Help: `For all files listed show how the names encrypt.
If this flag is set then for each file that the remote is asked to
list, it will log (at level INFO) a line stating the decrypted file
name and the encrypted file name.
This is so you can work out which encrypted names are which decrypted
names just in case you need to do something with the encrypted file
names, or for debugging purposes.`,
Name: "show_mapping",
Help: "For all files listed show how the names encrypt.",
Default: false,
Hide: fs.OptionHideConfigurator,
Advanced: true,
@@ -123,7 +113,7 @@ func NewCipher(m configmap.Mapper) (Cipher, error) {
return newCipherForConfig(opt)
}
// NewFs constructs an Fs from the path, container:path
// NewFs contstructs an Fs from the path, container:path
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
@@ -139,20 +129,16 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
}
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
}
// Look for a file first
remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath))
wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
remotePath := path.Join(remote, cipher.EncryptFileName(rpath))
wrappedFs, err := fs.NewFs(remotePath)
// if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile {
remotePath = fspath.JoinRootPath(wPath, cipher.EncryptDirName(rpath))
wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig)
remotePath = path.Join(remote, cipher.EncryptDirName(rpath))
wrappedFs, err = fs.NewFs(remotePath)
}
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remotePath)
}
f := &Fs{
Fs: wrappedFs,
@@ -170,10 +156,23 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
WriteMimeType: false,
BucketBased: true,
CanHaveEmptyDirectories: true,
SetTier: true,
GetTier: true,
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
doChangeNotify := wrappedFs.Features().ChangeNotify
if doChangeNotify != nil {
f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
decrypted, err := f.DecryptFileName(path)
if err != nil {
fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
return
}
notifyFunc(decrypted, entryType)
}
return doChangeNotify(wrappedNotifyFunc, pollInterval)
}
}
return f, err
}
@@ -190,7 +189,6 @@ type Options struct {
// Fs represents a wrapped fs.Fs
type Fs struct {
fs.Fs
wrapper fs.Fs
name string
root string
opt Options
@@ -233,7 +231,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
}
// Encrypt an directory file name to entries.
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
@@ -243,18 +241,18 @@ func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Director
if f.opt.ShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newDir(ctx, dir))
*entries = append(*entries, f.newDir(dir))
}
// Encrypt some directory entries. This alters entries returning it as newEntries.
func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
newEntries = entries[:0] // in place filter
for _, entry := range entries {
switch x := entry.(type) {
case fs.Object:
f.add(&newEntries, x)
case fs.Directory:
f.addDir(ctx, &newEntries, x)
f.addDir(&newEntries, x)
default:
return nil, errors.Errorf("Unknown object type %T", entry)
}
@@ -271,12 +269,12 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
entries, err = f.Fs.List(f.cipher.EncryptDirName(dir))
if err != nil {
return nil, err
}
return f.encryptEntries(ctx, entries)
return f.encryptEntries(entries)
}
// ListR lists the objects and directories of the Fs starting
@@ -295,9 +293,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
return f.Fs.Features().ListR(ctx, f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
newEntries, err := f.encryptEntries(ctx, entries)
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
return f.Fs.Features().ListR(f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
newEntries, err := f.encryptEntries(entries)
if err != nil {
return err
}
@@ -306,18 +304,18 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
o, err := f.Fs.NewObject(ctx, f.cipher.EncryptFileName(remote))
func (f *Fs) NewObject(remote string) (fs.Object, error) {
o, err := f.Fs.NewObject(f.cipher.EncryptFileName(remote))
if err != nil {
return nil, err
}
return f.newObject(o), nil
}
type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
// Encrypt the data into wrappedIn
wrappedIn, err := f.cipher.EncryptData(in)
if err != nil {
@@ -333,17 +331,11 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
return nil, err
}
// unwrap the accounting
var wrap accounting.WrapFn
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
// add the hasher
wrappedIn = io.TeeReader(wrappedIn, hasher)
// wrap the accounting back on
wrappedIn = wrap(wrappedIn)
}
// Transfer the data
o, err := put(ctx, wrappedIn, f.newObjectInfo(src), options...)
o, err := put(wrappedIn, f.newObjectInfo(src), options...)
if err != nil {
return nil, err
}
@@ -352,13 +344,13 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if ht != hash.None && hasher != nil {
srcHash := hasher.Sums()[ht]
var dstHash string
dstHash, err = o.Hash(ctx, ht)
dstHash, err = o.Hash(ht)
if err != nil {
return nil, errors.Wrap(err, "failed to read destination hash")
}
if srcHash != "" && dstHash != "" && srcHash != dstHash {
// remove object
err = o.Remove(ctx)
err = o.Remove()
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
@@ -374,13 +366,13 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(ctx, in, src, options, f.Fs.Put)
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(in, src, options, f.Fs.Put)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(ctx, in, src, options, f.Fs.Features().PutStream)
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(in, src, options, f.Fs.Features().PutStream)
}
// Hashes returns the supported hash sets.
@@ -391,15 +383,15 @@ func (f *Fs) Hashes() hash.Set {
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
func (f *Fs) Mkdir(dir string) error {
return f.Fs.Mkdir(f.cipher.EncryptDirName(dir))
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir))
func (f *Fs) Rmdir(dir string) error {
return f.Fs.Rmdir(f.cipher.EncryptDirName(dir))
}
// Purge all files in the root and the root directory
@@ -408,12 +400,12 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context) error {
func (f *Fs) Purge() error {
do := f.Fs.Features().Purge
if do == nil {
return fs.ErrorCantPurge
}
return do(ctx)
return do()
}
// Copy src to this remote using server side copy operations.
@@ -425,7 +417,7 @@ func (f *Fs) Purge(ctx context.Context) error {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Copy
if do == nil {
return nil, fs.ErrorCantCopy
@@ -434,7 +426,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
if !ok {
return nil, fs.ErrorCantCopy
}
oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote))
oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
if err != nil {
return nil, err
}
@@ -450,7 +442,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Move
if do == nil {
return nil, fs.ErrorCantMove
@@ -459,7 +451,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if !ok {
return nil, fs.ErrorCantMove
}
oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote))
oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
if err != nil {
return nil, err
}
@@ -474,7 +466,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
do := f.Fs.Features().DirMove
if do == nil {
return fs.ErrorCantDirMove
@@ -484,14 +476,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
return do(ctx, srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
return do(srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
}
// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.Fs.Features().PutUnchecked
if do == nil {
return nil, errors.New("can't PutUnchecked")
@@ -500,7 +492,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
if err != nil {
return nil, err
}
o, err := do(ctx, wrappedIn, f.newObjectInfo(src))
o, err := do(wrappedIn, f.newObjectInfo(src))
if err != nil {
return nil, err
}
@@ -511,21 +503,21 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) CleanUp() error {
do := f.Fs.Features().CleanUp
if do == nil {
return errors.New("can't CleanUp")
}
return do(ctx)
return do()
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
func (f *Fs) About() (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("About not supported")
}
return do(ctx)
return do()
}
// UnWrap returns the Fs that this Fs is wrapping
@@ -533,16 +525,6 @@ func (f *Fs) UnWrap() fs.Fs {
return f.Fs
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// EncryptFileName returns an encrypted file name
func (f *Fs) EncryptFileName(fileName string) string {
return f.cipher.EncryptFileName(fileName)
@@ -554,13 +536,13 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
}
// ComputeHash takes the nonce from o, and encrypts the contents of
// src with it, and calculates the hash given by HashType on the fly
// src with it, and calcuates the hash given by HashType on the fly
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
// Read the nonce - opening the file is sufficient to read the nonce in
// use a limited read so we only read the header
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
in, err := o.Object.Open(&fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
if err != nil {
return "", errors.Wrap(err, "failed to open object to read nonce")
}
@@ -590,7 +572,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
}
// Open the src for input
in, err = src.Open(ctx)
in, err = src.Open()
if err != nil {
return "", errors.Wrap(err, "failed to open src")
}
@@ -615,75 +597,6 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
return m.Sums()[hashType], nil
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
do := f.Fs.Features().MergeDirs
if do == nil {
return errors.New("MergeDirs not supported")
}
out := make([]fs.Directory, len(dirs))
for i, dir := range dirs {
out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
}
return do(ctx, out)
}
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
do := f.Fs.Features().DirCacheFlush
if do != nil {
do()
}
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
do := f.Fs.Features().PublicLink
if do == nil {
return "", errors.New("PublicLink not supported")
}
o, err := f.NewObject(ctx, remote)
if err != nil {
// assume it is a directory
return do(ctx, f.cipher.EncryptDirName(remote))
}
return do(ctx, o.(*Object).Object.Remote())
}
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
do := f.Fs.Features().ChangeNotify
if do == nil {
return
}
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
var (
err error
decrypted string
)
switch entryType {
case fs.EntryDirectory:
decrypted, err = f.cipher.DecryptDirName(path)
case fs.EntryObject:
decrypted, err = f.cipher.DecryptFileName(path)
default:
fs.Errorf(path, "crypt ChangeNotify: ignoring unknown EntryType %d", entryType)
return
}
if err != nil {
fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
return
}
notifyFunc(decrypted, entryType)
}
do(ctx, wrappedNotifyFunc, pollIntervalChan)
}
// Object describes a wrapped for being read from the Fs
//
// This decrypts the remote name and decrypts the data
@@ -734,7 +647,7 @@ func (o *Object) Size() int64 {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
func (o *Object) Hash(ht hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
@@ -744,7 +657,7 @@ func (o *Object) UnWrap() fs.Object {
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
var openOptions []fs.OpenOption
var offset, limit int64 = 0, -1
for _, option := range options {
@@ -758,10 +671,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
openOptions = append(openOptions, option)
}
}
rc, err = o.f.cipher.DecryptDataSeek(ctx, func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
rc, err = o.f.cipher.DecryptDataSeek(func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
if underlyingOffset == 0 && underlyingLimit < 0 {
// Open with no seek
return o.Object.Open(ctx, openOptions...)
return o.Object.Open(openOptions...)
}
// Open stream with a range of underlyingOffset, underlyingLimit
end := int64(-1)
@@ -772,7 +685,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
}
newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end})
return o.Object.Open(ctx, newOpenOptions...)
return o.Object.Open(newOpenOptions...)
}, offset, limit)
if err != nil {
return nil, err
@@ -781,43 +694,25 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
update := func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return o.Object, o.Object.Update(ctx, in, src, options...)
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return o.Object, o.Object.Update(in, src, options...)
}
_, err := o.f.put(ctx, in, src, options, update)
_, err := o.f.put(in, src, options, update)
return err
}
// newDir returns a dir with the Name decrypted
func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
newDir := fs.NewDirCopy(ctx, dir)
func (f *Fs) newDir(dir fs.Directory) fs.Directory {
new := fs.NewDirCopy(dir)
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
fs.Debugf(remote, "Undecryptable dir name: %v", err)
} else {
newDir.SetRemote(decryptedRemote)
new.SetRemote(decryptedRemote)
}
return newDir
}
// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
do := f.Fs.Features().UserInfo
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx)
}
// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
do := f.Fs.Features().Disconnect
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx)
return new
}
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
@@ -856,38 +751,10 @@ func (o *ObjectInfo) Size() int64 {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
func (o *ObjectInfo) Hash(hash hash.Type) (string, error) {
return "", nil
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
do, ok := o.Object.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}
// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *Object) SetTier(tier string) error {
do, ok := o.Object.(fs.SetTierer)
if !ok {
return errors.New("crypt: underlying remote does not support SetTier")
}
return do.SetTier(tier)
}
// GetTier returns storage tier or class of the Object
func (o *Object) GetTier() string {
do, ok := o.Object.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -901,17 +768,7 @@ var (
_ fs.UnWrapper = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.SetTierer = (*Object)(nil)
_ fs.GetTierer = (*Object)(nil)
)
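The var block above is the standard Go idiom for compile-time interface checks: assigning a typed nil pointer to the blank identifier costs nothing at runtime but fails the build if a method goes missing. In miniature:

package main

type Sizer interface{ Size() int64 }

type Object struct{}

func (o *Object) Size() int64 { return 0 }

// The assignment costs nothing at runtime but breaks the build the
// moment *Object stops satisfying Sizer.
var _ Sizer = (*Object)(nil)

func main() {}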


@@ -6,33 +6,14 @@ import (
"path/filepath"
"testing"
"github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive" // for integration tests
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/swift" // for integration tests
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/backend/crypt"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil),
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
// TestStandard runs integration tests against the remote
func TestStandard(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt"
fstests.Run(t, &fstests.Opt{
@@ -44,16 +25,11 @@ func TestStandard(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
// TestOff runs integration tests against the remote
func TestOff(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off")
name := "TestCrypt2"
fstests.Run(t, &fstests.Opt{
@@ -65,16 +41,11 @@ func TestOff(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "off"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
// TestObfuscate runs integration tests against the remote
func TestObfuscate(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt3"
fstests.Run(t, &fstests.Opt{
@@ -86,8 +57,6 @@ func TestObfuscate(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
},
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
SkipBadWindowsCharacters: true,
})
}

File diff suppressed because it is too large


@@ -1,81 +1,63 @@
package drive
import (
"bytes"
"context"
"encoding/json"
"io"
"io/ioutil"
"mime"
"path/filepath"
"strings"
"testing"
"github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/api/drive/v3"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
func TestDriveScopes(t *testing.T) {
for _, test := range []struct {
in string
want []string
wantFlag bool
}{
{"", []string{
"https://www.googleapis.com/auth/drive",
}, false},
{" drive.file , drive.readonly", []string{
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/drive.readonly",
}, false},
{" drive.file , drive.appfolder", []string{
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/drive.appfolder",
}, true},
} {
got := driveScopes(test.in)
assert.Equal(t, test.want, got, test.in)
gotFlag := driveScopesContainsAppFolder(got)
assert.Equal(t, test.wantFlag, gotFlag, test.in)
}
}
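A plausible reconstruction of driveScopes from the test table above (illustrative only; the real implementation may differ): empty input falls back to full drive access, otherwise each comma-separated short name is trimmed and prefixed with the Google auth URL.

package main

import (
	"fmt"
	"strings"
)

const scopePrefix = "https://www.googleapis.com/auth/"

// driveScopes expands short scope names into full Google auth URLs.
func driveScopes(scopes string) (out []string) {
	if scopes == "" {
		scopes = "drive"
	}
	for _, scope := range strings.Split(scopes, ",") {
		out = append(out, scopePrefix+strings.TrimSpace(scope))
	}
	return out
}

// driveScopesContainsAppFolder reports whether the app folder scope
// was requested, which the backend treats specially.
func driveScopesContainsAppFolder(scopes []string) bool {
	for _, scope := range scopes {
		if scope == scopePrefix+"drive.appfolder" {
			return true
		}
	}
	return false
}

func main() {
	got := driveScopes(" drive.file , drive.readonly")
	fmt.Println(got, driveScopesContainsAppFolder(got))
}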
const exampleExportFormats = `{
"application/vnd.google-apps.document": [
"application/rtf",
"application/vnd.oasis.opendocument.text",
"text/html",
"application/pdf",
"application/epub+zip",
"application/zip",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"text/plain"
],
"application/vnd.google-apps.spreadsheet": [
"application/x-vnd.oasis.opendocument.spreadsheet",
"text/tab-separated-values",
"application/pdf",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"text/csv",
"application/zip",
"application/vnd.oasis.opendocument.spreadsheet"
],
"application/vnd.google-apps.jam": [
"application/pdf"
],
"application/vnd.google-apps.script": [
"application/vnd.google-apps.script+json"
],
"application/vnd.google-apps.presentation": [
"application/vnd.oasis.opendocument.presentation",
"application/pdf",
"application/vnd.openxmlformats-officedocument.presentationml.presentation",
"text/plain"
],
"application/vnd.google-apps.form": [
"application/zip"
],
"application/vnd.google-apps.drawing": [
"image/svg+xml",
"image/png",
"application/pdf",
"image/jpeg"
]
}`
/*
var additionalMimeTypes = map[string]string{
"application/vnd.ms-excel.sheet.macroenabled.12": ".xlsm",
"application/vnd.ms-excel.template.macroenabled.12": ".xltm",
"application/vnd.ms-powerpoint.presentation.macroenabled.12": ".pptm",
"application/vnd.ms-powerpoint.slideshow.macroenabled.12": ".ppsm",
"application/vnd.ms-powerpoint.template.macroenabled.12": ".potm",
"application/vnd.ms-powerpoint": ".ppt",
"application/vnd.ms-word.document.macroenabled.12": ".docm",
"application/vnd.ms-word.template.macroenabled.12": ".dotm",
"application/vnd.openxmlformats-officedocument.presentationml.template": ".potx",
"application/vnd.openxmlformats-officedocument.spreadsheetml.template": ".xltx",
"application/vnd.openxmlformats-officedocument.wordprocessingml.template": ".dotx",
"application/vnd.sun.xml.writer": ".sxw",
"text/richtext": ".rtf",
}
*/
var exportFormats map[string][]string
// Load the example export formats into exportFormats for testing
func TestInternalLoadExampleFormats(t *testing.T) {
fetchFormatsOnce.Do(func() {})
buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
var about struct {
ExportFormats map[string][]string `json:"exportFormats,omitempty"`
ImportFormats map[string][]string `json:"importFormats,omitempty"`
}
require.NoError(t, err)
require.NoError(t, json.Unmarshal(buf, &about))
_exportFormats = fixMimeTypeMap(about.ExportFormats)
_importFormats = fixMimeTypeMap(about.ImportFormats)
func TestInternalLoadExampleExportFormats(t *testing.T) {
assert.NoError(t, json.Unmarshal([]byte(exampleExportFormats), &exportFormats))
}
func TestInternalParseExtensions(t *testing.T) {
@@ -84,204 +66,47 @@ func TestInternalParseExtensions(t *testing.T) {
want []string
wantErr error
}{
{"doc", []string{".doc"}, nil},
{" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
{"doc", []string{"doc"}, nil},
{" docx ,XLSX, pptx,svg", []string{"docx", "xlsx", "pptx", "svg"}, nil},
{"docx,svg,Docx", []string{"docx", "svg"}, nil},
{"docx,potato,docx", []string{"docx"}, errors.New(`couldn't find mime type for extension "potato"`)},
} {
extensions, _, gotErr := parseExtensions(test.in)
f := new(Fs)
gotErr := f.parseExtensions(test.in)
if test.wantErr == nil {
assert.NoError(t, gotErr)
} else {
assert.EqualError(t, gotErr, test.wantErr.Error())
}
assert.Equal(t, test.want, extensions)
assert.Equal(t, test.want, f.extensions)
}
// Test it is appending
extensions, _, gotErr := parseExtensions("docx,svg", "docx,svg,xlsx")
assert.NoError(t, gotErr)
assert.Equal(t, []string{".docx", ".svg", ".xlsx"}, extensions)
f := new(Fs)
assert.Nil(t, f.parseExtensions("docx,svg"))
assert.Nil(t, f.parseExtensions("docx,svg,xlsx"))
assert.Equal(t, []string{"docx", "svg", "xlsx"}, f.extensions)
}
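From these tests, parseExtensions can be sketched as follows (illustrative; the real function also returns the matching MIME types, and rclone registers more MIME types than Go's built-in table knows):

package main

import (
	"fmt"
	"mime"
	"strings"
)

// parseExtensions lower-cases, trims, and de-duplicates extensions
// across all arguments, normalises them to a leading dot, and rejects
// any extension with no known MIME type.
func parseExtensions(extensionsIn ...string) (extensions []string, err error) {
	seen := map[string]bool{}
	for _, extensionText := range extensionsIn {
		for _, extension := range strings.Split(extensionText, ",") {
			extension = strings.ToLower(strings.TrimSpace(extension))
			if extension == "" {
				continue
			}
			if !strings.HasPrefix(extension, ".") {
				extension = "." + extension
			}
			if mime.TypeByExtension(extension) == "" {
				return extensions, fmt.Errorf("couldn't find MIME type for extension %q", extension)
			}
			if !seen[extension] {
				seen[extension] = true
				extensions = append(extensions, extension)
			}
		}
	}
	return extensions, nil
}

func main() {
	fmt.Println(parseExtensions(" pdf ,SVG, svg")) // [.pdf .svg] <nil>
}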
func TestInternalFindExportFormat(t *testing.T) {
item := &drive.File{
Name: "file",
MimeType: "application/vnd.google-apps.document",
}
item := new(drive.File)
item.MimeType = "application/vnd.google-apps.document"
for _, test := range []struct {
extensions []string
wantExtension string
wantMimeType string
}{
{[]string{}, "", ""},
{[]string{".pdf"}, ".pdf", "application/pdf"},
{[]string{".pdf", ".rtf", ".xls"}, ".pdf", "application/pdf"},
{[]string{".xls", ".rtf", ".pdf"}, ".rtf", "application/rtf"},
{[]string{".xls", ".csv", ".svg"}, "", ""},
{[]string{"pdf"}, "pdf", "application/pdf"},
{[]string{"pdf", "rtf", "xls"}, "pdf", "application/pdf"},
{[]string{"xls", "rtf", "pdf"}, "rtf", "application/rtf"},
{[]string{"xls", "csv", "svg"}, "", ""},
} {
f := new(Fs)
f.exportExtensions = test.extensions
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
f.extensions = test.extensions
gotExtension, gotMimeType := f.findExportFormat("file", exportFormats[item.MimeType])
assert.Equal(t, test.wantExtension, gotExtension)
if test.wantExtension != "" {
assert.Equal(t, item.Name+gotExtension, gotFilename)
} else {
assert.Equal(t, "", gotFilename)
}
assert.Equal(t, test.wantMimeType, gotMimeType)
assert.Equal(t, true, gotIsDocument)
}
}
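A sketch of findExportFormat as the test exercises it, with a trimmed export table and a hypothetical extension-to-MIME map: walk the preferred extensions in order and return the first one the document can be exported to.

package main

import "fmt"

// A subset of the export table above and a tiny extension→MIME map,
// both for illustration only.
var exportFormats = map[string][]string{
	"application/vnd.google-apps.document": {"application/rtf", "application/pdf", "text/plain"},
}

var extensionToMime = map[string]string{
	".pdf": "application/pdf",
	".rtf": "application/rtf",
	".xls": "application/vnd.ms-excel",
}

// findExportFormat returns the first preferred extension whose MIME
// type the document can actually be exported to.
func findExportFormat(name, mimeType string, preferred []string) (extension, filename, exportMime string) {
	for _, ext := range preferred {
		for _, m := range exportFormats[mimeType] {
			if m == extensionToMime[ext] {
				return ext, name + ext, m
			}
		}
	}
	return "", "", ""
}

func main() {
	// .xls has no export format for a Doc, so .rtf wins: file.rtf
	fmt.Println(findExportFormat("file", "application/vnd.google-apps.document", []string{".xls", ".rtf", ".pdf"}))
}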
func TestMimeTypesToExtension(t *testing.T) {
for mimeType, extension := range _mimeTypeToExtension {
extensions, err := mime.ExtensionsByType(mimeType)
assert.NoError(t, err)
assert.Contains(t, extensions, extension)
}
}
func TestExtensionToMimeType(t *testing.T) {
for mimeType, extension := range _mimeTypeToExtension {
gotMimeType := mime.TypeByExtension(extension)
mediatype, _, err := mime.ParseMediaType(gotMimeType)
assert.NoError(t, err)
assert.Equal(t, mimeType, mediatype)
}
}
func TestExtensionsForExportFormats(t *testing.T) {
if _exportFormats == nil {
t.Error("exportFormats == nil")
}
for fromMT, toMTs := range _exportFormats {
for _, toMT := range toMTs {
if !isInternalMimeType(toMT) {
extensions, err := mime.ExtensionsByType(toMT)
assert.NoError(t, err, "invalid MIME type %q", toMT)
assert.NotEmpty(t, extensions, "No extension found for %q (from: %q)", fromMT, toMT)
}
}
}
}
func TestExtensionsForImportFormats(t *testing.T) {
t.Skip()
if _importFormats == nil {
t.Error("_importFormats == nil")
}
for fromMT := range _importFormats {
if !isInternalMimeType(fromMT) {
extensions, err := mime.ExtensionsByType(fromMT)
assert.NoError(t, err, "invalid MIME type %q", fromMT)
assert.NotEmpty(t, extensions, "No extension found for %q", fromMT)
}
}
}
func (f *Fs) InternalTestDocumentImport(t *testing.T) {
oldAllow := f.opt.AllowImportNameChange
f.opt.AllowImportNameChange = true
defer func() {
f.opt.AllowImportNameChange = oldAllow
}()
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
require.NoError(t, err)
testFilesFs, err := fs.NewFs(testFilesPath)
require.NoError(t, err)
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
require.NoError(t, err)
err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.doc", "example2.doc")
require.NoError(t, err)
}
func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
require.NoError(t, err)
testFilesFs, err := fs.NewFs(testFilesPath)
require.NoError(t, err)
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
require.NoError(t, err)
err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.xlsx", "example1.ods")
require.NoError(t, err)
}
func (f *Fs) InternalTestDocumentExport(t *testing.T) {
var buf bytes.Buffer
var err error
f.exportExtensions, _, err = parseExtensions("txt")
require.NoError(t, err)
obj, err := f.NewObject(context.Background(), "example2.txt")
require.NoError(t, err)
rc, err := obj.Open(context.Background())
require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }()
_, err = io.Copy(&buf, rc)
require.NoError(t, err)
text := buf.String()
for _, excerpt := range []string{
"Lorem ipsum dolor sit amet, consectetur",
"porta at ultrices in, consectetur at augue.",
} {
require.Contains(t, text, excerpt)
}
}
func (f *Fs) InternalTestDocumentLink(t *testing.T) {
var buf bytes.Buffer
var err error
f.exportExtensions, _, err = parseExtensions("link.html")
require.NoError(t, err)
obj, err := f.NewObject(context.Background(), "example2.link.html")
require.NoError(t, err)
rc, err := obj.Open(context.Background())
require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }()
_, err = io.Copy(&buf, rc)
require.NoError(t, err)
text := buf.String()
require.True(t, strings.HasPrefix(text, "<html>"))
require.True(t, strings.HasSuffix(text, "</html>\n"))
for _, excerpt := range []string{
`<meta http-equiv="refresh"`,
`Loading <a href="`,
} {
require.Contains(t, text, excerpt)
}
}
func (f *Fs) InternalTest(t *testing.T) {
// These tests all depend on each other so run them as nested tests
t.Run("DocumentImport", func(t *testing.T) {
f.InternalTestDocumentImport(t)
t.Run("DocumentUpdate", func(t *testing.T) {
f.InternalTestDocumentUpdate(t)
t.Run("DocumentExport", func(t *testing.T) {
f.InternalTestDocumentExport(t)
t.Run("DocumentLink", func(t *testing.T) {
f.InternalTestDocumentLink(t)
})
})
})
})
}
var _ fstests.InternalTester = (*Fs)(nil)


@@ -1,35 +1,17 @@
// Test Drive filesystem interface
package drive
package drive_test
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestDrive:",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
CeilChunkSize: fstests.NextPowerOfTwo,
},
NilObject: (*drive.Object)(nil),
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)


@@ -1,178 +0,0 @@
{
"importFormats": {
"text/tab-separated-values": [
"application/vnd.google-apps.spreadsheet"
],
"application/x-vnd.oasis.opendocument.presentation": [
"application/vnd.google-apps.presentation"
],
"image/jpeg": [
"application/vnd.google-apps.document"
],
"image/bmp": [
"application/vnd.google-apps.document"
],
"image/gif": [
"application/vnd.google-apps.document"
],
"application/vnd.ms-excel.sheet.macroenabled.12": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.openxmlformats-officedocument.wordprocessingml.template": [
"application/vnd.google-apps.document"
],
"application/vnd.ms-powerpoint.presentation.macroenabled.12": [
"application/vnd.google-apps.presentation"
],
"application/vnd.ms-word.template.macroenabled.12": [
"application/vnd.google-apps.document"
],
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": [
"application/vnd.google-apps.document"
],
"image/pjpeg": [
"application/vnd.google-apps.document"
],
"application/vnd.google-apps.script+text/plain": [
"application/vnd.google-apps.script"
],
"application/vnd.ms-excel": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.sun.xml.writer": [
"application/vnd.google-apps.document"
],
"application/vnd.ms-word.document.macroenabled.12": [
"application/vnd.google-apps.document"
],
"application/vnd.ms-powerpoint.slideshow.macroenabled.12": [
"application/vnd.google-apps.presentation"
],
"text/rtf": [
"application/vnd.google-apps.document"
],
"text/plain": [
"application/vnd.google-apps.document"
],
"application/vnd.oasis.opendocument.spreadsheet": [
"application/vnd.google-apps.spreadsheet"
],
"application/x-vnd.oasis.opendocument.spreadsheet": [
"application/vnd.google-apps.spreadsheet"
],
"image/png": [
"application/vnd.google-apps.document"
],
"application/x-vnd.oasis.opendocument.text": [
"application/vnd.google-apps.document"
],
"application/msword": [
"application/vnd.google-apps.document"
],
"application/pdf": [
"application/vnd.google-apps.document"
],
"application/json": [
"application/vnd.google-apps.script"
],
"application/x-msmetafile": [
"application/vnd.google-apps.drawing"
],
"application/vnd.openxmlformats-officedocument.spreadsheetml.template": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.ms-powerpoint": [
"application/vnd.google-apps.presentation"
],
"application/vnd.ms-excel.template.macroenabled.12": [
"application/vnd.google-apps.spreadsheet"
],
"image/x-bmp": [
"application/vnd.google-apps.document"
],
"application/rtf": [
"application/vnd.google-apps.document"
],
"application/vnd.openxmlformats-officedocument.presentationml.template": [
"application/vnd.google-apps.presentation"
],
"image/x-png": [
"application/vnd.google-apps.document"
],
"text/html": [
"application/vnd.google-apps.document"
],
"application/vnd.oasis.opendocument.text": [
"application/vnd.google-apps.document"
],
"application/vnd.openxmlformats-officedocument.presentationml.presentation": [
"application/vnd.google-apps.presentation"
],
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.google-apps.script+json": [
"application/vnd.google-apps.script"
],
"application/vnd.openxmlformats-officedocument.presentationml.slideshow": [
"application/vnd.google-apps.presentation"
],
"application/vnd.ms-powerpoint.template.macroenabled.12": [
"application/vnd.google-apps.presentation"
],
"text/csv": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.oasis.opendocument.presentation": [
"application/vnd.google-apps.presentation"
],
"image/jpg": [
"application/vnd.google-apps.document"
],
"text/richtext": [
"application/vnd.google-apps.document"
]
},
"exportFormats": {
"application/vnd.google-apps.document": [
"application/rtf",
"application/vnd.oasis.opendocument.text",
"text/html",
"application/pdf",
"application/epub+zip",
"application/zip",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"text/plain"
],
"application/vnd.google-apps.spreadsheet": [
"application/x-vnd.oasis.opendocument.spreadsheet",
"text/tab-separated-values",
"application/pdf",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"text/csv",
"application/zip",
"application/vnd.oasis.opendocument.spreadsheet"
],
"application/vnd.google-apps.jam": [
"application/pdf"
],
"application/vnd.google-apps.script": [
"application/vnd.google-apps.script+json"
],
"application/vnd.google-apps.presentation": [
"application/vnd.oasis.opendocument.presentation",
"application/pdf",
"application/vnd.openxmlformats-officedocument.presentationml.presentation",
"text/plain"
],
"application/vnd.google-apps.form": [
"application/zip"
],
"application/vnd.google-apps.drawing": [
"image/svg+xml",
"image/png",
"application/pdf",
"image/jpeg"
]
}
}


@@ -19,10 +19,10 @@ import (
"regexp"
"strconv"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/readers"
"google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
)
@@ -50,13 +50,14 @@ type resumableUpload struct {
}
// Upload the io.Reader in of size bytes with contentType and info
func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
params := url.Values{
"alt": {"json"},
"uploadType": {"resumable"},
"fields": {partialFields},
func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string, info *drive.File, remote string) (*drive.File, error) {
params := make(url.Values)
params.Set("alt", "json")
params.Set("uploadType", "resumable")
params.Set("fields", partialFields)
if f.isTeamDrive {
params.Set("supportsTeamDrives", "true")
}
params.Set("supportsAllDrives", "true")
if f.opt.KeepRevisionForever {
params.Set("keepRevisionForever", "true")
}
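Both sides of this hunk build the same query string; the newer code seeds url.Values with a composite literal while the older code uses make plus Set. The two forms are interchangeable:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Composite literal: each key maps to a slice of values.
	a := url.Values{"alt": {"json"}, "uploadType": {"resumable"}}

	// make + Set: the same thing, one key at a time.
	b := make(url.Values)
	b.Set("alt", "json")
	b.Set("uploadType", "resumable")

	fmt.Println(a.Encode() == b.Encode()) // true
}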
@@ -181,7 +182,7 @@ func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunk
// been 200 OK.
//
// So parse the response out of the body. We aren't expecting
// any other 2xx codes, so we parse it unconditionally on
// any other 2xx codes, so we parse it unconditionaly on
// StatusCode
if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
return 598, err


@@ -5,7 +5,7 @@ import (
"fmt"
"testing"
"github.com/rclone/rclone/backend/dropbox/dbhash"
"github.com/ncw/rclone/backend/dropbox/dbhash"
"github.com/stretchr/testify/assert"
)


@@ -22,7 +22,6 @@ of path_display and all will be well.
*/
import (
"context"
"fmt"
"io"
"log"
@@ -32,23 +31,21 @@ import (
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/oauth2"
)
@@ -82,8 +79,8 @@ const (
// Choose 48MB which is 91% of Maximum speed. rclone by
// default does 4 transfers so this should use 4*48MB = 192MB
// by default.
defaultChunkSize = 48 * fs.MebiByte
maxChunkSize = 150 * fs.MebiByte
defaultChunkSize = 48 * 1024 * 1024
maxChunkSize = 150 * 1024 * 1024
)
var (
@@ -123,21 +120,9 @@ func init() {
Name: config.ConfigClientSecret,
Help: "Dropbox App Client Secret\nLeave blank normally.",
}, {
Name: "chunk_size",
Help: fmt.Sprintf(`Upload chunk size. (< %v).
Any files larger than this will be uploaded in chunks of this size.
Note that chunks are buffered in memory (one at a time) so rclone can
deal with retries. Setting this larger will increase the speed
slightly (at most 10%% for 128MB in tests) at the cost of using more
memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "impersonate",
Help: "Impersonate this user when using a business account.",
Default: "",
Name: "chunk_size",
Help: fmt.Sprintf("Upload chunk size. Max %v.", fs.SizeSuffix(maxChunkSize)),
Default: fs.SizeSuffix(defaultChunkSize),
Advanced: true,
}},
})
@@ -145,8 +130,7 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
}
// Fs represents a remote dropbox server
@@ -158,10 +142,9 @@ type Fs struct {
srv files.Client // the connection to the dropbox server
sharing sharing.Client // as above, but for generating sharing links
users users.Client // as above, but for accessing user information
team team.Client // for the Teams API
slashRoot string // root with "/" prefix, lowercase
slashRootSlash string // root with "/" prefix and postfix, lowercase
pacer *fs.Pacer // To pace the API calls
pacer *pacer.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none
}
@@ -205,42 +188,14 @@ func shouldRetry(err error) (bool, error) {
return false, err
}
baseErrString := errors.Cause(err).Error()
// handle any official Retry-After header from Dropbox's SDK first
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
}
return true, err
}
// Keep old behavior for backward compatibility
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
// FIXME there is probably a better way of doing this!
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
return true, err
}
return fserrors.ShouldRetry(err), err
}
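The removed branch above honours Dropbox's Retry-After header by folding the server-supplied delay into the error before returning it to the pacer. A sketch of that pattern, assuming pacer.RetryAfterError behaves as its use above suggests (it attaches a minimum wait that the pacer respects):

package main

import (
	"errors"
	"time"

	"github.com/rclone/rclone/lib/pacer"
)

// wrapRetryAfter mirrors the removed branch: when a rate-limit error
// carries a server-supplied delay, fold that delay into the error so
// the pacer waits at least that long before retrying.
func wrapRetryAfter(err error, retryAfterSeconds int) (bool, error) {
	if retryAfterSeconds > 0 {
		err = pacer.RetryAfterError(err, time.Duration(retryAfterSeconds)*time.Second)
	}
	return true, err // true asks the pacer to retry the call
}

func main() {
	_, _ = wrapRetryAfter(errors.New("too_many_requests"), 5)
}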
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.Byte
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
if cs > maxChunkSize {
return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
// NewFs constructs an Fs from the path, container:path
// NewFs contstructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
@@ -248,9 +203,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, err
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "dropbox: chunk size")
if opt.ChunkSize > maxChunkSize {
return nil, errors.Errorf("chunk size too big, must be < %v", maxChunkSize)
}
// Convert the old token if it exists. The old token was just
@@ -274,36 +228,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f := &Fs{
name: name,
opt: *opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
config := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
Client: oAuthClient, // maybe???
HeaderGenerator: f.headerGenerator,
}
// NOTE: needs to be created pre-impersonation so we can look up the impersonated user
f.team = team.New(config)
if opt.Impersonate != "" {
user := team.UserSelectorArg{
Email: opt.Impersonate,
}
user.Tag = "email"
members := []*team.UserSelectorArg{&user}
args := team.NewMembersGetInfoArgs(members)
memberIds, err := f.team.MembersGetInfo(args)
if err != nil {
return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
}
config.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
}
f.srv = files.New(config)
f.sharing = sharing.New(config)
f.users = users.New(config)
@@ -442,7 +373,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
@@ -455,7 +386,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
root := f.slashRoot
if dir != "" {
root += "/" + dir
@@ -542,22 +473,22 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
return o, o.Update(ctx, in, src, options...)
return o, o.Update(in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
func (f *Fs) Mkdir(dir string) error {
root := path.Join(f.slashRoot, dir)
// can't create or run metadata on root
@@ -587,7 +518,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
func (f *Fs) Rmdir(dir string) error {
root := path.Join(f.slashRoot, dir)
// can't remove root
@@ -643,7 +574,7 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
@@ -688,7 +619,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) (err error) {
func (f *Fs) Purge() (err error) {
// Let dropbox delete the filesystem tree
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.DeleteV2(&files.DeleteArg{Path: f.slashRoot})
@@ -706,7 +637,7 @@ func (f *Fs) Purge(ctx context.Context) (err error) {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
@@ -746,7 +677,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
func (f *Fs) PublicLink(remote string) (link string, err error) {
absPath := "/" + path.Join(f.Root(), remote)
fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
createArg := sharing.CreateSharedLinkWithSettingsArg{
@@ -799,7 +730,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -835,7 +766,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
func (f *Fs) About() (usage *fs.Usage, err error) {
var q *users.SpaceUsage
err = f.pacer.Call(func() (bool, error) {
q, err = f.users.GetSpaceUsage()
@@ -887,7 +818,7 @@ func (o *Object) Remote() string {
}
// Hash returns the dropbox special hash
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.Dropbox {
return "", hash.ErrUnsupported
}
@@ -949,7 +880,7 @@ func (o *Object) readMetaData() (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Debugf(o, "Failed to read metadata: %v", err)
@@ -961,7 +892,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// SetModTime sets the modification time of the local fs object
//
// Commits the datastore
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
func (o *Object) SetModTime(modTime time.Time) error {
// Dropbox doesn't have a way of doing this so returning this
// error will cause the file to be deleted first then
// re-uploaded to set the time.
@@ -974,8 +905,7 @@ func (o *Object) Storable() bool {
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.bytes)
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
headers := fs.OpenOptionHeaders(options)
arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers}
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1101,7 +1031,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
remote := o.remotePath()
if ignoredFiles.MatchString(remote) {
fs.Logf(o, "File name disallowed - not uploading")
@@ -1110,7 +1040,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
commitInfo := files.NewCommitInfo(o.remotePath())
commitInfo.Mode.Tag = "overwrite"
// The Dropbox API only accepts timestamps in UTC with second precision.
commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
commitInfo.ClientModified = src.ModTime().UTC().Round(time.Second)
size := src.Size()
var err error
@@ -1130,7 +1060,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
func (o *Object) Remove() (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{Path: o.remotePath()})
return shouldRetry(err)


@@ -1,26 +1,17 @@
// Test Dropbox filesystem interface
package dropbox
package dropbox_test
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/backend/dropbox"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestDropbox:",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MaxChunkSize: maxChunkSize,
},
NilObject: (*dropbox.Object)(nil),
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)


@@ -1,389 +0,0 @@
package fichier
import (
"context"
"io"
"net/http"
"regexp"
"strconv"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
)
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString
func (f *Fs) getDownloadToken(url string) (*GetTokenResponse, error) {
request := DownloadRequest{
URL: url,
Single: 1,
}
opts := rest.Opts{
Method: "POST",
Path: "/download/get_token.cgi",
}
var token GetTokenResponse
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, &request, &token)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
}
return &token, nil
}
func fileFromSharedFile(file *SharedFile) File {
return File{
URL: file.Link,
Filename: file.Filename,
Size: file.Size,
}
}
func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
}
var sharedFiles SharedFolderResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, nil, &sharedFiles)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
}
entries = make([]fs.DirEntry, len(sharedFiles))
for i, sharedFile := range sharedFiles {
entries[i] = f.newObjectFromFile(ctx, "", fileFromSharedFile(&sharedFile))
}
return entries, nil
}
func (f *Fs) listFiles(directoryID int) (filesList *FilesList, err error) {
// fs.Debugf(f, "Requesting files for dir `%s`", directoryID)
request := ListFilesRequest{
FolderID: directoryID,
}
opts := rest.Opts{
Method: "POST",
Path: "/file/ls.cgi",
}
filesList = &FilesList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, &request, filesList)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
}
return filesList, nil
}
func (f *Fs) listFolders(directoryID int) (foldersList *FoldersList, err error) {
// fs.Debugf(f, "Requesting folders for id `%s`", directoryID)
request := ListFolderRequest{
FolderID: directoryID,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/ls.cgi",
}
foldersList = &FoldersList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, &request, foldersList)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list folders")
}
// fs.Debugf(f, "Got FoldersList for id `%s`", directoryID)
return foldersList, err
}
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
files, err := f.listFiles(folderID)
if err != nil {
return nil, err
}
folders, err := f.listFolders(folderID)
if err != nil {
return nil, err
}
entries = make([]fs.DirEntry, len(files.Items)+len(folders.SubFolders))
for i, item := range files.Items {
item.Filename = restoreReservedChars(item.Filename)
entries[i] = f.newObjectFromFile(ctx, dir, item)
}
for i, folder := range folders.SubFolders {
createDate, err := time.Parse("2006-01-02 15:04:05", folder.CreateDate)
if err != nil {
return nil, err
}
folder.Name = restoreReservedChars(folder.Name)
fullPath := getRemote(dir, folder.Name)
folderID := strconv.Itoa(folder.ID)
entries[len(files.Items)+i] = fs.NewDir(fullPath, createDate).SetID(folderID)
// fs.Debugf(f, "Put Path `%s` for id `%d` into dircache", fullPath, folder.ID)
f.dirCache.Put(fullPath, folderID)
}
return entries, nil
}
func (f *Fs) newObjectFromFile(ctx context.Context, dir string, item File) *Object {
return &Object{
fs: f,
remote: getRemote(dir, item.Filename),
file: item,
}
}
func getRemote(dir, fileName string) string {
if dir == "" {
return fileName
}
return dir + "/" + fileName
}
func (f *Fs) makeFolder(leaf string, folderID int) (response *MakeFolderResponse, err error) {
name := replaceReservedChars(leaf)
// fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID)
request := MakeFolderRequest{
FolderID: folderID,
Name: name,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/mkdir.cgi",
}
response = &MakeFolderResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, &request, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create folder")
}
// fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID)
return response, err
}
func (f *Fs) removeFolder(name string, folderID int) (response *GenericOKResponse, err error) {
// fs.Debugf(f, "Removing folder with id `%s`", directoryID)
request := &RemoveFolderRequest{
FolderID: folderID,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/rm.cgi",
}
response = &GenericOKResponse{}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rest.CallJSON(&opts, request, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove folder")
}
if response.Status != "OK" {
return nil, errors.New("Can't remove non-empty dir")
}
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
return response, nil
}
func (f *Fs) deleteFile(url string) (response *GenericOKResponse, err error) {
request := &RemoveFileRequest{
Files: []RmFile{
{url},
},
}
opts := rest.Opts{
Method: "POST",
Path: "/file/rm.cgi",
}
response = &GenericOKResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, request, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove file")
}
// fs.Debugf(f, "Removed file with url `%s`", url)
return response, nil
}
func (f *Fs) getUploadNode() (response *GetUploadNodeResponse, err error) {
// fs.Debugf(f, "Requesting Upload node")
opts := rest.Opts{
Method: "GET",
ContentType: "application/json", // 1Fichier API is bad
Path: "/upload/get_upload_server.cgi",
}
response = &GetUploadNodeResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, nil, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "didnt got an upload node")
}
// fs.Debugf(f, "Got Upload node")
return response, err
}
func (f *Fs) uploadFile(in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
// fs.Debugf(f, "Uploading File `%s`", fileName)
fileName = replaceReservedChars(fileName)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
}
opts := rest.Opts{
Method: "POST",
Path: "/upload.cgi",
Parameters: map[string][]string{
"id": {uploadID},
},
NoResponse: true,
Body: in,
ContentLength: &size,
MultipartContentName: "file[]",
MultipartFileName: fileName,
MultipartParams: map[string][]string{
"did": {folderID},
},
}
if node != "" {
opts.RootURL = "https://" + node
}
err = f.pacer.CallNoRetry(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, nil, nil)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't upload file")
}
// fs.Debugf(f, "Uploaded File `%s`", fileName)
return response, err
}
func (f *Fs) endUpload(uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
// fs.Debugf(f, "Ending File Upload `%s`", uploadID)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
}
opts := rest.Opts{
Method: "GET",
Path: "/end.pl",
RootURL: "https://" + nodeurl,
Parameters: map[string][]string{
"xid": {uploadID},
},
ExtraHeaders: map[string]string{
"JSON": "1",
},
}
response = &EndFileUploadResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, nil, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't finish file upload")
}
return response, err
}
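Taken together, these helpers implement 1Fichier's three-step upload protocol: reserve an upload node, POST the file to that node under a node-issued ID, then finalise on the same node to collect the link. A condensed, hypothetical view of the control flow (error types and payloads elided; the real methods live on *Fs):

package main

// uploadNode is a hypothetical condensed stand-in for
// GetUploadNodeResponse.
type uploadNode struct{ ID, URL string }

func getUploadNode() (uploadNode, error) {
	return uploadNode{ID: "abc123", URL: "node.example"}, nil
}

func uploadFile(n uploadNode, name string) error { return nil }

func endUpload(n uploadNode) (link string, err error) {
	return "https://1fichier.com/?placeholder", nil
}

// upload shows the order of operations putUnchecked uses below.
func upload(name string) (string, error) {
	node, err := getUploadNode() // 1. reserve an upload node + ID
	if err != nil {
		return "", err
	}
	if err := uploadFile(node, name); err != nil { // 2. POST the file to that node
		return "", err
	}
	return endUpload(node) // 3. finalise on the same node, collect the link
}

func main() { _, _ = upload("file.bin") }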


@@ -1,411 +0,0 @@
package fichier
import (
"context"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
const (
rootID = "0"
apiBaseURL = "https://api.1fichier.com/v1"
minSleep = 334 * time.Millisecond // 3 API calls per second is recommended
maxSleep = 5 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
func init() {
fs.Register(&fs.RegInfo{
Name: "fichier",
Description: "1Fichier",
Config: func(name string, config configmap.Mapper) {
},
NewFs: NewFs,
Options: []fs.Option{
{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
Name: "api_key",
},
{
Help: "If you want to download a shared folder, add this parameter",
Name: "shared_folder",
Required: false,
Advanced: true,
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
}
// Fs is the interface a cloud storage system must provide
type Fs struct {
root string
name string
features *fs.Features
dirCache *dircache.DirCache
baseClient *http.Client
options *Options
pacer *fs.Pacer
rest *rest.Client
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
folderID, err := strconv.Atoi(pathID)
if err != nil {
return "", false, err
}
folders, err := f.listFolders(folderID)
if err != nil {
return "", false, err
}
for _, folder := range folders.SubFolders {
if folder.Name == leaf {
pathIDOut := strconv.Itoa(folder.ID)
return pathIDOut, true, nil
}
}
return "", false, nil
}
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
folderID, err := strconv.Atoi(pathID)
if err != nil {
return "", err
}
resp, err := f.makeFolder(leaf, folderID)
if err != nil {
return "", err
}
return strconv.Itoa(resp.FolderID), err
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("1Fichier root '%s'", f.root)
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns the supported hash types of the filesystem
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.Whirlpool)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// NewFs makes a new Fs object from the path
//
// The path is of the form remote:path
//
// Remotes are looked up in the config file. If the remote isn't
// found then NotFoundInConfigFile will be returned.
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(name string, rootleaf string, config configmap.Mapper) (fs.Fs, error) {
root := replaceReservedChars(rootleaf)
opt := new(Options)
err := configstruct.Set(config, opt)
if err != nil {
return nil, err
}
// If using a Shared Folder override root
if opt.SharedFolder != "" {
root = ""
}
// workaround for wonky parser
root = strings.Trim(root, "/")
f := &Fs{
name: name,
root: root,
options: opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
baseClient: &http.Client{},
}
f.features = (&fs.Features{
DuplicateFiles: true,
CanHaveEmptyDirectories: true,
}).Fill(f)
client := fshttp.NewClient(fs.Config)
f.rest = rest.NewClient(client).SetRoot(apiBaseURL)
f.rest.SetHeader("Authorization", "Bearer "+f.options.APIKey)
f.dirCache = dircache.New(root, rootID, f)
ctx := context.Background()
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
return f, nil
}
return nil, err
}
f.features.Fill(&tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
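The tail of NewFs follows rclone's convention for a root that turns out to be a file: return an Fs rooted at the parent plus the sentinel fs.ErrorIsFile. A sketch of how a caller treats that sentinel as success for single-file operations:

package main

import (
	"log"

	"github.com/rclone/rclone/fs"
)

func main() {
	f, err := fs.NewFs("remote:path/to/maybe-a-file")
	if err == fs.ErrorIsFile {
		// f is rooted at the parent directory; the leaf is a file.
		err = nil
	}
	if err != nil {
		log.Fatal(err)
	}
	_ = f
}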
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.options.SharedFolder != "" {
return f.listSharedFiles(ctx, f.options.SharedFolder)
}
dirContent, err := f.listDir(ctx, dir)
if err != nil {
return nil, err
}
return dirContent, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
}
return nil, err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
files, err := f.listFiles(folderID)
if err != nil {
return nil, err
}
for _, file := range files.Items {
if file.Filename == leaf {
path, ok := f.dirCache.GetInv(directoryID)
if !ok {
return nil, errors.New("Cannot find dir in dircache")
}
return f.newObjectFromFile(ctx, path, file), nil
}
}
return nil, fs.ErrorObjectNotFound
}
// Put in to the remote path with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
existingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
default:
return nil, err
}
}
// putUnchecked uploads the object with the given name and size
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(100E9) {
return nil, errors.New("File too big, cant upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
nodeResponse, err := f.getUploadNode()
if err != nil {
return nil, err
}
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, err
}
_, err = f.uploadFile(in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
if err != nil {
return nil, err
}
fileUploadResponse, err := f.endUpload(nodeResponse.ID, nodeResponse.URL)
if err != nil {
return nil, err
}
if len(fileUploadResponse.Links) != 1 {
return nil, errors.New("unexpected amount of files")
}
link := fileUploadResponse.Links[0]
fileSize, err := strconv.ParseInt(link.Size, 10, 64)
if err != nil {
return nil, err
}
return &Object{
fs: f,
remote: remote,
file: File{
ACL: 0,
CDN: 0,
Checksum: link.Whirlpool,
ContentType: "",
Date: time.Now().Format("2006-01-02 15:04:05"),
Filename: link.Filename,
Pass: 0,
Size: int(fileSize),
URL: link.Download,
},
}, nil
}
// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.putUnchecked(ctx, in, src.Remote(), src.Size(), options...)
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return err
}
_, err = f.removeFolder(dir, folderID)
if err != nil {
return err
}
f.dirCache.FlushDir(dir)
return nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ dircache.DirCacher = (*Fs)(nil)
)


@@ -1,17 +0,0 @@
// Test 1Fichier filesystem interface
package fichier
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fs.Config.LogLevel = fs.LogLevelDebug
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFichier:",
})
}


@@ -1,158 +0,0 @@
package fichier
import (
"context"
"io"
"net/http"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
)
// Object is a filesystem like object provided by an Fs
type Object struct {
fs *Fs
remote string
file File
}
// String returns a description of the Object
func (o *Object) String() string {
return o.file.Filename
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
modTime, err := time.Parse("2006-01-02 15:04:05", o.file.Date)
if err != nil {
return time.Now()
}
return modTime
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return int64(o.file.Size)
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.fs
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.Whirlpool {
return "", hash.ErrUnsupported
}
return o.file.Checksum, nil
}
// Storable says whether this object can be stored
func (o *Object) Storable() bool {
return true
}
// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(context.Context, time.Time) error {
return fs.ErrorCantSetModTime
//return errors.New("setting modtime is not supported for 1fichier remotes")
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
fs.FixRangeOption(options, int64(o.file.Size))
downloadToken, err := o.fs.getDownloadToken(o.file.URL)
if err != nil {
return nil, err
}
var resp *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: downloadToken.URL,
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.rest.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
return resp.Body, err
}
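fs.FixRangeOption above turns seek and open-ended range options into a concrete byte range, using the known file size, before the download request is made. A sketch of a caller reading just the first KiB through this Open (the helper name is an assumption; needs context, io/ioutil and the fs package):

func readFirstKiB(ctx context.Context, o fs.Object) ([]byte, error) {
    rc, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})
    if err != nil {
        return nil, err
    }
    defer func() { _ = rc.Close() }()
    return ioutil.ReadAll(rc)
}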
// Update uploads in to the object with the given modTime and size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// For unknown-sized objects (indicated by src.Size() == -1), Update should
// either return an error or update the object properly rather than panicking.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if src.Size() < 0 {
return errors.New("refusing to update with unknown size")
}
// upload with new size but old name
info, err := o.fs.putUnchecked(ctx, in, o.Remote(), src.Size(), options...)
if err != nil {
return err
}
// Delete duplicate after successful upload
err = o.Remove(ctx)
if err != nil {
return errors.Wrap(err, "failed to remove old version")
}
// Replace guts of old object with new one
*o = *info.(*Object)
return nil
}
// Remove removes this object
func (o *Object) Remove(ctx context.Context) error {
// fs.Debugf(f, "Removing file `%s` with url `%s`", o.file.Filename, o.file.URL)
_, err := o.fs.deleteFile(o.file.URL)
if err != nil {
return err
}
return nil
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.file.ContentType
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
return o.file.URL
}
// Check the interfaces are satisfied
var (
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)

View File

@@ -1,71 +0,0 @@
/*
Translate file names for 1fichier
1Fichier reserved characters
The characters in charMap below are 1Fichier reserved characters, and
can't be used in 1Fichier folder and file names.
*/
package fichier
import (
"regexp"
"strings"
)
// charMap holds replacements for characters
//
// 1Fichier has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
'<': '＜', // FULLWIDTH LESS-THAN SIGN
'>': '＞', // FULLWIDTH GREATER-THAN SIGN
'"': '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
'\'': '＇', // FULLWIDTH APOSTROPHE
'$': '＄', // FULLWIDTH DOLLAR SIGN
'`': '｀', // FULLWIDTH GRAVE ACCENT
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
)
func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}
// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// file names can't start with space either
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Replace reserved characters
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != ' ' {
return replacement
}
return c
}, in)
}
// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}

View File

@@ -1,24 +0,0 @@
package fichier
import "testing"
func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{"\"'<>/\\$`", `/`},
{" leading space", "␠leading space"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}

View File

@@ -1,120 +0,0 @@
package fichier
// ListFolderRequest is the request structure of the corresponding request
type ListFolderRequest struct {
FolderID int `json:"folder_id"`
}
// ListFilesRequest is the request structure of the corresponding request
type ListFilesRequest struct {
FolderID int `json:"folder_id"`
}
// DownloadRequest is the request structure of the corresponding request
type DownloadRequest struct {
URL string `json:"url"`
Single int `json:"single"`
}
// RemoveFolderRequest is the request structure of the corresponding request
type RemoveFolderRequest struct {
FolderID int `json:"folder_id"`
}
// RemoveFileRequest is the request structure of the corresponding request
type RemoveFileRequest struct {
Files []RmFile `json:"files"`
}
// RmFile is the request structure of the corresponding request
type RmFile struct {
URL string `json:"url"`
}
// GenericOKResponse is the response structure of the corresponding request
type GenericOKResponse struct {
Status string `json:"status"`
Message string `json:"message"`
}
// MakeFolderRequest is the request structure of the corresponding request
type MakeFolderRequest struct {
Name string `json:"name"`
FolderID int `json:"folder_id"`
}
// MakeFolderResponse is the response structure of the corresponding request
type MakeFolderResponse struct {
Name string `json:"name"`
FolderID int `json:"folder_id"`
}
// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
ID string `json:"id"`
URL string `json:"url"`
}
// GetTokenResponse is the response structure of the corresponding request
type GetTokenResponse struct {
URL string `json:"url"`
Status string `json:"Status"`
Message string `json:"Message"`
}
// SharedFolderResponse is the response structure of the corresponding request
type SharedFolderResponse []SharedFile
// SharedFile is the structure how 1Fichier returns a shared File
type SharedFile struct {
Filename string `json:"filename"`
Link string `json:"link"`
Size int `json:"size"`
}
// EndFileUploadResponse is the response structure of the corresponding request
type EndFileUploadResponse struct {
Incoming int `json:"incoming"`
Links []struct {
Download string `json:"download"`
Filename string `json:"filename"`
Remove string `json:"remove"`
Size string `json:"size"`
Whirlpool string `json:"whirlpool"`
} `json:"links"`
}
// File is the structure how 1Fichier returns a File
type File struct {
ACL int `json:"acl"`
CDN int `json:"cdn"`
Checksum string `json:"checksum"`
ContentType string `json:"content-type"`
Date string `json:"date"`
Filename string `json:"filename"`
Pass int `json:"pass"`
Size int `json:"size"`
URL string `json:"url"`
}
// FilesList is the structure how 1Fichier returns a list of files
type FilesList struct {
Items []File `json:"items"`
Status string `json:"Status"`
}
// Folder is the structure how 1Fichier returns a Folder
type Folder struct {
CreateDate string `json:"create_date"`
ID int `json:"id"`
Name string `json:"name"`
Pass int `json:"pass"`
}
// FoldersList is the structure how 1Fichier returns a list of Folders
type FoldersList struct {
FolderID int `json:"folder_id"`
Name string `json:"name"`
Status string `json:"Status"`
SubFolders []Folder `json:"sub_folders"`
}
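Note that the links in EndFileUploadResponse carry Size as a JSON string, which is why putUnchecked earlier parses it with strconv.ParseInt. A sketch of decoding such a response (the JSON body is a made-up illustration, not captured API output; needs encoding/json and strconv):

var resp EndFileUploadResponse
body := []byte(`{"incoming":0,"links":[{"download":"https://1fichier.com/?xxxx","filename":"dupe.txt","size":"5","whirlpool":"..."}]}`)
if err := json.Unmarshal(body, &resp); err != nil {
    return err
}
size, err := strconv.ParseInt(resp.Links[0].Size, 10, 64) // size == 5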

View File

@@ -2,8 +2,6 @@
package ftp
import (
"context"
"crypto/tls"
"io"
"net/textproto"
"os"
@@ -12,14 +10,13 @@ import (
"time"
"github.com/jlaffaye/ftp"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
)
// Register with Fs
@@ -48,20 +45,6 @@ func init() {
Help: "FTP password",
IsPassword: true,
Required: true,
}, {
Name: "tls",
Help: "Use FTP over TLS (Implicit)",
Default: false,
}, {
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
Default: 0,
Advanced: true,
}, {
Name: "no_check_certificate",
Help: "Do not verify the TLS certificate of the server",
Default: false,
Advanced: true,
},
},
})
@@ -69,13 +52,10 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
TLS bool `config:"tls"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
}
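The config tags above are how configstruct fills the struct from a remote's config entries. A sketch of populating the master-branch Options programmatically (all values are illustrative, and obscuring pass this way is an assumption based on IsPassword: true above):

m := configmap.Simple{
    "host":        "ftp.example.com",
    "user":        "alice",
    "pass":        obscure.MustObscure("secret"),
    "tls":         "true",
    "concurrency": "4",
}
opt := new(Options)
if err := configstruct.Set(m, opt); err != nil {
    return err
}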
// Fs represents a remote FTP server
@@ -90,7 +70,6 @@ type Fs struct {
dialAddr string
poolMu sync.Mutex
pool []*ftp.ServerConn
tokens *pacer.TokenDispenser
}
// Object describes an FTP file
@@ -133,15 +112,7 @@ func (f *Fs) Features() *fs.Features {
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
fs.Debugf(f, "Connecting to FTP server")
ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
if f.opt.TLS {
tlsConfig := &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
}
c, err := ftp.Dial(f.dialAddr, ftpConfig...)
c, err := ftp.DialTimeout(f.dialAddr, fs.Config.ConnectTimeout)
if err != nil {
fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
return nil, errors.Wrap(err, "ftpConnection Dial")
@@ -157,9 +128,6 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
// Get an FTP connection from the pool, or open a new one
func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
if f.opt.Concurrency > 0 {
f.tokens.Get()
}
f.poolMu.Lock()
if len(f.pool) > 0 {
c = f.pool[0]
@@ -179,9 +147,6 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
// if err is not nil then it checks the connection is alive using a
// NOOP request
func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
if f.opt.Concurrency > 0 {
defer f.tokens.Put()
}
c := *pc
*pc = nil
if err != nil {
@@ -201,9 +166,8 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
f.poolMu.Unlock()
}
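tokens here is a pacer.TokenDispenser, used to cap the number of simultaneous FTP connections when the concurrency option is set. The underlying pattern is a buffered channel pre-filled with N tokens; a minimal sketch of the idea (not rclone's actual type):

type tokenDispenser chan struct{}

func newTokenDispenser(n int) tokenDispenser {
    td := make(tokenDispenser, n)
    for i := 0; i < n; i++ {
        td <- struct{}{} // pre-fill with n tokens
    }
    return td
}

func (td tokenDispenser) Get() { <-td }             // blocks while all n tokens are in use
func (td tokenDispenser) Put() { td <- struct{}{} } // hands the token back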
// NewFs constructs an Fs from the path, container:path
// NewFs contstructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
ctx := context.Background()
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
// Parse config into Options struct
opt := new(Options)
@@ -225,11 +189,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
}
dialAddr := opt.Host + ":" + port
protocol := "ftp://"
if opt.TLS {
protocol = "ftps://"
}
u := protocol + path.Join(dialAddr+"/", root)
u := "ftp://" + path.Join(dialAddr+"/", root)
f := &Fs{
name: name,
root: root,
@@ -238,7 +198,6 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
user: user,
pass: pass,
dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
@@ -256,7 +215,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
if f.root == "." {
f.root = ""
}
_, err := f.NewObject(ctx, remote)
_, err := f.NewObject(remote)
if err != nil {
if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
// File doesn't exist so return old f
@@ -321,7 +280,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
func (f *Fs) NewObject(remote string) (o fs.Object, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
entry, err := f.findItem(remote)
if err != nil {
@@ -365,42 +324,17 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// defer fs.Trace(dir, "curlevel=%d", curlevel)("")
c, err := f.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "list")
}
var listErr error
var files []*ftp.Entry
resultchan := make(chan []*ftp.Entry, 1)
errchan := make(chan error, 1)
go func() {
result, err := c.List(path.Join(f.root, dir))
f.putFtpConnection(&c, err)
if err != nil {
errchan <- err
return
}
resultchan <- result
}()
// Wait for List for up to Timeout seconds
timer := time.NewTimer(fs.Config.Timeout)
select {
case listErr = <-errchan:
timer.Stop()
return nil, translateErrorDir(listErr)
case files = <-resultchan:
timer.Stop()
case <-timer.C:
// if timer fired assume no error but connection dead
fs.Errorf(f, "Timeout when waiting for List")
return nil, errors.New("Timeout when waiting for List")
files, err := c.List(path.Join(f.root, dir))
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorDir(err)
}
// Annoyingly FTP returns success for a directory which
// doesn't exist, so check it really doesn't exist if no
// entries found.
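The goroutine-plus-timer construction above is a general way to bound a blocking library call that offers no timeout of its own. A generic sketch of the same pattern (the helper name is an assumption):

func withTimeout(d time.Duration, call func() error) error {
    errc := make(chan error, 1) // buffered, so the goroutine can finish even after a timeout
    go func() { errc <- call() }()
    timer := time.NewTimer(d)
    defer timer.Stop()
    select {
    case err := <-errc:
        return err
    case <-timer.C:
        return errors.New("timeout waiting for call")
    }
}

Note the blocked call still runs to completion in the background, which is why the FTP code treats the connection as dead after a timeout rather than reusing it.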
@@ -455,7 +389,7 @@ func (f *Fs) Precision() time.Duration {
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// fs.Debugf(f, "Trying to put file %s", src.Remote())
err := f.mkParentDir(src.Remote())
if err != nil {
@@ -465,13 +399,13 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
fs: f,
remote: src.Remote(),
}
err = o.Update(ctx, in, src, options...)
err = o.Update(in, src, options...)
return o, err
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// getInfo reads the FileInfo for a path
@@ -549,7 +483,7 @@ func (f *Fs) mkParentDir(remote string) error {
}
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
func (f *Fs) Mkdir(dir string) (err error) {
// defer fs.Trace(dir, "")("err=%v", &err)
root := path.Join(f.root, dir)
return f.mkdir(root)
@@ -558,7 +492,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
func (f *Fs) Rmdir(dir string) error {
c, err := f.getFtpConnection()
if err != nil {
return errors.Wrap(translateErrorFile(err), "Rmdir")
@@ -569,7 +503,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
}
// Move renames a remote file object
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
@@ -591,7 +525,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, errors.Wrap(err, "Move Rename failed")
}
dstObj, err := f.NewObject(ctx, remote)
dstObj, err := f.NewObject(remote)
if err != nil {
return nil, errors.Wrap(err, "Move NewObject failed")
}
@@ -606,7 +540,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -669,7 +603,7 @@ func (o *Object) Remote() string {
}
// Hash returns the hash of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
func (o *Object) Hash(t hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
@@ -679,12 +613,12 @@ func (o *Object) Size() int64 {
}
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
func (o *Object) ModTime() time.Time {
return o.info.ModTime
}
// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
func (o *Object) SetModTime(modTime time.Time) error {
return nil
}
@@ -712,21 +646,7 @@ func (f *ftpReadCloser) Read(p []byte) (n int, err error) {
// Close the FTP reader and return the connection to the pool
func (f *ftpReadCloser) Close() error {
var err error
errchan := make(chan error, 1)
go func() {
errchan <- f.rc.Close()
}()
// Wait for Close for up to 60 seconds
timer := time.NewTimer(60 * time.Second)
select {
case err = <-errchan:
timer.Stop()
case <-timer.C:
// if timer fired assume no error but connection dead
fs.Errorf(f.f, "Timeout when waiting for connection Close")
return nil
}
err := f.rc.Close()
// if errors while reading or closing, dump the connection
if err != nil || f.err != nil {
_ = f.c.Quit()
@@ -745,7 +665,7 @@ func (f *ftpReadCloser) Close() error {
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
// defer fs.Trace(o, "")("rc=%v, err=%v", &rc, &err)
path := path.Join(o.fs.root, o.remote)
var offset, limit int64 = 0, -1
@@ -779,17 +699,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
// defer fs.Trace(o, "src=%v", src)("err=%v", &err)
path := path.Join(o.fs.root, o.remote)
// remove the file if upload failed
remove := func() {
// Give the FTP server a chance to get its internal state in order after the error.
// The error may have been local in which case we closed the connection. The server
// may still be dealing with it for a moment. A sleep isn't ideal but I haven't been
// able to think of a better method to find out if the server has finished - ncw
time.Sleep(1 * time.Second)
removeErr := o.Remove(ctx)
removeErr := o.Remove()
if removeErr != nil {
fs.Debugf(o, "Failed to remove: %v", removeErr)
} else {
@@ -802,7 +717,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
err = c.Stor(path, in)
if err != nil {
_ = c.Quit() // toss this connection to avoid sync errors
_ = c.Quit()
remove()
return errors.Wrap(err, "update stor")
}
@@ -815,7 +730,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
func (o *Object) Remove() (err error) {
// defer fs.Trace(o, "")("err=%v", &err)
path := path.Join(o.fs.root, o.remote)
// Check if it's a directory or a file
@@ -824,7 +739,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
return err
}
if info.IsDir {
err = o.fs.Rmdir(ctx, o.remote)
err = o.fs.Rmdir(o.remote)
} else {
c, err := o.fs.getFtpConnection()
if err != nil {

View File

@@ -4,8 +4,8 @@ package ftp_test
import (
"testing"
"github.com/rclone/rclone/backend/ftp"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/backend/ftp"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote

View File

@@ -13,7 +13,6 @@ FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 error
*/
import (
"context"
"encoding/base64"
"encoding/hex"
"fmt"
@@ -23,27 +22,26 @@ import (
"net/http"
"os"
"path"
"regexp"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
// NOTE: This API is deprecated
storage "google.golang.org/api/storage/v1"
)
@@ -60,7 +58,7 @@ const (
var (
// Description of how to auth for this app
storageConfig = &oauth2.Config{
Scopes: []string{storage.DevstorageReadWriteScope},
Scopes: []string{storage.DevstorageFullControlScope},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
@@ -143,22 +141,6 @@ func init() {
Value: "publicReadWrite",
Help: "Project team owners get OWNER access, and all Users get WRITER access.",
}},
}, {
Name: "bucket_policy_only",
Help: `Access checks should use bucket-level IAM policies.
If you want to upload objects to a bucket with Bucket Policy Only set
then you will need to set this.
When it is set, rclone:
- ignores ACLs set on buckets
- ignores ACLs set on objects
- creates buckets with Bucket Policy Only set
Docs: https://cloud.google.com/storage/docs/bucket-policy-only
`,
Default: false,
}, {
Name: "location",
Help: "Location for the newly created buckets.",
@@ -177,36 +159,21 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "asia-east1",
Help: "Taiwan.",
}, {
Value: "asia-east2",
Help: "Hong Kong.",
}, {
Value: "asia-northeast1",
Help: "Tokyo.",
}, {
Value: "asia-south1",
Help: "Mumbai.",
}, {
Value: "asia-southeast1",
Help: "Singapore.",
}, {
Value: "australia-southeast1",
Help: "Sydney.",
}, {
Value: "europe-north1",
Help: "Finland.",
}, {
Value: "europe-west1",
Help: "Belgium.",
}, {
Value: "europe-west2",
Help: "London.",
}, {
Value: "europe-west3",
Help: "Frankfurt.",
}, {
Value: "europe-west4",
Help: "Netherlands.",
}, {
Value: "us-central1",
Help: "Iowa.",
@@ -219,9 +186,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "us-west1",
Help: "Oregon.",
}, {
Value: "us-west2",
Help: "California.",
}},
}, {
Name: "storage_class",
@@ -256,23 +220,22 @@ type Options struct {
ServiceAccountCredentials string `config:"service_account_credentials"`
ObjectACL string `config:"object_acl"`
BucketACL string `config:"bucket_acl"`
BucketPolicyOnly bool `config:"bucket_policy_only"`
Location string `config:"location"`
StorageClass string `config:"storage_class"`
}
// Fs represents a remote storage server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache of bucket status
pacer *fs.Pacer // To pace the API calls
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
pacer *pacer.Pacer // To pace the API calls
}
// Object describes a storage object
@@ -297,18 +260,18 @@ func (f *Fs) Name() string {
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootBucket == "" {
return fmt.Sprintf("GCS root")
if f.root == "" {
return fmt.Sprintf("Storage bucket %s", f.bucket)
}
if f.rootDirectory == "" {
return fmt.Sprintf("GCS bucket %s", f.rootBucket)
}
return fmt.Sprintf("GCS bucket %s path %s", f.rootBucket, f.rootDirectory)
return fmt.Sprintf("Storage bucket %s path %s", f.bucket, f.root)
}
// Features returns the optional features of this Fs
@@ -316,7 +279,7 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// shouldRetry determines whether a given err rates being retried
// shouldRetry determines whehter a given err rates being retried
func shouldRetry(err error) (again bool, errOut error) {
again = false
if err != nil {
@@ -340,23 +303,21 @@ func shouldRetry(err error) (again bool, errOut error) {
return again, err
}
// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
// Pattern to match a storage path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parseParse parses a storage 'url'
func parsePath(path string) (bucket, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("couldn't find bucket in storage path %q", path)
} else {
bucket, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return
}
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
return bucket.Split(path.Join(f.root, rootRelativePath))
}
// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}
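In the master-branch code the bucket/path arithmetic is delegated to lib/bucket rather than a regexp. An illustrative call (values are made up):

bucketName, bucketPath := bucket.Split("mybucket/path/to/obj")
// bucketName == "mybucket", bucketPath == "path/to/obj"
// bucket.Split("") returns two empty strings, which is how the
// bucketless root is represented.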
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
if err != nil {
@@ -366,13 +327,7 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}
// NewFs constructs an Fs from the path, bucket:path
// NewFs contstructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
var oAuthClient *http.Client
@@ -390,7 +345,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
// try loading service account credentials from env variable, then from a file
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
if opt.ServiceAccountCredentials != "" && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
if err != nil {
return nil, errors.Wrap(err, "error opening service account credentials file")
@@ -405,27 +360,26 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} else {
oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
if err != nil {
ctx := context.Background()
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
}
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
}
}
f := &Fs{
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
bucket, directory, err := parsePath(root)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
bucket: bucket,
root: directory,
opt: *opt,
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
}).Fill(f)
// Create a new authorized Drive client.
@@ -435,18 +389,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
}
if f.rootBucket != "" && f.rootDirectory != "" {
if f.root != "" {
f.root += "/"
// Check to see if the object exists
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(f.rootBucket, f.rootDirectory).Do()
_, err = f.svc.Objects.Get(bucket, directory).Do()
return shouldRetry(err)
})
if err == nil {
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
f.setRoot(newRoot)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
@@ -475,7 +431,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object,
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
@@ -487,17 +443,13 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
//
// The remote has prefix removed from it and if addBucket is set
// then it adds the bucket to the start.
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) (err error) {
if prefix != "" {
prefix += "/"
func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
root := f.root
rootLength := len(root)
if dir != "" {
root += dir + "/"
}
if directory != "" {
directory += "/"
}
list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
list := f.svc.Objects.List(f.bucket).Prefix(root).MaxResults(listChunks)
if !recurse {
list = list.Delimiter("/")
}
@@ -517,36 +469,31 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
if !recurse {
var object storage.Object
for _, remote := range objects.Prefixes {
if !strings.HasSuffix(remote, "/") {
for _, prefix := range objects.Prefixes {
if !strings.HasSuffix(prefix, "/") {
continue
}
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote = remote[len(prefix) : len(remote)-1]
if addBucket {
remote = path.Join(bucket, remote)
}
err = fn(remote, &object, true)
err = fn(prefix[rootLength:len(prefix)-1], &object, true)
if err != nil {
return err
}
}
}
for _, object := range objects.Items {
if !strings.HasPrefix(object.Name, prefix) {
if !strings.HasPrefix(object.Name, root) {
fs.Logf(f, "Odd name received %q", object.Name)
continue
}
remote := object.Name[len(prefix):]
isDirectory := strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
remote := object.Name[rootLength:]
// is this a directory marker?
if isDirectory && object.Size == 0 {
if (strings.HasSuffix(remote, "/") || remote == "") && object.Size == 0 {
if recurse && remote != "" {
// add a directory in if --fast-list since will have no prefixes
err = fn(remote[:len(remote)-1], object, true)
if err != nil {
return err
}
}
continue // skip directory marker
}
err = fn(remote, object, false)
@@ -575,10 +522,19 @@ func (f *Fs) itemToDirEntry(remote string, object *storage.Object, isDirectory b
return o, nil
}
// mark the bucket as being OK
func (f *Fs) markBucketOK() {
if f.bucket != "" {
f.bucketOKMu.Lock()
f.bucketOK = true
f.bucketOKMu.Unlock()
}
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
// List the objects
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
err = f.list(dir, false, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
@@ -592,12 +548,15 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return nil, err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
f.markBucketOK()
return entries, err
}
// listBuckets lists the buckets
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
if f.opt.ProjectNumber == "" {
return nil, errors.New("can't list buckets without project number")
}
@@ -632,15 +591,11 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
if f.bucket == "" {
return f.listBuckets(dir)
}
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
return f.listDir(dir)
}
// ListR lists the objects and directories of the Fs starting
@@ -659,44 +614,23 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
}
if bucket == "" {
entries, err := f.listBuckets(ctx)
err = f.list(dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
bucket := entry.Remote()
err = listR(bucket, "", f.rootDirectory, true)
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
}
} else {
err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return list.Add(entry)
})
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return list.Flush()
}
@@ -705,88 +639,83 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
return o, o.Update(ctx, in, src, options...)
return o, o.Update(in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
bucket, _ := f.split(dir)
return f.makeBucket(ctx, bucket)
}
func (f *Fs) Mkdir(dir string) (err error) {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
return nil
}
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
// makeBucket creates the bucket if it doesn't exist
func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
return f.cache.Create(bucket, func() error {
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(bucket).MaxResults(1).Do()
return shouldRetry(err)
})
if err == nil {
// Bucket already exists
return nil
} else if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code != http.StatusNotFound {
return errors.Wrap(err, "failed to get bucket")
}
} else {
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(f.bucket).MaxResults(1).Do()
return shouldRetry(err)
})
if err == nil {
// Bucket already exists
f.bucketOK = true
return nil
} else if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code != http.StatusNotFound {
return errors.Wrap(err, "failed to get bucket")
}
} else {
return errors.Wrap(err, "failed to get bucket")
}
if f.opt.ProjectNumber == "" {
return errors.New("can't make bucket without project number")
}
if f.opt.ProjectNumber == "" {
return errors.New("can't make bucket without project number")
}
bucket := storage.Bucket{
Name: bucket,
Location: f.opt.Location,
StorageClass: f.opt.StorageClass,
}
if f.opt.BucketPolicyOnly {
bucket.IamConfiguration = &storage.BucketIamConfiguration{
BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
Enabled: true,
},
}
}
return f.pacer.Call(func() (bool, error) {
insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
_, err = insertBucket.Do()
return shouldRetry(err)
})
}, nil)
bucket := storage.Bucket{
Name: f.bucket,
Location: f.opt.Location,
StorageClass: f.opt.StorageClass,
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket).PredefinedAcl(f.opt.BucketACL).Do()
return shouldRetry(err)
})
if err == nil {
f.bucketOK = true
}
return err
}
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
bucket, directory := f.split(dir)
if bucket == "" || directory != "" {
func (f *Fs) Rmdir(dir string) (err error) {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
return nil
}
return f.cache.Remove(bucket, func() error {
return f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(bucket).Do()
return shouldRetry(err)
})
err = f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(f.bucket).Do()
return shouldRetry(err)
})
if err == nil {
f.bucketOK = false
}
return err
}
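The master-branch version routes bucket creation and removal through lib/bucket.Cache so that repeated Mkdir and Rmdir calls avoid redundant API round trips. A sketch of the contract as used above (signatures inferred from the calls in this diff):

c := bucket.NewCache()
// Create runs the supplied function only if the bucket isn't already
// known to exist, and remembers success; the final argument is an
// optional existence check.
err := c.Create("mybucket", func() error {
    // issue the Buckets.Insert call here
    return nil
}, nil)
// Remove runs the delete function and forgets the bucket on success.
err = c.Remove("mybucket", func() error {
    // issue the Buckets.Delete call here
    return nil
})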
// Precision returns the precision
@@ -803,9 +732,8 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
err := f.makeBucket(ctx, dstBucket)
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
err := f.Mkdir("")
if err != nil {
return nil, err
}
@@ -814,7 +742,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcBucket, srcPath := srcObj.split()
// Temporary Object under construction
dstObj := &Object{
@@ -822,9 +749,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
remote: remote,
}
srcBucket := srcObj.fs.bucket
srcObject := srcObj.fs.root + srcObj.remote
dstBucket := f.bucket
dstObject := f.root + remote
var newObject *storage.Object
err = f.pacer.Call(func() (bool, error) {
newObject, err = f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil).Do()
newObject, err = f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
return shouldRetry(err)
})
if err != nil {
@@ -861,7 +792,7 @@ func (o *Object) Remote() string {
}
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
@@ -914,10 +845,9 @@ func (o *Object) readMetaData() (err error) {
if !o.modTime.IsZero() {
return nil
}
bucket, bucketPath := o.split()
var object *storage.Object
err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Do()
object, err = o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
return shouldRetry(err)
})
if err != nil {
@@ -936,7 +866,7 @@ func (o *Object) readMetaData() (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
// fs.Logf(o, "Failed to read metadata: %v", err)
@@ -953,17 +883,16 @@ func metadataFromModTime(modTime time.Time) map[string]string {
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
func (o *Object) SetModTime(modTime time.Time) (err error) {
// This only adds metadata so will preserve other metadata
bucket, bucketPath := o.split()
object := storage.Object{
Bucket: bucket,
Name: bucketPath,
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
Metadata: metadataFromModTime(modTime),
}
var newObject *storage.Object
err = o.fs.pacer.Call(func() (bool, error) {
newObject, err = o.fs.svc.Objects.Patch(bucket, bucketPath, &object).Do()
newObject, err = o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
return shouldRetry(err)
})
if err != nil {
@@ -979,12 +908,11 @@ func (o *Object) Storable() bool {
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
req, err := http.NewRequest("GET", o.url, nil)
if err != nil {
return nil, err
}
fs.FixRangeOption(options, o.bytes)
fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1011,27 +939,23 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
bucket, bucketPath := o.split()
err := o.fs.makeBucket(ctx, bucket)
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
err := o.fs.Mkdir("")
if err != nil {
return err
}
modTime := src.ModTime(ctx)
modTime := src.ModTime()
object := storage.Object{
Bucket: bucket,
Name: bucketPath,
ContentType: fs.MimeType(ctx, src),
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
ContentType: fs.MimeType(src),
Updated: modTime.Format(timeFormatOut), // Doesn't get set
Metadata: metadataFromModTime(modTime),
}
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
if !o.fs.opt.BucketPolicyOnly {
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = insertObject.Do()
newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.opt.ObjectACL).Do()
return shouldRetry(err)
})
if err != nil {
@@ -1043,17 +967,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()
func (o *Object) Remove() (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Do()
err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
return shouldRetry(err)
})
return err
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
func (o *Object) MimeType() string {
return o.mimeType
}

View File

@@ -1,12 +1,11 @@
// Test GoogleCloudStorage filesystem interface
package googlecloudstorage_test
import (
"testing"
"github.com/rclone/rclone/backend/googlecloudstorage"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/backend/googlecloudstorage"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote

View File

@@ -1,148 +0,0 @@
// This file contains the albums abstraction
package googlephotos
import (
"path"
"strings"
"sync"
"github.com/rclone/rclone/backend/googlephotos/api"
)
// All the albums
type albums struct {
mu sync.Mutex
dupes map[string][]*api.Album // duplicated names
byID map[string]*api.Album //..indexed by ID
byTitle map[string]*api.Album //..indexed by Title
path map[string][]string // partial album names to directory
}
// newAlbums makes a new empty albums structure
func newAlbums() *albums {
return &albums{
dupes: map[string][]*api.Album{},
byID: map[string]*api.Album{},
byTitle: map[string]*api.Album{},
path: map[string][]string{},
}
}
// add an album
func (as *albums) add(album *api.Album) {
// Munge the name of the album into a sensible path name
album.Title = path.Clean(album.Title)
if album.Title == "." || album.Title == "/" {
album.Title = addID("", album.ID)
}
as.mu.Lock()
as._add(album)
as.mu.Unlock()
}
// _add an album - call with lock held
func (as *albums) _add(album *api.Album) {
// update dupes by title
dupes := as.dupes[album.Title]
dupes = append(dupes, album)
as.dupes[album.Title] = dupes
// Dedupe the album name if necessary
if len(dupes) >= 2 {
// If this is the first dupe, then need to adjust the first one
if len(dupes) == 2 {
firstAlbum := dupes[0]
as._del(firstAlbum)
as._add(firstAlbum)
// undo add of firstAlbum to dupes
as.dupes[album.Title] = dupes
}
album.Title = addID(album.Title, album.ID)
}
// Store the new album
as.byID[album.ID] = album
as.byTitle[album.Title] = album
// Store the partial paths
dir, leaf := album.Title, ""
for dir != "" {
i := strings.LastIndex(dir, "/")
if i >= 0 {
dir, leaf = dir[:i], dir[i+1:]
} else {
dir, leaf = "", dir
}
dirs := as.path[dir]
found := false
for _, dir := range dirs {
if dir == leaf {
found = true
}
}
if !found {
as.path[dir] = append(as.path[dir], leaf)
}
}
}
// del an album
func (as *albums) del(album *api.Album) {
as.mu.Lock()
as._del(album)
as.mu.Unlock()
}
// _del an album - call with lock held
func (as *albums) _del(album *api.Album) {
// We leave in dupes so it doesn't cause albums to get renamed
// Remove from byID and byTitle
delete(as.byID, album.ID)
delete(as.byTitle, album.Title)
// Remove from paths
dir, leaf := album.Title, ""
for dir != "" {
// Can't delete if this dir exists anywhere in the path structure
if _, found := as.path[dir]; found {
break
}
i := strings.LastIndex(dir, "/")
if i >= 0 {
dir, leaf = dir[:i], dir[i+1:]
} else {
dir, leaf = "", dir
}
dirs := as.path[dir]
for i, dir := range dirs {
if dir == leaf {
dirs = append(dirs[:i], dirs[i+1:]...)
break
}
}
if len(dirs) == 0 {
delete(as.path, dir)
} else {
as.path[dir] = dirs
}
}
}
// get an album by title
func (as *albums) get(title string) (album *api.Album, ok bool) {
as.mu.Lock()
defer as.mu.Unlock()
album, ok = as.byTitle[title]
return album, ok
}
// getDirs gets directories below an album path
func (as *albums) getDirs(albumPath string) (dirs []string, ok bool) {
as.mu.Lock()
defer as.mu.Unlock()
dirs, ok = as.path[albumPath]
return dirs, ok
}
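The path map built up in _add stores every partial directory level of every album title, and getDirs is what directory listings consult. A worked example using this file's own API (title and ID are made up):

as := newAlbums()
as.add(&api.Album{Title: "holidays/2018/summer", ID: "x1"})
dirs, _ := as.getDirs("")             // []string{"holidays"}
dirs, _ = as.getDirs("holidays")      // []string{"2018"}
dirs, _ = as.getDirs("holidays/2018") // []string{"summer"}
_ = dirs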

View File

@@ -1,311 +0,0 @@
package googlephotos
import (
"testing"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/stretchr/testify/assert"
)
func TestNewAlbums(t *testing.T) {
albums := newAlbums()
assert.NotNil(t, albums.dupes)
assert.NotNil(t, albums.byID)
assert.NotNil(t, albums.byTitle)
assert.NotNil(t, albums.path)
}
func TestAlbumsAdd(t *testing.T) {
albums := newAlbums()
assert.Equal(t, map[string][]*api.Album{}, albums.dupes)
assert.Equal(t, map[string]*api.Album{}, albums.byID)
assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
assert.Equal(t, map[string][]string{}, albums.path)
a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one"},
}, albums.path)
a2 := &api.Album{
Title: "two",
ID: "2",
}
albums.add(a2)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"two": a2,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two"},
}, albums.path)
// Add a duplicate
a2a := &api.Album{
Title: "two",
ID: "2a",
}
albums.add(a2a)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
"2a": a2a,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
}, albums.path)
// Add a sub directory
a1sub := &api.Album{
Title: "one/sub",
ID: "1sub",
}
albums.add(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
// Add a weird path
a0 := &api.Album{
Title: "/../././..////.",
ID: "0",
}
albums.add(a0)
assert.Equal(t, map[string][]*api.Album{
"{0}": []*api.Album{a0},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"0": a0,
"1": a1,
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"{0}": a0,
"one": a1,
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}", "{0}"},
"one": []string{"sub"},
}, albums.path)
}
func TestAlbumsDel(t *testing.T) {
albums := newAlbums()
a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)
a2 := &api.Album{
Title: "two",
ID: "2",
}
albums.add(a2)
// Add a duplicate
a2a := &api.Album{
Title: "two",
ID: "2a",
}
albums.add(a2a)
// Add a sub directory
a1sub := &api.Album{
Title: "one/sub",
ID: "1sub",
}
albums.add(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
albums.del(a1)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
albums.del(a2)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one/sub": a1sub,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
albums.del(a2a)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one/sub": a1sub,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one"},
"one": []string{"sub"},
}, albums.path)
albums.del(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{}, albums.byID)
assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
assert.Equal(t, map[string][]string{}, albums.path)
}
func TestAlbumsGet(t *testing.T) {
albums := newAlbums()
a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)
album, ok := albums.get("one")
assert.Equal(t, true, ok)
assert.Equal(t, a1, album)
album, ok = albums.get("notfound")
assert.Equal(t, false, ok)
assert.Nil(t, album)
}
func TestAlbumsGetDirs(t *testing.T) {
albums := newAlbums()
a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)
dirs, ok := albums.getDirs("")
assert.Equal(t, true, ok)
assert.Equal(t, []string{"one"}, dirs)
dirs, ok = albums.getDirs("notfound")
assert.Equal(t, false, ok)
assert.Nil(t, dirs)
}

View File

@@ -1,190 +0,0 @@
package api
import (
"fmt"
"time"
)
// ErrorDetails in the internals of the Error type
type ErrorDetails struct {
Code int `json:"code"`
Message string `json:"message"`
Status string `json:"status"`
}
// Error is returned on errors
type Error struct {
Details ErrorDetails `json:"error"`
}
// Error satisfies the error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s (%d %s)", e.Details.Message, e.Details.Code, e.Details.Status)
}
// Album of photos
type Album struct {
ID string `json:"id,omitempty"`
Title string `json:"title"`
ProductURL string `json:"productUrl,omitempty"`
MediaItemsCount string `json:"mediaItemsCount,omitempty"`
CoverPhotoBaseURL string `json:"coverPhotoBaseUrl,omitempty"`
CoverPhotoMediaItemID string `json:"coverPhotoMediaItemId,omitempty"`
IsWriteable bool `json:"isWriteable,omitempty"`
}
// ListAlbums is returned from albums.list and sharedAlbums.list
type ListAlbums struct {
Albums []Album `json:"albums"`
SharedAlbums []Album `json:"sharedAlbums"`
NextPageToken string `json:"nextPageToken"`
}
// CreateAlbum creates an Album
type CreateAlbum struct {
Album *Album `json:"album"`
}
// MediaItem is a photo or video
type MediaItem struct {
ID string `json:"id"`
ProductURL string `json:"productUrl"`
BaseURL string `json:"baseUrl"`
MimeType string `json:"mimeType"`
MediaMetadata struct {
CreationTime time.Time `json:"creationTime"`
Width string `json:"width"`
Height string `json:"height"`
Photo struct {
} `json:"photo"`
} `json:"mediaMetadata"`
Filename string `json:"filename"`
}
// MediaItems is returned from mediaitems.list, mediaitems.search
type MediaItems struct {
MediaItems []MediaItem `json:"mediaItems"`
NextPageToken string `json:"nextPageToken"`
}
// Content categories
// NONE Default content category. This category is ignored when any other category is used in the filter.
// LANDSCAPES Media items containing landscapes.
// RECEIPTS Media items containing receipts.
// CITYSCAPES Media items containing cityscapes.
// LANDMARKS Media items containing landmarks.
// SELFIES Media items that are selfies.
// PEOPLE Media items containing people.
// PETS Media items containing pets.
// WEDDINGS Media items from weddings.
// BIRTHDAYS Media items from birthdays.
// DOCUMENTS Media items containing documents.
// TRAVEL Media items taken during travel.
// ANIMALS Media items containing animals.
// FOOD Media items containing food.
// SPORT Media items from sporting events.
// NIGHT Media items taken at night.
// PERFORMANCES Media items from performances.
// WHITEBOARDS Media items containing whiteboards.
// SCREENSHOTS Media items that are screenshots.
// UTILITY Media items that are considered to be utility. These include, but aren't limited to, documents, screenshots, whiteboards etc.
// ARTS Media items containing art.
// CRAFTS Media items containing crafts.
// FASHION Media items related to fashion.
// HOUSES Media items containing houses.
// GARDENS Media items containing gardens.
// FLOWERS Media items containing flowers.
// HOLIDAYS Media items taken of holidays.
// MediaTypes
// ALL_MEDIA Treated as if no filters are applied. All media types are included.
// VIDEO All media items that are considered videos. This also includes movies the user has created using the Google Photos app.
// PHOTO All media items that are considered photos. This includes .bmp, .gif, .ico, .jpg (and other spellings), .tiff, .webp and special photo types such as iOS live photos, Android motion photos, panoramas, photospheres.
// Features
// NONE Treated as if no filters are applied. All features are included.
// FAVORITES Media items that the user has marked as favorites in the Google Photos app.
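//
// As an illustrative sketch of how these strings are consumed, they
// plug into the filter structs defined below, e.g.:
//
//	Filters{
//		ContentFilter:   &ContentFilter{IncludedContentCategories: []string{"LANDSCAPES"}},
//		MediaTypeFilter: &MediaTypeFilter{MediaTypes: []string{"PHOTO"}},
//		FeatureFilter:   &FeatureFilter{IncludedFeatures: []string{"FAVORITES"}},
//	}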
// Date is used as part of SearchFilter
type Date struct {
Year int `json:"year,omitempty"`
Month int `json:"month,omitempty"`
Day int `json:"day,omitempty"`
}
// DateFilter is used to add date ranges to media item queries
type DateFilter struct {
Dates []Date `json:"dates,omitempty"`
Ranges []struct {
StartDate Date `json:"startDate,omitempty"`
EndDate Date `json:"endDate,omitempty"`
} `json:"ranges,omitempty"`
}
// ContentFilter is used to add content categories to media item queries
type ContentFilter struct {
IncludedContentCategories []string `json:"includedContentCategories,omitempty"`
ExcludedContentCategories []string `json:"excludedContentCategories,omitempty"`
}
// MediaTypeFilter is used to add media types to media item queries
type MediaTypeFilter struct {
MediaTypes []string `json:"mediaTypes,omitempty"`
}
// FeatureFilter is used to add features to media item queries
type FeatureFilter struct {
IncludedFeatures []string `json:"includedFeatures,omitempty"`
}
// Filters combines all the filter types for media item queries
type Filters struct {
DateFilter *DateFilter `json:"dateFilter,omitempty"`
ContentFilter *ContentFilter `json:"contentFilter,omitempty"`
MediaTypeFilter *MediaTypeFilter `json:"mediaTypeFilter,omitempty"`
FeatureFilter *FeatureFilter `json:"featureFilter,omitempty"`
IncludeArchivedMedia *bool `json:"includeArchivedMedia,omitempty"`
ExcludeNonAppCreatedData *bool `json:"excludeNonAppCreatedData,omitempty"`
}
// SearchFilter is used with mediaItems.search
type SearchFilter struct {
AlbumID string `json:"albumId,omitempty"`
PageSize int `json:"pageSize"`
PageToken string `json:"pageToken,omitempty"`
Filters *Filters `json:"filters,omitempty"`
}
// SimpleMediaItem is part of NewMediaItem
type SimpleMediaItem struct {
UploadToken string `json:"uploadToken"`
}
// NewMediaItem is a single media item for upload
type NewMediaItem struct {
Description string `json:"description"`
SimpleMediaItem SimpleMediaItem `json:"simpleMediaItem"`
}
// BatchCreateRequest creates media items from upload tokens
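//
// For example (albumID and token are hypothetical values, with token
// obtained from a prior raw upload):
//
//	BatchCreateRequest{
//		AlbumID: albumID,
//		NewMediaItems: []NewMediaItem{{
//			Description:     "holiday snap",
//			SimpleMediaItem: SimpleMediaItem{UploadToken: token},
//		}},
//	}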
type BatchCreateRequest struct {
AlbumID string `json:"albumId,omitempty"`
NewMediaItems []NewMediaItem `json:"newMediaItems"`
}
// BatchCreateResponse is returned from BatchCreateRequest
type BatchCreateResponse struct {
NewMediaItemResults []struct {
UploadToken string `json:"uploadToken"`
Status struct {
Message string `json:"message"`
Code int `json:"code"`
} `json:"status"`
MediaItem MediaItem `json:"mediaItem"`
} `json:"newMediaItemResults"`
}
// BatchRemoveItems is for removing items from an album
type BatchRemoveItems struct {
MediaItemIds []string `json:"mediaItemIds"`
}

File diff suppressed because it is too large

View File

@@ -1,307 +0,0 @@
package googlephotos
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"path"
"testing"
"time"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
// We use two different files here as Google Photos will otherwise
// deduplicate them, which confuses the tests as the filename is then
// not the one expected.
fileNameAlbum = "rclone-test-image1.jpg"
fileNameUpload = "rclone-test-image2.jpg"
)
// Wrapper to override the remote for an object
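// This lets the tests Put a local fixture under an arbitrary remote
// name, e.g. f.Put(ctx, in, &overrideRemoteObject{srcObj, remote}).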
type overrideRemoteObject struct {
fs.Object
remote string
}
// Remote returns the overridden remote name
func (o *overrideRemoteObject) Remote() string {
return o.remote
}
func TestIntegration(t *testing.T) {
ctx := context.Background()
fstest.Initialise()
// Create Fs
if *fstest.RemoteName == "" {
*fstest.RemoteName = "TestGooglePhotos:"
}
f, err := fs.NewFs(*fstest.RemoteName)
if err == fs.ErrorNotFoundInConfigFile {
t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
}
require.NoError(t, err)
// Create local Fs pointing at testfiles
localFs, err := fs.NewFs("testfiles")
require.NoError(t, err)
t.Run("CreateAlbum", func(t *testing.T) {
albumName := "album/rclone-test-" + random.String(24)
err = f.Mkdir(ctx, albumName)
require.NoError(t, err)
remote := albumName + "/" + fileNameAlbum
t.Run("PutFile", func(t *testing.T) {
srcObj, err := localFs.NewObject(ctx, fileNameAlbum)
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()
remoteWithID := addFileID(remote, dstObj.(*Object).id)
t.Run("ObjectFs", func(t *testing.T) {
assert.Equal(t, f, dstObj.Fs())
})
t.Run("ObjectString", func(t *testing.T) {
assert.Equal(t, remote, dstObj.String())
assert.Equal(t, "<nil>", (*Object)(nil).String())
})
t.Run("ObjectHash", func(t *testing.T) {
h, err := dstObj.Hash(ctx, hash.MD5)
assert.Equal(t, "", h)
assert.Equal(t, hash.ErrUnsupported, err)
})
t.Run("ObjectSize", func(t *testing.T) {
assert.Equal(t, int64(-1), dstObj.Size())
f.(*Fs).opt.ReadSize = true
defer func() {
f.(*Fs).opt.ReadSize = false
}()
size := dstObj.Size()
assert.True(t, size > 1000, fmt.Sprintf("Size too small %d", size))
})
t.Run("ObjectSetModTime", func(t *testing.T) {
err := dstObj.SetModTime(ctx, time.Now())
assert.Equal(t, fs.ErrorCantSetModTime, err)
})
t.Run("ObjectStorable", func(t *testing.T) {
assert.True(t, dstObj.Storable())
})
t.Run("ObjectOpen", func(t *testing.T) {
in, err := dstObj.Open(ctx)
require.NoError(t, err)
buf, err := ioutil.ReadAll(in)
require.NoError(t, err)
require.NoError(t, in.Close())
assert.True(t, len(buf) > 1000)
contentType := http.DetectContentType(buf[:512])
assert.Equal(t, "image/jpeg", contentType)
})
t.Run("CheckFileInAlbum", func(t *testing.T) {
entries, err := f.List(ctx, albumName)
require.NoError(t, err)
assert.Equal(t, 1, len(entries))
assert.Equal(t, remote, entries[0].Remote())
assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
})
// Check it is there in the year/month/day hierarchy
// 2013-07-26 is the creation date of the photo
checkPresent := func(t *testing.T, objPath string) {
entries, err := f.List(ctx, objPath)
require.NoError(t, err)
found := false
for _, entry := range entries {
leaf := path.Base(entry.Remote())
if leaf == fileNameAlbum || leaf == remoteWithID {
found = true
}
}
assert.True(t, found, fmt.Sprintf("didn't find %q in %q", fileNameAlbum, objPath))
}
t.Run("CheckInByYear", func(t *testing.T) {
checkPresent(t, "media/by-year/2013")
})
t.Run("CheckInByMonth", func(t *testing.T) {
checkPresent(t, "media/by-month/2013/2013-07")
})
t.Run("CheckInByDay", func(t *testing.T) {
checkPresent(t, "media/by-day/2013/2013-07-26")
})
t.Run("NewObject", func(t *testing.T) {
o, err := f.NewObject(ctx, remote)
require.NoError(t, err)
require.Equal(t, remote, o.Remote())
})
t.Run("NewObjectWithID", func(t *testing.T) {
o, err := f.NewObject(ctx, remoteWithID)
require.NoError(t, err)
require.Equal(t, remoteWithID, o.Remote())
})
t.Run("NewFsIsFile", func(t *testing.T) {
fNew, err := fs.NewFs(*fstest.RemoteName + remote)
assert.Equal(t, fs.ErrorIsFile, err)
leaf := path.Base(remote)
o, err := fNew.NewObject(ctx, leaf)
require.NoError(t, err)
require.Equal(t, leaf, o.Remote())
})
t.Run("RemoveFileFromAlbum", func(t *testing.T) {
err = dstObj.Remove(ctx)
require.NoError(t, err)
time.Sleep(time.Second)
// Check album empty
entries, err := f.List(ctx, albumName)
require.NoError(t, err)
assert.Equal(t, 0, len(entries))
})
})
// remove the album
err = f.Rmdir(ctx, albumName)
require.Error(t, err) // FIXME doesn't work yet
})
t.Run("UploadMkdir", func(t *testing.T) {
assert.NoError(t, f.Mkdir(ctx, "upload/dir"))
assert.NoError(t, f.Mkdir(ctx, "upload/dir/subdir"))
t.Run("List", func(t *testing.T) {
entries, err := f.List(ctx, "upload")
require.NoError(t, err)
assert.Equal(t, 1, len(entries))
assert.Equal(t, "upload/dir", entries[0].Remote())
entries, err = f.List(ctx, "upload/dir")
require.NoError(t, err)
assert.Equal(t, 1, len(entries))
assert.Equal(t, "upload/dir/subdir", entries[0].Remote())
})
t.Run("Rmdir", func(t *testing.T) {
assert.NoError(t, f.Rmdir(ctx, "upload/dir/subdir"))
assert.NoError(t, f.Rmdir(ctx, "upload/dir"))
})
t.Run("ListEmpty", func(t *testing.T) {
entries, err := f.List(ctx, "upload")
require.NoError(t, err)
assert.Equal(t, 0, len(entries))
_, err = f.List(ctx, "upload/dir")
assert.Equal(t, fs.ErrorDirNotFound, err)
})
})
t.Run("Upload", func(t *testing.T) {
uploadDir := "upload/dir/subdir"
remote := path.Join(uploadDir, fileNameUpload)
srcObj, err := localFs.NewObject(ctx, fileNameUpload)
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()
remoteWithID := addFileID(remote, dstObj.(*Object).id)
t.Run("List", func(t *testing.T) {
entries, err := f.List(ctx, uploadDir)
require.NoError(t, err)
require.Equal(t, 1, len(entries))
assert.Equal(t, remote, entries[0].Remote())
assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
})
t.Run("NewObject", func(t *testing.T) {
o, err := f.NewObject(ctx, remote)
require.NoError(t, err)
require.Equal(t, remote, o.Remote())
})
t.Run("NewObjectWithID", func(t *testing.T) {
o, err := f.NewObject(ctx, remoteWithID)
require.NoError(t, err)
require.Equal(t, remoteWithID, o.Remote())
})
})
t.Run("Name", func(t *testing.T) {
assert.Equal(t, (*fstest.RemoteName)[:len(*fstest.RemoteName)-1], f.Name())
})
t.Run("Root", func(t *testing.T) {
assert.Equal(t, "", f.Root())
})
t.Run("String", func(t *testing.T) {
assert.Equal(t, `Google Photos path ""`, f.String())
})
t.Run("Features", func(t *testing.T) {
features := f.Features()
assert.False(t, features.CaseInsensitive)
assert.True(t, features.ReadMimeType)
})
t.Run("Precision", func(t *testing.T) {
assert.Equal(t, fs.ModTimeNotSupported, f.Precision())
})
t.Run("Hashes", func(t *testing.T) {
assert.Equal(t, hash.Set(hash.None), f.Hashes())
})
}
func TestAddID(t *testing.T) {
assert.Equal(t, "potato {123}", addID("potato", "123"))
assert.Equal(t, "{123}", addID("", "123"))
}
func TestFileAddID(t *testing.T) {
assert.Equal(t, "potato {123}.txt", addFileID("potato.txt", "123"))
assert.Equal(t, "potato {123}", addFileID("potato", "123"))
assert.Equal(t, "{123}", addFileID("", "123"))
}
func TestFindID(t *testing.T) {
assert.Equal(t, "", findID("potato"))
ID := "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
assert.Equal(t, ID, findID("potato {"+ID+"}.txt"))
ID = ID[1:]
assert.Equal(t, "", findID("potato {"+ID+"}.txt"))
}

View File

@@ -1,335 +0,0 @@
// Store the parsing of file patterns
package googlephotos
import (
"context"
"fmt"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
)
// lister describes the subset of the interfaces on Fs needed for the
// file pattern parsing
type lister interface {
listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error)
listAlbums(shared bool) (all *albums, err error)
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
dirTime() time.Time
}
// dirPattern describes a single directory pattern
type dirPattern struct {
re string // match for the path
match *regexp.Regexp // compiled match
canUpload bool // true if can upload here
canMkdir bool // true if can make a directory here
isFile bool // true if this is a file
isUpload bool // true if this is the upload directory
// function to turn a match into DirEntries
toEntries func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error)
}
// dirPatterns is a slice of all the directory patterns
type dirPatterns []dirPattern
// patterns describes the layout of the google photos backend file system.
//
// NB no trailing / on paths
var patterns = dirPatterns{
{
re: `^$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
return fs.DirEntries{
fs.NewDir(prefix+"media", f.dirTime()),
fs.NewDir(prefix+"album", f.dirTime()),
fs.NewDir(prefix+"shared-album", f.dirTime()),
fs.NewDir(prefix+"upload", f.dirTime()),
}, nil
},
},
{
re: `^upload(?:/(.*))?$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
return f.listUploads(ctx, match[0])
},
canUpload: true,
canMkdir: true,
isUpload: true,
},
{
re: `^upload/(.*)$`,
isFile: true,
canUpload: true,
isUpload: true,
},
{
re: `^media$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
return fs.DirEntries{
fs.NewDir(prefix+"all", f.dirTime()),
fs.NewDir(prefix+"by-year", f.dirTime()),
fs.NewDir(prefix+"by-month", f.dirTime()),
fs.NewDir(prefix+"by-day", f.dirTime()),
}, nil
},
},
{
re: `^media/all$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
return f.listDir(ctx, prefix, api.SearchFilter{})
},
},
{
re: `^media/all/([^/]+)$`,
isFile: true,
},
{
re: `^media/by-year$`,
toEntries: years,
},
{
re: `^media/by-year/(\d{4})$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
filter, err := yearMonthDayFilter(ctx, f, match)
if err != nil {
return nil, err
}
return f.listDir(ctx, prefix, filter)
},
},
{
re: `^media/by-year/(\d{4})/([^/]+)$`,
isFile: true,
},
{
re: `^media/by-month$`,
toEntries: years,
},
{
re: `^media/by-month/(\d{4})$`,
toEntries: months,
},
{
re: `^media/by-month/\d{4}/(\d{4})-(\d{2})$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
filter, err := yearMonthDayFilter(ctx, f, match)
if err != nil {
return nil, err
}
return f.listDir(ctx, prefix, filter)
},
},
{
re: `^media/by-month/\d{4}/(\d{4})-(\d{2})/([^/]+)$`,
isFile: true,
},
{
re: `^media/by-day$`,
toEntries: years,
},
{
re: `^media/by-day/(\d{4})$`,
toEntries: days,
},
{
re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
filter, err := yearMonthDayFilter(ctx, f, match)
if err != nil {
return nil, err
}
return f.listDir(ctx, prefix, filter)
},
},
{
re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})/([^/]+)$`,
isFile: true,
},
{
re: `^album$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return albumsToEntries(ctx, f, false, prefix, "")
},
},
{
re: `^album/(.+)$`,
canMkdir: true,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return albumsToEntries(ctx, f, false, prefix, match[1])
},
},
{
re: `^album/(.+?)/([^/]+)$`,
canUpload: true,
isFile: true,
},
{
re: `^shared-album$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return albumsToEntries(ctx, f, true, prefix, "")
},
},
{
re: `^shared-album/(.+)$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return albumsToEntries(ctx, f, true, prefix, match[1])
},
},
{
re: `^shared-album/(.+?)/([^/]+)$`,
isFile: true,
},
}.mustCompile()
// mustCompile compiles the regexps in the dirPatterns
func (ds dirPatterns) mustCompile() dirPatterns {
for i := range ds {
pattern := &ds[i]
pattern.match = regexp.MustCompile(pattern.re)
}
return ds
}
// match looks up the path passed in within the matching structure and
// returns the submatches and a pointer to the matching pattern, or nil.
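// For example (cf. TestPatternMatch), matching root "media" with
// itemPath "all" yields match []string{"media/all"}, prefix "all/" and
// the pattern whose toEntries lists all media.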
func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []string, prefix string, pattern *dirPattern) {
itemPath = strings.Trim(itemPath, "/")
absPath := path.Join(root, itemPath)
prefix = strings.Trim(absPath[len(root):], "/")
if prefix != "" {
prefix += "/"
}
for i := range ds {
pattern = &ds[i]
if pattern.isFile != isFile {
continue
}
match = pattern.match.FindStringSubmatch(absPath)
if match != nil {
return
}
}
return nil, "", nil
}
// Return the years from 2000 to today
// FIXME make configurable?
func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
currentYear := f.dirTime().Year()
for year := 2000; year <= currentYear; year++ {
entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime()))
}
return entries, nil
}
// Return the months in a given year
func months(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
year := match[1]
for month := 1; month <= 12; month++ {
entries = append(entries, fs.NewDir(fmt.Sprintf("%s%s-%02d", prefix, year, month), f.dirTime()))
}
return entries, nil
}
// Return the days in a given year
func days(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
year := match[1]
current, err := time.Parse("2006", year)
if err != nil {
return nil, errors.Errorf("bad year %q", match[1])
}
currentYear := current.Year()
for current.Year() == currentYear {
entries = append(entries, fs.NewDir(prefix+current.Format("2006-01-02"), f.dirTime()))
current = current.AddDate(0, 0, 1)
}
return entries, nil
}
// This creates a search filter on year/month/day as provided
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
year, err := strconv.Atoi(match[1])
if err != nil || year < 1000 || year > 3000 {
return sf, errors.Errorf("bad year %q", match[1])
}
sf = api.SearchFilter{
Filters: &api.Filters{
DateFilter: &api.DateFilter{
Dates: []api.Date{
{
Year: year,
},
},
},
},
}
if len(match) >= 3 {
month, err := strconv.Atoi(match[2])
if err != nil || month < 1 || month > 12 {
return sf, errors.Errorf("bad month %q", match[2])
}
sf.Filters.DateFilter.Dates[0].Month = month
}
if len(match) >= 4 {
day, err := strconv.Atoi(match[3])
if err != nil || day < 1 || day > 31 {
return sf, errors.Errorf("bad day %q", match[3])
}
sf.Filters.DateFilter.Dates[0].Day = day
}
return sf, nil
}
// Turns an albumPath into entries
//
// These can either be synthetic directory entries if the album path
// is a prefix of another album, or actual files, or a combination of
// the two.
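//
// For example (see TestPatternAlbumsToEntries), with albums "sub" and
// "sub/one" defined, albumsToEntries for albumPath "sub" returns a
// directory entry for "one" plus the media items of the "sub" album
// itself.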
func albumsToEntries(ctx context.Context, f lister, shared bool, prefix string, albumPath string) (entries fs.DirEntries, err error) {
albums, err := f.listAlbums(shared)
if err != nil {
return nil, err
}
// Put in the directories
dirs, foundAlbumPath := albums.getDirs(albumPath)
if foundAlbumPath {
for _, dir := range dirs {
d := fs.NewDir(prefix+dir, f.dirTime())
dirPath := path.Join(albumPath, dir)
// if this dir is an album add more special stuff
album, ok := albums.get(dirPath)
if ok {
count, err := strconv.ParseInt(album.MediaItemsCount, 10, 64)
if err != nil {
fs.Debugf(f, "Error reading media count: %v", err)
}
d.SetID(album.ID).SetItems(count)
}
entries = append(entries, d)
}
}
// if this is an album then return a filter to list it
album, foundAlbum := albums.get(albumPath)
if foundAlbum {
filter := api.SearchFilter{AlbumID: album.ID}
newEntries, err := f.listDir(ctx, prefix, filter)
if err != nil {
return nil, err
}
entries = append(entries, newEntries...)
}
if !foundAlbumPath && !foundAlbum && albumPath != "" {
return nil, fs.ErrorDirNotFound
}
return entries, nil
}

View File

@@ -1,495 +0,0 @@
package googlephotos
import (
"context"
"fmt"
"testing"
"time"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// time for directories
var startTime = fstest.Time("2019-06-24T15:53:05.999999999Z")
// mock Fs for testing patterns
type testLister struct {
t *testing.T
albums *albums
names []string
uploaded dirtree.DirTree
}
// newTestLister makes a mock for testing
func newTestLister(t *testing.T) *testLister {
return &testLister{
t: t,
albums: newAlbums(),
uploaded: dirtree.New(),
}
}
// mock listDir for testing
func (f *testLister) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
for _, name := range f.names {
entries = append(entries, mockobject.New(prefix+name))
}
return entries, nil
}
// mock listAlbums for testing
func (f *testLister) listAlbums(shared bool) (all *albums, err error) {
return f.albums, nil
}
// mock listUploads for testing
func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries = f.uploaded[dir]
return entries, nil
}
// mock dirTime for testing
func (f *testLister) dirTime() time.Time {
return startTime
}
func TestPatternMatch(t *testing.T) {
for testNumber, test := range []struct {
// input
root string
itemPath string
isFile bool
// expected output
wantMatch []string
wantPrefix string
wantPattern *dirPattern
}{
{
root: "",
itemPath: "",
isFile: false,
wantMatch: []string{""},
wantPrefix: "",
wantPattern: &patterns[0],
},
{
root: "",
itemPath: "",
isFile: true,
wantMatch: nil,
wantPrefix: "",
wantPattern: nil,
},
{
root: "upload",
itemPath: "",
isFile: false,
wantMatch: []string{"upload", ""},
wantPrefix: "",
wantPattern: &patterns[1],
},
{
root: "upload/dir",
itemPath: "",
isFile: false,
wantMatch: []string{"upload/dir", "dir"},
wantPrefix: "",
wantPattern: &patterns[1],
},
{
root: "upload/file.jpg",
itemPath: "",
isFile: true,
wantMatch: []string{"upload/file.jpg", "file.jpg"},
wantPrefix: "",
wantPattern: &patterns[2],
},
{
root: "media",
itemPath: "",
isFile: false,
wantMatch: []string{"media"},
wantPrefix: "",
wantPattern: &patterns[3],
},
{
root: "",
itemPath: "media",
isFile: false,
wantMatch: []string{"media"},
wantPrefix: "media/",
wantPattern: &patterns[3],
},
{
root: "media/all",
itemPath: "",
isFile: false,
wantMatch: []string{"media/all"},
wantPrefix: "",
wantPattern: &patterns[4],
},
{
root: "media",
itemPath: "all",
isFile: false,
wantMatch: []string{"media/all"},
wantPrefix: "all/",
wantPattern: &patterns[4],
},
{
root: "media/all",
itemPath: "file.jpg",
isFile: true,
wantMatch: []string{"media/all/file.jpg", "file.jpg"},
wantPrefix: "file.jpg/",
wantPattern: &patterns[5],
},
} {
t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q,isFile=%v", testNumber, test.root, test.itemPath, test.isFile), func(t *testing.T) {
gotMatch, gotPrefix, gotPattern := patterns.match(test.root, test.itemPath, test.isFile)
assert.Equal(t, test.wantMatch, gotMatch)
assert.Equal(t, test.wantPrefix, gotPrefix)
assert.Equal(t, test.wantPattern, gotPattern)
})
}
}
func TestPatternMatchToEntries(t *testing.T) {
ctx := context.Background()
f := newTestLister(t)
f.names = []string{"file.jpg"}
f.albums.add(&api.Album{
ID: "1",
Title: "sub/one",
})
f.albums.add(&api.Album{
ID: "2",
Title: "sub",
})
f.uploaded.AddEntry(mockobject.New("upload/file1.jpg"))
f.uploaded.AddEntry(mockobject.New("upload/dir/file2.jpg"))
for testNumber, test := range []struct {
// input
root string
itemPath string
// expected output
wantMatch []string
wantPrefix string
remotes []string
}{
{
root: "",
itemPath: "",
wantMatch: []string{""},
wantPrefix: "",
remotes: []string{"media/", "album/", "shared-album/", "upload/"},
},
{
root: "upload",
itemPath: "",
wantMatch: []string{"upload", ""},
wantPrefix: "",
remotes: []string{"upload/file1.jpg", "upload/dir/"},
},
{
root: "upload",
itemPath: "dir",
wantMatch: []string{"upload/dir", "dir"},
wantPrefix: "dir/",
remotes: []string{"upload/dir/file2.jpg"},
},
{
root: "media",
itemPath: "",
wantMatch: []string{"media"},
wantPrefix: "",
remotes: []string{"all/", "by-year/", "by-month/", "by-day/"},
},
{
root: "media/all",
itemPath: "",
wantMatch: []string{"media/all"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "media",
itemPath: "all",
wantMatch: []string{"media/all"},
wantPrefix: "all/",
remotes: []string{"all/file.jpg"},
},
{
root: "media/by-year",
itemPath: "",
wantMatch: []string{"media/by-year"},
wantPrefix: "",
remotes: []string{"2000/", "2001/", "2002/", "2003/"},
},
{
root: "media/by-year/2000",
itemPath: "",
wantMatch: []string{"media/by-year/2000", "2000"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "media/by-month",
itemPath: "",
wantMatch: []string{"media/by-month"},
wantPrefix: "",
remotes: []string{"2000/", "2001/", "2002/", "2003/"},
},
{
root: "media/by-month/2001",
itemPath: "",
wantMatch: []string{"media/by-month/2001", "2001"},
wantPrefix: "",
remotes: []string{"2001-01/", "2001-02/", "2001-03/", "2001-04/"},
},
{
root: "media/by-month/2001/2001-01",
itemPath: "",
wantMatch: []string{"media/by-month/2001/2001-01", "2001", "01"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "media/by-day",
itemPath: "",
wantMatch: []string{"media/by-day"},
wantPrefix: "",
remotes: []string{"2000/", "2001/", "2002/", "2003/"},
},
{
root: "media/by-day/2001",
itemPath: "",
wantMatch: []string{"media/by-day/2001", "2001"},
wantPrefix: "",
remotes: []string{"2001-01-01/", "2001-01-02/", "2001-01-03/", "2001-01-04/"},
},
{
root: "media/by-day/2001/2001-01-02",
itemPath: "",
wantMatch: []string{"media/by-day/2001/2001-01-02", "2001", "01", "02"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "album",
itemPath: "",
wantMatch: []string{"album"},
wantPrefix: "",
remotes: []string{"sub/"},
},
{
root: "album/sub",
itemPath: "",
wantMatch: []string{"album/sub", "sub"},
wantPrefix: "",
remotes: []string{"one/", "file.jpg"},
},
{
root: "album/sub/one",
itemPath: "",
wantMatch: []string{"album/sub/one", "sub/one"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "shared-album",
itemPath: "",
wantMatch: []string{"shared-album"},
wantPrefix: "",
remotes: []string{"sub/"},
},
{
root: "shared-album/sub",
itemPath: "",
wantMatch: []string{"shared-album/sub", "sub"},
wantPrefix: "",
remotes: []string{"one/", "file.jpg"},
},
{
root: "shared-album/sub/one",
itemPath: "",
wantMatch: []string{"shared-album/sub/one", "sub/one"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
} {
t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q", testNumber, test.root, test.itemPath), func(t *testing.T) {
match, prefix, pattern := patterns.match(test.root, test.itemPath, false)
assert.Equal(t, test.wantMatch, match)
assert.Equal(t, test.wantPrefix, prefix)
assert.NotNil(t, pattern)
assert.NotNil(t, pattern.toEntries)
entries, err := pattern.toEntries(ctx, f, prefix, match)
assert.NoError(t, err)
var remotes = []string{}
for _, entry := range entries {
remote := entry.Remote()
if _, isDir := entry.(fs.Directory); isDir {
remote += "/"
}
remotes = append(remotes, remote)
if len(remotes) >= 4 {
break // only test first 4 entries
}
}
assert.Equal(t, test.remotes, remotes)
})
}
}
func TestPatternYears(t *testing.T) {
f := newTestLister(t)
entries, err := years(context.Background(), f, "potato/", nil)
require.NoError(t, err)
year := 2000
for _, entry := range entries {
assert.Equal(t, "potato/"+fmt.Sprint(year), entry.Remote())
year++
}
}
func TestPatternMonths(t *testing.T) {
f := newTestLister(t)
entries, err := months(context.Background(), f, "potato/", []string{"", "2020"})
require.NoError(t, err)
assert.Equal(t, 12, len(entries))
for i, entry := range entries {
assert.Equal(t, fmt.Sprintf("potato/2020-%02d", i+1), entry.Remote())
}
}
func TestPatternDays(t *testing.T) {
f := newTestLister(t)
entries, err := days(context.Background(), f, "potato/", []string{"", "2020"})
require.NoError(t, err)
assert.Equal(t, 366, len(entries))
assert.Equal(t, "potato/2020-01-01", entries[0].Remote())
assert.Equal(t, "potato/2020-12-31", entries[len(entries)-1].Remote())
}
func TestPatternYearMonthDayFilter(t *testing.T) {
ctx := context.Background()
f := newTestLister(t)
// Years
sf, err := yearMonthDayFilter(ctx, f, []string{"", "2000"})
require.NoError(t, err)
assert.Equal(t, api.SearchFilter{
Filters: &api.Filters{
DateFilter: &api.DateFilter{
Dates: []api.Date{
{
Year: 2000,
},
},
},
},
}, sf)
_, err = yearMonthDayFilter(ctx, f, []string{"", "potato"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "999"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "4000"})
require.Error(t, err)
// Months
sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01"})
require.NoError(t, err)
assert.Equal(t, api.SearchFilter{
Filters: &api.Filters{
DateFilter: &api.DateFilter{
Dates: []api.Date{
{
Month: 1,
Year: 2000,
},
},
},
},
}, sf)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "potato"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "0"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "13"})
require.Error(t, err)
// Days
sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "02"})
require.NoError(t, err)
assert.Equal(t, api.SearchFilter{
Filters: &api.Filters{
DateFilter: &api.DateFilter{
Dates: []api.Date{
{
Day: 2,
Month: 1,
Year: 2000,
},
},
},
},
}, sf)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "potato"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "0"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "32"})
require.Error(t, err)
}
func TestPatternAlbumsToEntries(t *testing.T) {
f := newTestLister(t)
ctx := context.Background()
_, err := albumsToEntries(ctx, f, false, "potato/", "sub")
assert.Equal(t, fs.ErrorDirNotFound, err)
f.albums.add(&api.Album{
ID: "1",
Title: "sub/one",
})
entries, err := albumsToEntries(ctx, f, false, "potato/", "sub")
assert.NoError(t, err)
assert.Equal(t, 1, len(entries))
assert.Equal(t, "potato/one", entries[0].Remote())
_, ok := entries[0].(fs.Directory)
assert.Equal(t, true, ok)
f.albums.add(&api.Album{
ID: "1",
Title: "sub",
})
f.names = []string{"file.jpg"}
entries, err = albumsToEntries(ctx, f, false, "potato/", "sub")
assert.NoError(t, err)
assert.Equal(t, 2, len(entries))
assert.Equal(t, "potato/one", entries[0].Remote())
_, ok = entries[0].(fs.Directory)
assert.Equal(t, true, ok)
assert.Equal(t, "potato/file.jpg", entries[1].Remote())
_, ok = entries[1].(fs.Object)
assert.Equal(t, true, ok)
}

Binary file not shown. (Before: 16 KiB)

Binary file not shown. (Before: 16 KiB)

View File

@@ -5,9 +5,7 @@
package http
import (
"context"
"io"
"mime"
"net/http"
"net/url"
"path"
@@ -15,13 +13,13 @@ import (
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/net/html"
)
@@ -42,41 +40,7 @@ func init() {
Examples: []fs.OptionExample{{
Value: "https://example.com",
Help: "Connect to example.com",
}, {
Value: "https://user:pass@example.com",
Help: "Connect to example.com using a username and password",
}},
}, {
Name: "headers",
Help: `Set HTTP headers for all transactions
Use this to set additional HTTP headers for all transactions
The input format is a comma separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.
For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
You can set multiple headers, eg '"Cookie","name=value","Authorization","xxx"'.
`,
Default: fs.CommaSepList{},
Advanced: true,
}, {
Name: "no_slash",
Help: `Set this if the site doesn't end directories with /
Use this if your target website does not use / on the end of
directories.
A / on the end of a path is how rclone normally tells the difference
between files and directories. If this flag is set, then rclone will
treat all files with Content-Type: text/html as directories and read
URLs from them rather than downloading them.
Note that this may cause rclone to confuse genuine HTML files with
directories.`,
Default: false,
Advanced: true,
}},
}
fs.Register(fsi)
@@ -84,9 +48,7 @@ directories.`,
// Options defines the configuration for this backend
type Options struct {
Endpoint string `config:"url"`
NoSlash bool `config:"no_slash"`
Headers fs.CommaSepList `config:"headers"`
Endpoint string `config:"url"`
}
// Fs stores the interface to the remote HTTP files
@@ -131,10 +93,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
if len(opt.Headers)%2 != 0 {
return nil, errors.New("odd number of headers supplied")
}
if !strings.HasSuffix(opt.Endpoint, "/") {
opt.Endpoint += "/"
}
@@ -160,14 +118,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return http.ErrUseLastResponse
}
// check to see if points to a file
req, err := http.NewRequest("HEAD", u.String(), nil)
res, err := noRedir.Head(u.String())
err = statusError(res, err)
if err == nil {
addHeaders(req, opt)
res, err := noRedir.Do(req)
err = statusError(res, err)
if err == nil {
isFile = true
}
isFile = true
}
}
@@ -232,14 +186,14 @@ func (f *Fs) Precision() time.Duration {
}
// NewObject creates a new remote http file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
func (f *Fs) NewObject(remote string) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
err := o.stat()
if err != nil {
return nil, err
return nil, errors.Wrap(err, "Stat failed")
}
return o, nil
}
@@ -294,7 +248,7 @@ func parseName(base *url.URL, name string) (string, error) {
}
// calculate the name relative to the base
name = u.Path[len(base.Path):]
// mustn't be empty
// musn't be empty
if name == "" {
return "", errNameIsEmpty
}
@@ -313,20 +267,14 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
if err != nil {
return nil, err
}
var (
walk func(*html.Node)
seen = make(map[string]struct{})
)
var walk func(*html.Node)
walk = func(n *html.Node) {
if n.Type == html.ElementNode && n.Data == "a" {
for _, a := range n.Attr {
if a.Key == "href" {
name, err := parseName(base, a.Val)
if err == nil {
if _, found := seen[name]; !found {
names = append(names, name)
seen[name] = struct{}{}
}
names = append(names, name)
}
break
}
@@ -340,20 +288,6 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
return names, nil
}
// Adds the configured headers to the request if any
func addHeaders(req *http.Request, opt *Options) {
for i := 0; i < len(opt.Headers); i += 2 {
key := opt.Headers[i]
value := opt.Headers[i+1]
req.Header.Add(key, value)
}
}
// Adds the configured headers to the request if any
func (f *Fs) addHeaders(req *http.Request) {
addHeaders(req, &f.opt)
}
// Read the directory passed in
func (f *Fs) readDir(dir string) (names []string, err error) {
URL := f.url(dir)
@@ -364,23 +298,15 @@ func (f *Fs) readDir(dir string) (names []string, err error) {
if !strings.HasSuffix(URL, "/") {
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
// Do the request
req, err := http.NewRequest("GET", URL, nil)
if err != nil {
return nil, errors.Wrap(err, "readDir failed")
}
f.addHeaders(req)
res, err := f.httpClient.Do(req)
if err == nil {
defer fs.CheckClose(res.Body, &err)
if res.StatusCode == http.StatusNotFound {
return nil, fs.ErrorDirNotFound
}
res, err := f.httpClient.Get(URL)
if err == nil && res.StatusCode == http.StatusNotFound {
return nil, fs.ErrorDirNotFound
}
err = statusError(res, err)
if err != nil {
return nil, errors.Wrap(err, "failed to readDir")
}
defer fs.CheckClose(res.Body, &err)
contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
switch contentType {
@@ -404,7 +330,7 @@ func (f *Fs) readDir(dir string) (names []string, err error) {
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
if !strings.HasSuffix(dir, "/") && dir != "" {
dir += "/"
}
@@ -424,16 +350,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fs: f,
remote: remote,
}
switch err = file.stat(); err {
case nil:
entries = append(entries, file)
case fs.ErrorNotAFile:
// ...found a directory not a file
dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
default:
if err = file.stat(); err != nil {
fs.Debugf(remote, "skipping because of error: %v", err)
continue
}
entries = append(entries, file)
}
}
return entries, nil
@@ -444,12 +365,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
@@ -472,7 +393,7 @@ func (o *Object) Remote() string {
}
// Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
func (o *Object) Hash(r hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
@@ -482,7 +403,7 @@ func (o *Object) Size() int64 {
}
// ModTime returns the modification time of the remote http file
func (o *Object) ModTime(ctx context.Context) time.Time {
func (o *Object) ModTime() time.Time {
return o.modTime
}
@@ -494,15 +415,7 @@ func (o *Object) url() string {
// stat updates the info field in the Object
func (o *Object) stat() error {
url := o.url()
req, err := http.NewRequest("HEAD", url, nil)
if err != nil {
return errors.Wrap(err, "stat failed")
}
o.fs.addHeaders(req)
res, err := o.fs.httpClient.Do(req)
if err == nil && res.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
res, err := o.fs.httpClient.Head(url)
err = statusError(res, err)
if err != nil {
return errors.Wrap(err, "failed to stat")
@@ -514,23 +427,13 @@ func (o *Object) stat() error {
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
o.modTime = t
o.contentType = res.Header.Get("Content-Type")
// If NoSlash is set then check ContentType to see if it is a directory
if o.fs.opt.NoSlash {
mediaType, _, err := mime.ParseMediaType(o.contentType)
if err != nil {
return errors.Wrapf(err, "failed to parse Content-Type: %q", o.contentType)
}
if mediaType == "text/html" {
return fs.ErrorNotAFile
}
}
return nil
}
// SetModTime sets the modification and access time to the specified time
//
// it also updates the info field
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
func (o *Object) SetModTime(modTime time.Time) error {
return errorReadOnly
}
@@ -540,7 +443,7 @@ func (o *Object) Storable() bool {
}
// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
url := o.url()
req, err := http.NewRequest("GET", url, nil)
if err != nil {
@@ -551,7 +454,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
for k, v := range fs.OpenOptionHeaders(options) {
req.Header.Add(k, v)
}
o.fs.addHeaders(req)
// Do the request
res, err := o.fs.httpClient.Do(req)
@@ -568,27 +470,27 @@ func (f *Fs) Hashes() hash.Set {
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
func (f *Fs) Mkdir(dir string) error {
return errorReadOnly
}
// Remove a remote http file object
func (o *Object) Remove(ctx context.Context) error {
func (o *Object) Remove() error {
return errorReadOnly
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
func (f *Fs) Rmdir(dir string) error {
return errorReadOnly
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return errorReadOnly
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
func (o *Object) MimeType() string {
return o.contentType
}

View File

@@ -1,7 +1,8 @@
// +build go1.8
package http
import (
"context"
"fmt"
"io/ioutil"
"net/http"
@@ -10,15 +11,14 @@ import (
"os"
"path/filepath"
"sort"
"strings"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/rest"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/lib/rest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -27,7 +27,6 @@ var (
remoteName = "TestHTTP"
testPath = "test"
filesPath = filepath.Join(testPath, "files")
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
)
// prepareServer the test server and return a function to tidy it up afterwards
@@ -35,16 +34,8 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
// file server for test/files
fileServer := http.FileServer(http.Dir(filesPath))
// test the headers are there then pass on to fileServer
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
fileServer.ServeHTTP(w, r)
})
// Make the test server
ts := httptest.NewServer(handler)
ts := httptest.NewServer(fileServer)
// Configure the remote
config.LoadConfig()
@@ -55,9 +46,8 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
// config.FileSet(remoteName, "url", ts.URL)
m := configmap.Simple{
"type": "http",
"url": ts.URL,
"headers": strings.Join(headers, ","),
"type": "http",
"url": ts.URL,
}
// return a function to tidy up
@@ -75,8 +65,8 @@ func prepare(t *testing.T) (fs.Fs, func()) {
return f, tidy
}
func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
entries, err := f.List(context.Background(), "")
func testListRoot(t *testing.T, f fs.Fs) {
entries, err := f.List("")
require.NoError(t, err)
sort.Sort(entries)
@@ -103,36 +93,22 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
e = entries[3]
assert.Equal(t, "two.html", e.Remote())
if noSlash {
assert.Equal(t, int64(-1), e.Size())
_, ok = e.(fs.Directory)
assert.True(t, ok)
} else {
assert.Equal(t, int64(41), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
}
assert.Equal(t, int64(7), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
}
func TestListRoot(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
testListRoot(t, f, false)
}
func TestListRootNoSlash(t *testing.T) {
f, tidy := prepare(t)
f.(*Fs).opt.NoSlash = true
defer tidy()
testListRoot(t, f, true)
testListRoot(t, f)
}
func TestListSubDir(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
entries, err := f.List(context.Background(), "three")
entries, err := f.List("three")
require.NoError(t, err)
sort.Sort(entries)
@@ -150,7 +126,7 @@ func TestNewObject(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
o, err := f.NewObject(context.Background(), "four/under four.txt")
o, err := f.NewObject("four/under four.txt")
require.NoError(t, err)
assert.Equal(t, "four/under four.txt", o.Remote())
@@ -160,7 +136,7 @@ func TestNewObject(t *testing.T) {
// Test the time is correct on the object
tObj := o.ModTime(context.Background())
tObj := o.ModTime()
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
require.NoError(t, err)
@@ -168,22 +144,17 @@ func TestNewObject(t *testing.T) {
dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
// check object not found
o, err = f.NewObject(context.Background(), "not found.txt")
assert.Nil(t, o)
assert.Equal(t, fs.ErrorObjectNotFound, err)
}
func TestOpen(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
o, err := f.NewObject(context.Background(), "four/under four.txt")
o, err := f.NewObject("four/under four.txt")
require.NoError(t, err)
// Test normal read
fd, err := o.Open(context.Background())
fd, err := o.Open()
require.NoError(t, err)
data, err := ioutil.ReadAll(fd)
require.NoError(t, err)
@@ -191,7 +162,7 @@ func TestOpen(t *testing.T) {
assert.Equal(t, "beetroot\n", string(data))
// Test with range request
fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
fd, err = o.Open(&fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err)
data, err = ioutil.ReadAll(fd)
require.NoError(t, err)
@@ -203,12 +174,12 @@ func TestMimeType(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
o, err := f.NewObject(context.Background(), "four/under four.txt")
o, err := f.NewObject("four/under four.txt")
require.NoError(t, err)
do, ok := o.(fs.MimeTyper)
require.True(t, ok)
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType())
}
func TestIsAFileRoot(t *testing.T) {
@@ -218,7 +189,7 @@ func TestIsAFileRoot(t *testing.T) {
f, err := NewFs(remoteName, "one%.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
testListRoot(t, f, false)
testListRoot(t, f)
}
func TestIsAFileSubDir(t *testing.T) {
@@ -228,7 +199,7 @@ func TestIsAFileSubDir(t *testing.T) {
f, err := NewFs(remoteName, "three/underthree.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
entries, err := f.List(context.Background(), "")
entries, err := f.List("")
require.NoError(t, err)
sort.Sort(entries)

View File

@@ -1 +1 @@
<a href="two.html/file.txt">file.txt</a>
potato

View File

@@ -24,7 +24,7 @@
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="timer-test">timer-test</a></td><td align="right">09-May-2017 17:05 </td><td align="right">1.5M</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="words-to-regexp.pl">words-to-regexp.pl</a></td><td align="right">01-Mar-2005 20:43 </td><td align="right">6.0K</td><td>&nbsp;</td></tr>
<tr><th colspan="5"><hr></th></tr>
<!-- some extras from https://github.com/rclone/rclone/issues/1573 -->
<!-- some extras from https://github.com/ncw/rclone/issues/1573 -->
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20100%25%20better.mp3">Now 100% better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20better.mp3">Now better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td>&nbsp;</td></tr>

View File

@@ -2,10 +2,8 @@ package hubic
import (
"net/http"
"time"
"github.com/ncw/swift"
"github.com/rclone/rclone/fs"
)
// auth is an authenticator for swift
@@ -23,17 +21,12 @@ func newAuth(f *Fs) *auth {
// Request constructs a http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials()
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
func (a *auth) Request(*swift.Connection) (*http.Request, error) {
err := a.f.getCredentials()
if err != nil {
return nil, err
}
return nil, err
return nil, nil
}
// Response parses the result of an http request

View File

@@ -9,22 +9,20 @@ package hubic
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strings"
"time"
"github.com/ncw/rclone/backend/swift"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/lib/oauthutil"
swiftLib "github.com/ncw/swift"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/oauthutil"
"golang.org/x/oauth2"
)
@@ -62,13 +60,13 @@ func init() {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append([]fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Hubic Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Hubic Client Secret\nLeave blank normally.",
}}, swift.SharedOptions...),
}},
})
}
@@ -126,9 +124,7 @@ func (f *Fs) getCredentials() (err error) {
}
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode < 200 || resp.StatusCode > 299 {
body, _ := ioutil.ReadAll(resp.Body)
bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
return errors.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
return errors.Errorf("failed to get credentials: %s", resp.Status)
}
decoder := json.NewDecoder(resp.Body)
var result credentials

View File

@@ -4,16 +4,14 @@ package hubic_test
import (
"testing"
"github.com/rclone/rclone/backend/hubic"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/backend/hubic"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestHubic:",
NilObject: (*hubic.Object)(nil),
SkipFsCheckWrap: true,
SkipObjectCheckWrap: true,
RemoteName: "TestHubic:",
NilObject: (*hubic.Object)(nil),
})
}

View File

@@ -1,349 +0,0 @@
package api
import (
"encoding/xml"
"fmt"
"time"
"github.com/pkg/errors"
)
const (
// default time format for almost all request and responses
timeFormat = "2006-01-02-T15:04:05Z0700"
// the API server seems to use a different format
apiTimeFormat = "2006-01-02T15:04:05Z07:00"
)
// Time represents time values in the Jottacloud API. It uses a custom RFC3339 like format.
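// For example, 2018-07-18 21:39:10 UTC renders as "2018-07-18-T21:39:10Z"
// in timeFormat (note the extra "-" before the "T") and as
// "2018-07-18T21:39:10Z" in apiTimeFormat.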
type Time time.Time
// UnmarshalXML turns XML into a Time
func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
var v string
if err := d.DecodeElement(&v, &start); err != nil {
return err
}
if v == "" {
*t = Time(time.Time{})
return nil
}
newTime, err := time.Parse(timeFormat, v)
if err == nil {
*t = Time(newTime)
}
return err
}
// MarshalXML turns a Time into XML
func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
return e.EncodeElement(t.String(), start)
}
// Return Time string in Jottacloud format
func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
// TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type TokenJSON struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
}
// JSON structures returned by new API
// AllocateFileRequest to prepare an upload to Jottacloud
type AllocateFileRequest struct {
Bytes int64 `json:"bytes"`
Created string `json:"created"`
Md5 string `json:"md5"`
Modified string `json:"modified"`
Path string `json:"path"`
}
// AllocateFileResponse for upload requests
type AllocateFileResponse struct {
Name string `json:"name"`
Path string `json:"path"`
State string `json:"state"`
UploadID string `json:"upload_id"`
UploadURL string `json:"upload_url"`
Bytes int64 `json:"bytes"`
ResumePos int64 `json:"resume_pos"`
}
// UploadResponse is returned after an upload
type UploadResponse struct {
Name string `json:"name"`
Path string `json:"path"`
Kind string `json:"kind"`
ContentID string `json:"content_id"`
Bytes int64 `json:"bytes"`
Md5 string `json:"md5"`
Created int64 `json:"created"`
Modified int64 `json:"modified"`
Deleted interface{} `json:"deleted"`
Mime string `json:"mime"`
}
// DeviceRegistrationResponse is the response to registering a device
type DeviceRegistrationResponse struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
}
// CustomerInfo provides general information about the account. Required for finding the correct internal username.
type CustomerInfo struct {
Username string `json:"username"`
Email string `json:"email"`
Name string `json:"name"`
CountryCode string `json:"country_code"`
LanguageCode string `json:"language_code"`
CustomerGroupCode string `json:"customer_group_code"`
BrandCode string `json:"brand_code"`
AccountType string `json:"account_type"`
SubscriptionType string `json:"subscription_type"`
Usage int64 `json:"usage"`
Quota int64 `json:"quota"`
BusinessUsage int64 `json:"business_usage"`
BusinessQuota int64 `json:"business_quota"`
WriteLocked bool `json:"write_locked"`
ReadLocked bool `json:"read_locked"`
LockedCause interface{} `json:"locked_cause"`
WebHash string `json:"web_hash"`
AndroidHash string `json:"android_hash"`
IOSHash string `json:"ios_hash"`
}
// XML structures returned by the old API
// Flag is a hacky type for checking if an attribute is present
type Flag bool
// UnmarshalXMLAttr sets Flag to true if the attribute is present
func (f *Flag) UnmarshalXMLAttr(attr xml.Attr) error {
*f = true
return nil
}
// MarshalXMLAttr is unimplemented: do not use
func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
attr := xml.Attr{
Name: name,
Value: "false",
}
return attr, errors.New("unimplemented")
}
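A minimal sketch of what the Flag trick buys: the attribute's mere presence flips it to true, whatever its value. The types are restated so the snippet runs stand-alone.

package main

import (
	"encoding/xml"
	"fmt"
)

// Flag restated from above: presence of the attribute sets it to true.
type Flag bool

func (f *Flag) UnmarshalXMLAttr(attr xml.Attr) error {
	*f = true
	return nil
}

type item struct {
	Name    string `xml:"name,attr"`
	Deleted Flag   `xml:"deleted,attr"`
}

func main() {
	docs := []string{
		`<file name="a.txt"/>`,
		`<file name="b.txt" deleted="2018-07-18-T21:39:10Z"/>`,
	}
	for _, doc := range docs {
		var it item
		if err := xml.Unmarshal([]byte(doc), &it); err != nil {
			panic(err)
		}
		fmt.Println(it.Name, bool(it.Deleted)) // a.txt false, then b.txt true
	}
}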
/*
GET http://www.jottacloud.com/JFS/<account>
<user time="2018-07-18-T21:39:10Z" host="dn-132">
<username>12qh1wsht8cssxdtwl15rqh9</username>
<account-type>free</account-type>
<locked>false</locked>
<capacity>5368709120</capacity>
<max-devices>-1</max-devices>
<max-mobile-devices>-1</max-mobile-devices>
<usage>0</usage>
<read-locked>false</read-locked>
<write-locked>false</write-locked>
<quota-write-locked>false</quota-write-locked>
<enable-sync>true</enable-sync>
<enable-foldershare>true</enable-foldershare>
<devices>
<device>
<name xml:space="preserve">Jotta</name>
<display_name xml:space="preserve">Jotta</display_name>
<type>JOTTA</type>
<sid>5c458d01-9eaf-4f23-8d3c-2486fd9704d8</sid>
<size>0</size>
<modified>2018-07-15-T22:04:59Z</modified>
</device>
</devices>
</user>
*/
// DriveInfo represents a Jottacloud account
type DriveInfo struct {
Username string `xml:"username"`
AccountType string `xml:"account-type"`
Locked bool `xml:"locked"`
Capacity int64 `xml:"capacity"`
MaxDevices int `xml:"max-devices"`
MaxMobileDevices int `xml:"max-mobile-devices"`
Usage int64 `xml:"usage"`
ReadLocked bool `xml:"read-locked"`
WriteLocked bool `xml:"write-locked"`
QuotaWriteLocked bool `xml:"quota-write-locked"`
EnableSync bool `xml:"enable-sync"`
EnableFolderShare bool `xml:"enable-foldershare"`
Devices []JottaDevice `xml:"devices>device"`
}
/*
GET http://www.jottacloud.com/JFS/<account>/<device>
<device time="2018-07-23-T20:21:50Z" host="dn-158">
<name xml:space="preserve">Jotta</name>
<display_name xml:space="preserve">Jotta</display_name>
<type>JOTTA</type>
<sid>5c458d01-9eaf-4f23-8d3c-2486fd9704d8</sid>
<size>0</size>
<modified>2018-07-15-T22:04:59Z</modified>
<user>12qh1wsht8cssxdtwl15rqh9</user>
<mountPoints>
<mountPoint>
<name xml:space="preserve">Archive</name>
<size>0</size>
<modified>2018-07-15-T22:04:59Z</modified>
</mountPoint>
<mountPoint>
<name xml:space="preserve">Shared</name>
<size>0</size>
<modified></modified>
</mountPoint>
<mountPoint>
<name xml:space="preserve">Sync</name>
<size>0</size>
<modified></modified>
</mountPoint>
</mountPoints>
<metadata first="" max="" total="3" num_mountpoints="3"/>
</device>
*/
// JottaDevice represents a Jottacloud Device
type JottaDevice struct {
Name string `xml:"name"`
DisplayName string `xml:"display_name"`
Type string `xml:"type"`
Sid string `xml:"sid"`
Size int64 `xml:"size"`
User string `xml:"user"`
MountPoints []JottaMountPoint `xml:"mountPoints>mountPoint"`
}
/*
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>
<mountPoint time="2018-07-24-T20:35:02Z" host="dn-157">
<name xml:space="preserve">Sync</name>
<path xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta</path>
<abspath xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta</abspath>
<size>0</size>
<modified></modified>
<device>Jotta</device>
<user>12qh1wsht8cssxdtwl15rqh9</user>
<folders>
<folder name="test"/>
</folders>
<metadata first="" max="" total="1" num_folders="1" num_files="0"/>
</mountPoint>
*/
// JottaMountPoint represents a Jottacloud mountpoint
type JottaMountPoint struct {
Name string `xml:"name"`
Size int64 `xml:"size"`
Device string `xml:"device"`
Folders []JottaFolder `xml:"folders>folder"`
Files []JottaFile `xml:"files>file"`
}
/*
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/<folder>
<folder name="test" time="2018-07-24-T20:41:37Z" host="dn-158">
<path xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta/Sync</path>
<abspath xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta/Sync</abspath>
<folders>
<folder name="t2"/>
</folders>
<files>
<file name="block.csv" uuid="f6553cd4-1135-48fe-8e6a-bb9565c50ef2">
<currentRevision>
<number>1</number>
<state>COMPLETED</state>
<created>2018-07-05-T15:08:02Z</created>
<modified>2018-07-05-T15:08:02Z</modified>
<mime>application/octet-stream</mime>
<size>30827730</size>
<md5>1e8a7b728ab678048df00075c9507158</md5>
<updated>2018-07-24-T20:41:10Z</updated>
</currentRevision>
</file>
</files>
<metadata first="" max="" total="2" num_folders="1" num_files="1"/>
</folder>
*/
// JottaFolder represents a Jottacloud folder
type JottaFolder struct {
XMLName xml.Name
Name string `xml:"name,attr"`
Deleted Flag `xml:"deleted,attr"`
Path string `xml:"path"`
CreatedAt Time `xml:"created"`
ModifiedAt Time `xml:"modified"`
Updated Time `xml:"updated"`
Folders []JottaFolder `xml:"folders>folder"`
Files []JottaFile `xml:"files>file"`
}
/*
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/.../<file>
<file name="block.csv" uuid="f6553cd4-1135-48fe-8e6a-bb9565c50ef2">
<currentRevision>
<number>1</number>
<state>COMPLETED</state>
<created>2018-07-05-T15:08:02Z</created>
<modified>2018-07-05-T15:08:02Z</modified>
<mime>application/octet-stream</mime>
<size>30827730</size>
<md5>1e8a7b728ab678048df00075c9507158</md5>
<updated>2018-07-24-T20:41:10Z</updated>
</currentRevision>
</file>
*/
// JottaFile represents a Jottacloud file
type JottaFile struct {
XMLName xml.Name
Name string `xml:"name,attr"`
Deleted Flag `xml:"deleted,attr"`
PublicSharePath string `xml:"publicSharePath"`
State string `xml:"currentRevision>state"`
CreatedAt Time `xml:"currentRevision>created"`
ModifiedAt Time `xml:"currentRevision>modified"`
Updated Time `xml:"currentRevision>updated"`
Size int64 `xml:"currentRevision>size"`
MimeType string `xml:"currentRevision>mime"`
MD5 string `xml:"currentRevision>md5"`
}
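A cut-down decoding sketch for the file sample above; it keeps only the name, size and md5 fields so it stays short, but the nested currentRevision tags work the same way for the full struct.

package main

import (
	"encoding/xml"
	"fmt"
)

// jottaFile is a trimmed copy of JottaFile above.
type jottaFile struct {
	Name string `xml:"name,attr"`
	Size int64  `xml:"currentRevision>size"`
	MD5  string `xml:"currentRevision>md5"`
}

const sample = `<file name="block.csv" uuid="f6553cd4-1135-48fe-8e6a-bb9565c50ef2">
  <currentRevision>
    <size>30827730</size>
    <md5>1e8a7b728ab678048df00075c9507158</md5>
  </currentRevision>
</file>`

func main() {
	var f jottaFile
	if err := xml.Unmarshal([]byte(sample), &f); err != nil {
		panic(err)
	}
	fmt.Printf("%s %d %s\n", f.Name, f.Size, f.MD5) // block.csv 30827730 1e8a...
}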
// Error is a custom error type for wrapping Jottacloud error responses
type Error struct {
StatusCode int `xml:"code"`
Message string `xml:"message"`
Reason string `xml:"reason"`
Cause string `xml:"cause"`
}
// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("error %d", e.StatusCode)
if e.Message != "" {
out += ": " + e.Message
}
if e.Reason != "" {
out += fmt.Sprintf(" (%+v)", e.Reason)
}
return out
}
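A stand-alone sketch of the error path; the type is restated verbatim, and the XML body is invented, shaped to match the struct tags.

package main

import (
	"encoding/xml"
	"fmt"
)

// Error restated from above.
type Error struct {
	StatusCode int    `xml:"code"`
	Message    string `xml:"message"`
	Reason     string `xml:"reason"`
	Cause      string `xml:"cause"`
}

func (e *Error) Error() string {
	out := fmt.Sprintf("error %d", e.StatusCode)
	if e.Message != "" {
		out += ": " + e.Message
	}
	if e.Reason != "" {
		out += fmt.Sprintf(" (%+v)", e.Reason)
	}
	return out
}

func main() {
	// Invented error body matching the struct tags.
	body := `<error><code>404</code><message>no such file</message><reason>NOT_FOUND</reason></error>`
	apiErr := new(Error)
	if err := xml.Unmarshal([]byte(body), apiErr); err != nil {
		panic(err)
	}
	fmt.Println(apiErr) // error 404: no such file (NOT_FOUND)
}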

View File

@@ -1,29 +0,0 @@
package api
import (
"encoding/xml"
"testing"
"time"
)
func TestMountpointEmptyModificationTime(t *testing.T) {
mountpoint := `
<mountPoint time="2018-08-12-T09:58:24Z" host="dn-157">
<name xml:space="preserve">Sync</name>
<path xml:space="preserve">/foo/Jotta</path>
<abspath xml:space="preserve">/foo/Jotta</abspath>
<size>0</size>
<modified></modified>
<device>Jotta</device>
<user>foo</user>
<metadata first="" max="" total="0" num_folders="0" num_files="0"/>
</mountPoint>
`
var jf JottaFolder
if err := xml.Unmarshal([]byte(mountpoint), &jf); err != nil {
t.Fatal(err)
}
if !time.Time(jf.ModifiedAt).IsZero() {
t.Errorf("got non-zero time, want zero")
}
}

Some files were not shown because too many files have changed in this diff