mirror of https://github.com/rclone/rclone.git

Compare commits


1 commit

Author: Nick Craig-Wood
SHA1: 77b1eaeffe
Message: drive: implement --drive-untrash flag - FIXME needs docs
Date: 2018-07-18 22:35:56 +01:00
6972 changed files with 5182046 additions and 127064 deletions


@@ -4,9 +4,6 @@ os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\ncw\rclone clone_folder: c:\gopath\src\github.com\ncw\rclone
cache:
- '%LocalAppData%\go-build'
environment: environment:
GOPATH: C:\gopath GOPATH: C:\gopath
CPATH: C:\Program Files (x86)\WinFsp\inc\fuse CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
@@ -46,4 +43,4 @@ artifacts:
- path: build/*-v*.zip - path: build/*-v*.zip
deploy_script: deploy_script:
- IF "%APPVEYOR_REPO_NAME%" == "ncw/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload - IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload


@@ -1,31 +0,0 @@
<!--
Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
https://forum.rclone.org/
instead of filing an issue for a quick response.
If you are reporting a bug or asking for a new feature then please use one of the templates here:
https://github.com/ncw/rclone/issues/new
otherwise fill in the form below.
Thank you
The Rclone Developers
-->
#### Output of `rclone version`
#### Describe the issue


@@ -1,50 +0,0 @@
---
name: Bug report
about: Report a problem with rclone
---
<!--
Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
https://forum.rclone.org/
instead of filing an issue for a quick response.
If you think you might have found a bug, please can you try to replicate it with the latest beta?
https://beta.rclone.org/
If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
Thank you
The Rclone Developers
-->
#### What is the problem you are having with rclone?
#### What is your rclone version (output from `rclone version`)
#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
#### Which cloud storage system are you using? (eg Google Drive)
#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)


@@ -1,36 +0,0 @@
---
name: Feature request
about: Suggest a new feature or enhancement for rclone
---
<!--
Welcome :-)
So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
Here is a checklist of things to do:
1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum first: https://forum.rclone.org/
3. Make a feature request issue (this is the right place!).
4. Be prepared to get involved making the feature :-)
Looking forward to your great idea!
The Rclone Developers
-->
#### What is your current rclone version (output from `rclone version`)?
#### What problem are you are trying to solve?
#### How do you think rclone should be changed to solve that?


@@ -1,29 +0,0 @@
<!--
Thank you very much for contributing code or documentation to rclone! Please
fill out the following questions to make it easier for us to review your
changes.
You do not need to check all the boxes below all at once, feel free to take
your time and add more commits. If you're done and ready for review, please
check the last box.
-->
#### What is the purpose of this change?
<!--
Describe the changes here
-->
#### Was the change discussed in an issue or in the forum before?
<!--
Link issues and relevant forum posts here.
-->
#### Checklist
- [ ] I have read the [contribution guidelines](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
- [ ] I have added tests for all changes in this PR if appropriate.
- [ ] I have added documentation for the changes if appropriate.
- [ ] All commit messages are in [house style](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#commit-messages).
- [ ] I'm done, this Pull Request is ready for review :-)


@@ -4,12 +4,11 @@ dist: trusty
os: os:
- linux - linux
go: go:
- 1.8.x - 1.7.6
- 1.9.x - 1.8.7
- 1.10.x - 1.9.3
- 1.11.x - "1.10.1"
- tip - tip
go_import_path: github.com/ncw/rclone
before_install: before_install:
- if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi - if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
- if [[ $TRAVIS_OS_NAME == osx ]]; then brew update && brew tap caskroom/cask && brew cask install osxfuse ; fi - if [[ $TRAVIS_OS_NAME == osx ]]; then brew update && brew tap caskroom/cask && brew cask install osxfuse ; fi
@@ -34,25 +33,18 @@ addons:
- libfuse-dev - libfuse-dev
- rpm - rpm
- pkg-config - pkg-config
cache:
directories:
- $HOME/.cache/go-build
matrix: matrix:
allow_failures: allow_failures:
- go: tip - go: tip
include: include:
- os: osx - os: osx
go: 1.11.x go: "1.10.1"
env: GOTAGS="" env: GOTAGS=""
cache:
directories:
- $HOME/Library/Caches/go-build
deploy: deploy:
provider: script provider: script
script: make travis_beta script: make travis_beta
skip_cleanup: true skip_cleanup: true
on: on:
repo: ncw/rclone
all_branches: true all_branches: true
go: 1.11.x go: "1.10.1"
condition: $TRAVIS_PULL_REQUEST == false condition: $TRAVIS_PULL_REQUEST == false


@@ -21,19 +21,19 @@ with the [latest beta of rclone](https://beta.rclone.org/):
## Submitting a pull request ## ## Submitting a pull request ##
If you find a bug that you'd like to fix, or a new feature that you'd If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub. like to implement then please submit a pull request via Github.
If it is a big feature then make an issue first so it can be discussed. If it is a big feature then make an issue first so it can be discussed.
You'll need a Go environment set up with GOPATH set. See [the Go You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info. getting started docs](https://golang.org/doc/install) for more info.
First in your web browser press the fork button on [rclone's GitHub First in your web browser press the fork button on [rclone's Github
page](https://github.com/ncw/rclone). page](https://github.com/ncw/rclone).
Now in your terminal Now in your terminal
go get -u github.com/ncw/rclone go get github.com/ncw/rclone
cd $GOPATH/src/github.com/ncw/rclone cd $GOPATH/src/github.com/ncw/rclone
git remote rename origin upstream git remote rename origin upstream
git remote add origin git@github.com:YOURUSER/rclone.git git remote add origin git@github.com:YOURUSER/rclone.git
@@ -64,31 +64,22 @@ packages which you can install with
Make sure you Make sure you
* Add [documentation](#writing-documentation) for a new feature. * Add documentation for a new feature (see below for where)
* Follow the [commit message guidelines](#commit-messages). * Add unit tests for a new feature
* Add [unit tests](#testing) for a new feature
* squash commits down to one per feature * squash commits down to one per feature
* rebase to master with `git rebase master` * rebase to master `git rebase master`
When you are done with that When you are done with that
git push origin my-new-feature git push origin my-new-feature
Go to the GitHub website and click [Create pull Go to the Github website and click [Create pull
request](https://help.github.com/articles/creating-a-pull-request/). request](https://help.github.com/articles/creating-a-pull-request/).
You patch will get reviewed and you might get asked to fix some stuff. You patch will get reviewed and you might get asked to fix some stuff.
If so, then make the changes in the same branch, squash the commits, If so, then make the changes in the same branch, squash the commits,
rebase it to master then push it to GitHub with `--force`. rebase it to master then push it to Github with `--force`.
## Enabling CI for your fork ##
The CI config files for rclone have taken care of forks of the project, so you can enable CI for your fork repo easily.
rclone currently uses [Travis CI](https://travis-ci.org/), [AppVeyor](https://ci.appveyor.com/), and
[Circle CI](https://circleci.com/) to build the project. To enable them for your fork, simply go into their
websites, find your fork of rclone, and enable building there.
## Testing ## ## Testing ##
@@ -123,13 +114,6 @@ but they can be run against any of the remotes.
cd fs/operations cd fs/operations
go test -v -remote TestDrive: go test -v -remote TestDrive:
If you want to use the integration test framework to run these tests
all together with an HTML report and test retries then from the
project root:
go install github.com/ncw/rclone/fstest/test_all
test_all -backend drive
If you want to run all the integration tests against all the remotes, If you want to run all the integration tests against all the remotes,
then change into the project root and run then change into the project root and run
@@ -182,21 +166,17 @@ with modules beneath.
* pacer - retries with backoff and paces operations * pacer - retries with backoff and paces operations
* readers - a selection of useful io.Readers * readers - a selection of useful io.Readers
* rest - a thin abstraction over net/http for REST * rest - a thin abstraction over net/http for REST
* vendor - 3rd party code managed by `go mod` * vendor - 3rd party code managed by the dep tool
* vfs - Virtual FileSystem layer for implementing rclone mount and similar * vfs - Virtual FileSystem layer for implementing rclone mount and similar
## Writing Documentation ## ## Writing Documentation ##
If you are adding a new feature then please update the documentation. If you are adding a new feature then please update the documentation.
If you add a new general flag (not for a backend), then document it in If you add a new flag, then if it is a general flag, document it in
`docs/content/docs.md` - the flags there are supposed to be in `docs/content/docs.md` - the flags there are supposed to be in
alphabetical order. alphabetical order. If it is a remote specific flag, then document it
in `docs/content/remote.md`.
If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field. The first line of this is used
for the flag help, the remainder is shown to the user in `rclone
config` and is added to the docs with `make backenddocs`.
The only documentation you need to edit are the `docs/content/*.md` The only documentation you need to edit are the `docs/content/*.md`
files. The MANUAL.*, rclone.1, web site etc are all auto generated files. The MANUAL.*, rclone.1, web site etc are all auto generated
@@ -215,20 +195,14 @@ file.
## Commit messages ## ## Commit messages ##
Please make the first line of your commit message a summary of the Please make the first line of your commit message a summary of the
change that a user (not a developer) of rclone would like to read, and change, and prefix it with the directory of the change followed by a
prefix it with the directory of the change followed by a colon. The colon. The changelog gets made by looking at just these first lines
changelog gets made by looking at just these first lines so make it so make it good!
good!
If you have more to say about the commit, then enter a blank line and If you have more to say about the commit, then enter a blank line and
carry on the description. Remember to say why the change was needed - carry on the description. Remember to say why the change was needed -
the commit itself shows what was changed. the commit itself shows what was changed.
Writing more is better than less. Comparing the behaviour before the
change to that after the change is very useful. Imagine you are
writing to yourself in 12 months time when you've forgotten everything
about what you just did and you need to get up to speed quickly.
If the change fixes an issue then write `Fixes #1234` in the commit If the change fixes an issue then write `Fixes #1234` in the commit
message. This can be on the subject line if it will fit. If you message. This can be on the subject line if it will fit. If you
don't want to close the associated issue just put `#1234` and the don't want to close the associated issue just put `#1234` and the
@@ -255,53 +229,37 @@ Fixes #1498
## Adding a dependency ## ## Adding a dependency ##
rclone uses the [go rclone uses the [dep](https://github.com/golang/dep) tool to manage
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more) its dependencies. All code that rclone needs for building is stored
support in go1.11 and later to manage its dependencies. in the `vendor` directory for perfectly reproducable builds.
**NB** you must be using go1.11 or above to add a dependency to The `vendor` directory is entirely managed by the `dep` tool.
rclone. Rclone will still build with older versions of go, but we use
the `go mod` command for dependencies which is only in go1.11 and
above.
rclone can be built with modules outside of the GOPATH, but for To add a new dependency, run `dep ensure` and `dep` will pull in the
backwards compatibility with older go versions, rclone also maintains new dependency to the `vendor` directory and update the `Gopkg.lock`
a `vendor` directory with all the external code rclone needs for file.
building.
The `vendor` directory is entirely managed by the `go mod` tool, do You can add constraints on that package in the `Gopkg.toml` file (see
not add things manually. the `dep` documentation), but don't unless you really need to.
To add a dependency `github.com/ncw/new_dependency` see the Please check in the changes generated by `dep` including the `vendor`
instructions below. These will fetch the dependency, add it to directory and `Godep.toml` and `Godep.lock` in a single commit
`go.mod` and `go.sum` and vendor it for older go versions. separate from any other code changes. Watch out for new files in
`vendor`.
GO111MODULE=on go get github.com/ncw/new_dependency
GO111MODULE=on go mod vendor
You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.
Please check in the changes generated by `go mod` including the
`vendor` directory and `go.mod` and `go.sum` in a single commit
separate from any other code changes with the title "vendor: add
github.com/ncw/new_dependency". Remember to `git add` any new files
in `vendor`.
## Updating a dependency ## ## Updating a dependency ##
If you need to update a dependency then run If you need to update a dependency then run
GO111MODULE=on go get -u github.com/pkg/errors dep ensure -update github.com/pkg/errors
GO111MODULE=on go mod vendor
Check in in a single commit as above. Check in in a single commit as above.
## Updating all the dependencies ## ## Updating all the dependencies ##
In order to update all the dependencies then run `make update`. This In order to update all the dependencies then run `make update`. This
just uses the go modules to update all the modules to their latest just runs `dep ensure -update`. Check in the changes in a single
stable release. Check in the changes in a single commit as above. commit as above.
This should be done early in the release cycle to pick up new versions This should be done early in the release cycle to pick up new versions
of packages in time for them to get some testing. of packages in time for them to get some testing.
@@ -350,7 +308,7 @@ Unit tests
Integration tests Integration tests
* Add your backend to `fstest/test_all/config.yaml` * Add your fs to `fstest/test_all/test_all.go`
* Make sure integration tests pass with * Make sure integration tests pass with
* `cd fs/operations` * `cd fs/operations`
* `go test -v -remote TestRemote:` * `go test -v -remote TestRemote:`
@@ -365,8 +323,8 @@ See the [testing](#testing) section for more information on integration tests.
Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last. Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.
* `README.md` - main GitHub page * `README.md` - main Github page
* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`) * `docs/content/remote.md` - main docs page
* `docs/content/overview.md` - overview docs * `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section * `docs/content/docs.md` - list of remotes in config section
* `docs/content/about.md` - front page of rclone.org * `docs/content/about.md` - front page of rclone.org

Gopkg.lock (generated, new file, 463 lines)

@@ -0,0 +1,463 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
branch = "master"
name = "bazil.org/fuse"
packages = [
".",
"fs",
"fuseutil"
]
revision = "65cc252bf6691cb3c7014bcb2c8dc29de91e3a7e"
[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata"]
revision = "0fd7230b2a7505833d5f69b75cbd6c9582401479"
version = "v0.23.0"
[[projects]]
name = "github.com/Azure/azure-pipeline-go"
packages = ["pipeline"]
revision = "7571e8eb0876932ab505918ff7ed5107773e5ee2"
version = "0.1.7"
[[projects]]
branch = "master"
name = "github.com/Azure/azure-storage-blob-go"
packages = ["2018-03-28/azblob"]
revision = "eaae161d9d5e07363f04ddb19d84d57efc66d1a1"
[[projects]]
branch = "master"
name = "github.com/Unknwon/goconfig"
packages = ["."]
revision = "ef1e4c783f8f0478bd8bff0edb3dd0bade552599"
[[projects]]
name = "github.com/VividCortex/ewma"
packages = ["."]
revision = "b24eb346a94c3ba12c1da1e564dbac1b498a77ce"
version = "v1.1.1"
[[projects]]
branch = "master"
name = "github.com/a8m/tree"
packages = ["."]
revision = "3cf936ce15d6100c49d9c75f79c220ae7e579599"
[[projects]]
name = "github.com/abbot/go-http-auth"
packages = ["."]
revision = "0ddd408d5d60ea76e320503cc7dd091992dee608"
version = "v0.4.0"
[[projects]]
name = "github.com/aws/aws-sdk-go"
packages = [
"aws",
"aws/awserr",
"aws/awsutil",
"aws/client",
"aws/client/metadata",
"aws/corehandlers",
"aws/credentials",
"aws/credentials/ec2rolecreds",
"aws/credentials/endpointcreds",
"aws/credentials/stscreds",
"aws/csm",
"aws/defaults",
"aws/ec2metadata",
"aws/endpoints",
"aws/request",
"aws/session",
"aws/signer/v4",
"internal/sdkio",
"internal/sdkrand",
"internal/shareddefaults",
"private/protocol",
"private/protocol/eventstream",
"private/protocol/eventstream/eventstreamapi",
"private/protocol/query",
"private/protocol/query/queryutil",
"private/protocol/rest",
"private/protocol/restxml",
"private/protocol/xml/xmlutil",
"service/s3",
"service/s3/s3iface",
"service/s3/s3manager",
"service/sts"
]
revision = "bfc1a07cf158c30c41a3eefba8aae043d0bb5bff"
version = "v1.14.8"
[[projects]]
name = "github.com/billziss-gh/cgofuse"
packages = ["fuse"]
revision = "ea66f9809c71af94522d494d3d617545662ea59d"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/coreos/bbolt"
packages = ["."]
revision = "af9db2027c98c61ecd8e17caa5bd265792b9b9a2"
[[projects]]
name = "github.com/cpuguy83/go-md2man"
packages = ["md2man"]
revision = "20f5889cbdc3c73dbd2862796665e7c465ade7d1"
version = "v1.0.8"
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
name = "github.com/djherbis/times"
packages = ["."]
revision = "95292e44976d1217cf3611dc7c8d9466877d3ed5"
version = "v1.0.1"
[[projects]]
name = "github.com/dropbox/dropbox-sdk-go-unofficial"
packages = [
"dropbox",
"dropbox/async",
"dropbox/common",
"dropbox/file_properties",
"dropbox/files",
"dropbox/seen_state",
"dropbox/sharing",
"dropbox/team_common",
"dropbox/team_policies",
"dropbox/users",
"dropbox/users_common"
]
revision = "7afa861bfde5a348d765522b303b6fbd9d250155"
version = "v4.1.0"
[[projects]]
name = "github.com/go-ini/ini"
packages = ["."]
revision = "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5"
version = "v1.37.0"
[[projects]]
name = "github.com/golang/protobuf"
packages = ["proto"]
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/google/go-querystring"
packages = ["query"]
revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
[[projects]]
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
branch = "master"
name = "github.com/jlaffaye/ftp"
packages = ["."]
revision = "2403248fa8cc9f7909862627aa7337f13f8e0bf1"
[[projects]]
name = "github.com/jmespath/go-jmespath"
packages = ["."]
revision = "0b12d6b5"
[[projects]]
branch = "master"
name = "github.com/kardianos/osext"
packages = ["."]
revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
[[projects]]
name = "github.com/kr/fs"
packages = ["."]
revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
version = "v0.1.0"
[[projects]]
name = "github.com/mattn/go-runewidth"
packages = ["."]
revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
version = "v0.0.2"
[[projects]]
branch = "master"
name = "github.com/ncw/go-acd"
packages = ["."]
revision = "887eb06ab6a255fbf5744b5812788e884078620a"
[[projects]]
name = "github.com/ncw/swift"
packages = ["."]
revision = "b2a7479cf26fa841ff90dd932d0221cb5c50782d"
version = "v1.0.39"
[[projects]]
branch = "master"
name = "github.com/nsf/termbox-go"
packages = ["."]
revision = "5c94acc5e6eb520f1bcd183974e01171cc4c23b3"
[[projects]]
branch = "master"
name = "github.com/okzk/sdnotify"
packages = ["."]
revision = "ed8ca104421a21947710335006107540e3ecb335"
[[projects]]
name = "github.com/patrickmn/go-cache"
packages = ["."]
revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0"
version = "v2.1.0"
[[projects]]
name = "github.com/pengsrc/go-shared"
packages = [
"buffer",
"check",
"convert",
"log",
"reopen"
]
revision = "807ee759d82c84982a89fb3dc875ef884942f1e5"
version = "v0.2.0"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
name = "github.com/pkg/sftp"
packages = ["."]
revision = "57673e38ea946592a59c26592b7e6fbda646975b"
version = "1.8.0"
[[projects]]
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
name = "github.com/rfjakob/eme"
packages = ["."]
revision = "01668ae55fe0b79a483095689043cce3e80260db"
version = "v1.1"
[[projects]]
name = "github.com/russross/blackfriday"
packages = ["."]
revision = "55d61fa8aa702f59229e6cff85793c22e580eaf5"
version = "v1.5.1"
[[projects]]
branch = "master"
name = "github.com/sevlyar/go-daemon"
packages = ["."]
revision = "f9261e73885de99b1647d68bedadf2b9a99ad11f"
[[projects]]
branch = "master"
name = "github.com/skratchdot/open-golang"
packages = ["open"]
revision = "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"
[[projects]]
name = "github.com/spf13/cobra"
packages = [
".",
"doc"
]
revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
version = "v0.0.3"
[[projects]]
name = "github.com/spf13/pflag"
packages = ["."]
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
version = "v1.0.1"
[[projects]]
name = "github.com/stretchr/testify"
packages = [
"assert",
"require"
]
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
version = "v1.2.2"
[[projects]]
branch = "master"
name = "github.com/t3rm1n4l/go-mega"
packages = ["."]
revision = "57978a63bd3f91fa7e188b751a7e7e6dd4e33813"
[[projects]]
branch = "master"
name = "github.com/xanzy/ssh-agent"
packages = ["."]
revision = "ba9c9e33906f58169366275e3450db66139a31a9"
[[projects]]
name = "github.com/yunify/qingstor-sdk-go"
packages = [
".",
"config",
"logger",
"request",
"request/builder",
"request/data",
"request/errors",
"request/signer",
"request/unpacker",
"service",
"utils"
]
revision = "4f9ac88c5fec7350e960aabd0de1f1ede0ad2895"
version = "v2.2.14"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = [
"bcrypt",
"blowfish",
"curve25519",
"ed25519",
"ed25519/internal/edwards25519",
"internal/chacha20",
"internal/subtle",
"nacl/secretbox",
"pbkdf2",
"poly1305",
"salsa20/salsa",
"scrypt",
"ssh",
"ssh/agent",
"ssh/terminal"
]
revision = "027cca12c2d63e3d62b670d901e8a2c95854feec"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = [
"context",
"context/ctxhttp",
"html",
"html/atom",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"publicsuffix",
"webdav",
"webdav/internal/xml",
"websocket"
]
revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196"
[[projects]]
branch = "master"
name = "golang.org/x/oauth2"
packages = [
".",
"google",
"internal",
"jws",
"jwt"
]
revision = "1e0a3fa8ba9a5c9eb35c271780101fdaf1b205d7"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = [
"unix",
"windows"
]
revision = "6c888cc515d3ed83fc103cf1d84468aad274b0a7"
[[projects]]
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable"
]
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
name = "golang.org/x/time"
packages = ["rate"]
revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
[[projects]]
branch = "master"
name = "google.golang.org/api"
packages = [
"drive/v3",
"gensupport",
"googleapi",
"googleapi/internal/uritemplates",
"storage/v1"
]
revision = "2eea9ba0a3d94f6ab46508083e299a00bbbc65f6"
[[projects]]
name = "google.golang.org/appengine"
packages = [
".",
"internal",
"internal/app_identity",
"internal/base",
"internal/datastore",
"internal/log",
"internal/modules",
"internal/remote_api",
"internal/urlfetch",
"urlfetch"
]
revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
version = "v1.1.0"
[[projects]]
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "670cdb55138aa1394b4c8f87345e9be9c8105248edda4be7176dddee2a4f5d26"
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml (new file, 15 lines)

@@ -0,0 +1,15 @@
# pin this to master to pull in the macOS changes
# can likely remove for 1.43
[[override]]
branch = "master"
name = "github.com/sevlyar/go-daemon"
# pin this to master to pull in the fix for linux/mips
# can likely remove for 1.43
[[override]]
branch = "master"
name = "github.com/coreos/bbolt"
[[constraint]]
branch = "master"
name = "github.com/Azure/azure-storage-blob-go"

ISSUE_TEMPLATE.md (new file, 43 lines)

@@ -0,0 +1,43 @@
<!--
Hi!
We understand you are having a problem with rclone or have an idea for an improvement - we want to help you with that!
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum
https://forum.rclone.org/
instead of filing an issue. We'll reply quickly and it won't increase our massive issue backlog.
If you think you might have found a bug, please can you try to replicate it with the latest beta?
https://beta.rclone.org/
If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
If you have an idea for an improvement, then please search the old issues first and if you don't find your idea, make a new issue.
Thanks
The Rclone Developers
-->
#### What is the problem you are having with rclone?
#### What is your rclone version (eg output from `rclone -V`)
#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
#### Which cloud storage system are you using? (eg Google Drive)
#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)


@@ -7,8 +7,6 @@ Current active maintainers of rclone are
* Ishuah Kariuki @ishuah * Ishuah Kariuki @ishuah
* Remus Bunduc @remusb - cache subsystem maintainer * Remus Bunduc @remusb - cache subsystem maintainer
* Fabian Möller @B4dM4n * Fabian Möller @B4dM4n
* Alex Chen @Cnly
* Sandeep Ummadi @sandeepkru
**This is a work in progress Draft** **This is a work in progress Draft**
@@ -58,7 +56,7 @@ Close tickets as soon as you can - make sure they are tagged with a release. Po
Try to process pull requests promptly! Try to process pull requests promptly!
Merging pull requests on GitHub itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message. Merging pull requests on Github itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`. After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.

File diff suppressed because it is too large

MANUAL.md (4878 lines)

File diff suppressed because it is too large

MANUAL.txt (4927 lines)

File diff suppressed because it is too large


@@ -1,9 +1,5 @@
SHELL = bash SHELL = bash
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD)) BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
LAST_TAG := $(shell git describe --tags --abbrev=0)
ifeq ($(BRANCH),$(LAST_TAG))
BRANCH := master
endif
TAG_BRANCH := -$(BRANCH) TAG_BRANCH := -$(BRANCH)
BRANCH_PATH := branch/ BRANCH_PATH := branch/
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),) ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
@@ -11,14 +7,12 @@ ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
BRANCH_PATH := BRANCH_PATH :=
endif endif
TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH) TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
LAST_TAG := $(shell git describe --tags --abbrev=0)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)') NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
ifneq ($(TAG),$(LAST_TAG))
TAG := $(TAG)-beta
endif
GO_VERSION := $(shell go version) GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ ) GO_FILES := $(shell go list ./... | grep -v /vendor/ )
# Run full tests if go >= go1.11 # Run full tests if go >= go1.9
FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 11)') FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 9)')
BETA_PATH := $(BRANCH_PATH)$(TAG) BETA_PATH := $(BRANCH_PATH)$(TAG)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/ BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org BETA_UPLOAD_ROOT := memstore:beta-rclone-org
@@ -50,9 +44,10 @@ version:
# Full suite of integration tests # Full suite of integration tests
test: rclone test: rclone
go install --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/ncw/rclone/fstest/test_all go install github.com/ncw/rclone/fstest/test_all
-test_all 2>&1 | tee test_all.log -go test -v -count 1 $(BUILDTAGS) $(GO_FILES) 2>&1 | tee test.log
@echo "Written logs in test_all.log" -test_all github.com/ncw/rclone/fs/operations github.com/ncw/rclone/fs/sync 2>&1 | tee fs/test_all.log
@echo "Written logs in test.log and fs/test_all.log"
# Quick test # Quick test
quicktest: quicktest:
@@ -87,7 +82,8 @@ build_dep:
ifdef FULL_TESTS ifdef FULL_TESTS
go get -u github.com/kisielk/errcheck go get -u github.com/kisielk/errcheck
go get -u golang.org/x/tools/cmd/goimports go get -u golang.org/x/tools/cmd/goimports
go get -u golang.org/x/lint/golint go get -u github.com/golang/lint/golint
go get -u github.com/tools/godep
endif endif
# Get the release dependencies # Get the release dependencies
@@ -97,16 +93,15 @@ release_dep:
# Update dependencies # Update dependencies
update: update:
GO111MODULE=on go get -u ./... go get -u github.com/golang/dep/cmd/dep
GO111MODULE=on go mod tidy dep ensure -update -v
GO111MODULE=on go mod vendor
doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs doc: rclone.1 MANUAL.html MANUAL.txt
rclone.1: MANUAL.md rclone.1: MANUAL.md
pandoc -s --from markdown --to man MANUAL.md -o rclone.1 pandoc -s --from markdown --to man MANUAL.md -o rclone.1
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs
./bin/make_manual.py ./bin/make_manual.py
MANUAL.html: MANUAL.md MANUAL.html: MANUAL.md
@@ -118,9 +113,6 @@ MANUAL.txt: MANUAL.md
commanddocs: rclone commanddocs: rclone
rclone gendocs docs/content/commands/ rclone gendocs docs/content/commands/
backenddocs: rclone bin/make_backend_docs.py
./bin/make_backend_docs.py
rcdocs: rclone rcdocs: rclone
bin/make_rc_docs.sh bin/make_rc_docs.sh
@@ -154,8 +146,8 @@ check_sign:
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
upload: upload:
rclone -P copy build/ memstore:downloads-rclone-org/$(TAG) rclone -v copy --exclude '*current*' build/ memstore:downloads-rclone-org/$(TAG)
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"' rclone -v copy --include '*current*' --include version.txt build/ memstore:downloads-rclone-org
upload_github: upload_github:
./bin/upload-github $(TAG) ./bin/upload-github $(TAG)
@@ -164,16 +156,16 @@ cross: doc
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG) go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
beta: beta:
go run bin/cross-compile.go $(BUILDTAGS) $(TAG) go run bin/cross-compile.go $(BUILDTAGS) $(TAG)β
rclone -v copy build/ memstore:pub-rclone-org/$(TAG) rclone -v copy build/ memstore:pub-rclone-org/$(TAG)β
@echo Beta release ready at https://pub.rclone.org/$(TAG)/ @echo Beta release ready at https://pub.rclone.org/$(TAG)%CE%B2/
log_since_last_release: log_since_last_release:
git log $(LAST_TAG).. git log $(LAST_TAG)..
compile_all: compile_all:
ifdef FULL_TESTS ifdef FULL_TESTS
go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG) go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)β
else else
@echo Skipping compile all as version of go too old @echo Skipping compile all as version of go too old
endif endif
@@ -195,16 +187,19 @@ ifeq ($(TRAVIS_OS_NAME),linux)
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz' go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
endif endif
git log $(LAST_TAG).. > /tmp/git-log.txt git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG) go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)β
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD) rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT) rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif endif
@echo Beta release ready at $(BETA_URL) @echo Beta release ready at $(BETA_URL)
# Fetch the binary builds from travis and appveyor # Fetch the windows builds from appveyor
fetch_binaries: fetch_windows:
rclone -P sync $(BETA_UPLOAD) build/ rclone -v copy --include 'rclone-v*-windows-*.zip' $(BETA_UPLOAD) build/
-#cp -av build/rclone-v*-windows-386.zip build/rclone-current-windows-386.zip
-#cp -av build/rclone-v*-windows-amd64.zip build/rclone-current-windows-amd64.zip
md5sum build/rclone-*-windows-*.zip | sort
serve: website serve: website
cd docs && hugo server -v -w cd docs && hugo server -v -w
@@ -215,10 +210,10 @@ tag: doc
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html
git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG) git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG)
bin/make_changelog.py $(LAST_TAG) $(NEW_TAG) > docs/content/changelog.md.new
mv docs/content/changelog.md.new docs/content/changelog.md
@echo "Edit the new changelog in docs/content/changelog.md" @echo "Edit the new changelog in docs/content/changelog.md"
@echo "Then commit all the changes" @echo " * $(NEW_TAG) -" `date -I` >> docs/content/changelog.md
@git log $(LAST_TAG)..$(NEW_TAG) --oneline >> docs/content/changelog.md
@echo "Then commit the changes"
@echo git commit -m \"Version $(NEW_TAG)\" -a -v @echo git commit -m \"Version $(NEW_TAG)\" -a -v
@echo "And finally run make retag before make cross etc" @echo "And finally run make retag before make cross etc"
@@ -231,3 +226,4 @@ startdev:
winzip: winzip:
zip -9 rclone-$(TAG).zip rclone.exe zip -9 rclone-$(TAG).zip rclone.exe


@@ -2,11 +2,10 @@
[Website](https://rclone.org) | [Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) | [Documentation](https://rclone.org/docs/) |
[Download](https://rclone.org/downloads/) |
[Contributing](CONTRIBUTING.md) | [Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) | [Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) | [Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/) | [Forum](https://forum.rclone.org/)
[G+](https://google.com/+RcloneOrg) [G+](https://google.com/+RcloneOrg)
[![Build Status](https://travis-ci.org/ncw/rclone.svg?branch=master)](https://travis-ci.org/ncw/rclone) [![Build Status](https://travis-ci.org/ncw/rclone.svg?branch=master)](https://travis-ci.org/ncw/rclone)
@@ -14,81 +13,49 @@
[![CircleCI](https://circleci.com/gh/ncw/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/ncw/rclone/tree/master) [![CircleCI](https://circleci.com/gh/ncw/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/ncw/rclone/tree/master)
[![GoDoc](https://godoc.org/github.com/ncw/rclone?status.svg)](https://godoc.org/github.com/ncw/rclone) [![GoDoc](https://godoc.org/github.com/ncw/rclone?status.svg)](https://godoc.org/github.com/ncw/rclone)
# Rclone Rclone is a command line program to sync files and directories to and from
Rclone *("rsync for cloud storage")* is a command line program to sync files and directories to and from different cloud storage providers. * Amazon Drive ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 / Dreamhost / Ceph / Minio / Wasabi
* Backblaze B2
* Box
* Dropbox
* FTP
* Google Cloud Storage
* Google Drive
* HTTP
* Hubic
* Mega
* Microsoft Azure Blob Storage
* Microsoft OneDrive
* OpenDrive
* Openstack Swift / Rackspace cloud files / Memset Memstore / OVH / Oracle Cloud Storage
* pCloud
* QingStor
* SFTP
* Webdav / Owncloud / Nextcloud
* Yandex Disk
* The local filesystem
## Storage providers Features
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* Openstack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* put.io [:page_facing_up:](https://rclone.org/webdav/#put-io)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
## Features
* MD5/SHA1 hashes checked at all times for file integrity * MD5/SHA1 hashes checked at all times for file integrity
* Timestamps preserved on files * Timestamps preserved on files
* Partial syncs supported on a whole file basis * Partial syncs supported on a whole file basis
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files * Copy mode to just copy new/changed files
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical * Sync (one way) mode to make a directory identical
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality * Check mode to check for file hash equality
* Can sync to and from network, eg two different cloud accounts * Can sync to and from network, eg two different cloud accounts
* Optional encryption ([Crypt](https://rclone.org/crypt/)) * Optional encryption (Crypt)
* Optional cache ([Cache](https://rclone.org/cache/)) * Optional FUSE mount
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
## Installation & documentation See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.
Please see the [rclone website](https://rclone.org/) for: * https://rclone.org/
* [Installation](https://rclone.org/install/)
* [Documentation & configuration](https://rclone.org/docs/)
* [Changelog](https://rclone.org/changelog/)
* [FAQ](https://rclone.org/faq/)
* [Storage providers](https://rclone.org/overview/)
* [Forum](https://forum.rclone.org/)
* ...and more
## Downloads
* https://rclone.org/downloads/
License License
------- -------
This is free software under the terms of MIT the license (check the This is free software under the terms of MIT the license (check the
[COPYING file](/rclone/COPYING) included in this package). COPYING file included in this package).


@@ -13,9 +13,14 @@ Making a release
* git status - to check for new man pages - git add them * git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX" * git commit -a -v -m "Version v1.XX"
* make retag * make retag
* make release_dep
* # Set the GOPATH for a current stable go compiler
* make cross
* git checkout docs/content/commands # to undo date changes in commands
* git push --tags origin master * git push --tags origin master
* # Wait for the appveyor and travis builds to complete then... * git push --tags origin master:stable # update the stable branch for packager.io
* make fetch_binaries * # Wait for the appveyor and travis builds to complete then fetch the windows binaries from appveyor
* make fetch_windows
* make tarball * make tarball
* make sign_upload * make sign_upload
* make check_sign * make check_sign
@@ -26,45 +31,11 @@ Making a release
* # announce with forum post, twitter post, G+ post * # announce with forum post, twitter post, G+ post
Early in the next release cycle update the vendored dependencies Early in the next release cycle update the vendored dependencies
* Review any pinned packages in go.mod and remove if possible * Review any pinned packages in Gopkg.toml and remove if possible
* make update * make update
* git status * git status
* git add new files * git add new files
* carry forward any patches to vendor stuff
* git commit -a -v * git commit -a -v
If `make update` fails with errors like this: Make the version number be just in a file?
```
# github.com/cpuguy83/go-md2man/md2man
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
```
Can be fixed with
* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
* GO111MODULE=on go mod tidy
* GO111MODULE=on go mod vendor
Making a point release. If rclone needs a point release due to some
horrendous bug, then
* git branch v1.XX v1.XX-fixes
* git cherry-pick any fixes
* Test (see above)
* make NEW_TAG=v1.XX.1 tag
* edit docs/content/changelog.md
* make TAG=v1.43.1 doc
* git commit -a -v -m "Version v1.XX.1"
* git tag -d -v1.XX.1
* git tag -s -m "Version v1.XX.1" v1.XX.1
* git push --tags -u origin v1.XX-fixes
* make BRANCH_PATH= TAG=v1.43.1 fetch_binaries
* make TAG=v1.43.1 tarball
* make TAG=v1.43.1 sign_upload
* make TAG=v1.43.1 check_sign
* make TAG=v1.43.1 upload
* make TAG=v1.43.1 upload_website
* make TAG=v1.43.1 upload_github
* NB this overwrites the current beta so after the release, rebuild the last travis build
* Announce!


@@ -2,12 +2,13 @@ package alias
import ( import (
"errors" "errors"
"path"
"path/filepath"
"strings" "strings"
"github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap" "github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fspath"
) )
// Register with Fs // Register with Fs
@@ -46,9 +47,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if strings.HasPrefix(opt.Remote, name+":") { if strings.HasPrefix(opt.Remote, name+":") {
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting") return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
} }
fsInfo, configName, fsPath, config, err := fs.ConfigFs(opt.Remote) _, configName, fsPath, err := fs.ParseRemote(opt.Remote)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return fsInfo.NewFs(configName, fspath.JoinRootPath(fsPath, root), config) root = path.Join(fsPath, filepath.ToSlash(root))
if configName == "local" {
return fs.NewFs(root)
}
return fs.NewFs(configName + ":" + root)
} }


@@ -15,7 +15,6 @@ import (
_ "github.com/ncw/rclone/backend/googlecloudstorage" _ "github.com/ncw/rclone/backend/googlecloudstorage"
_ "github.com/ncw/rclone/backend/http" _ "github.com/ncw/rclone/backend/http"
_ "github.com/ncw/rclone/backend/hubic" _ "github.com/ncw/rclone/backend/hubic"
_ "github.com/ncw/rclone/backend/jottacloud"
_ "github.com/ncw/rclone/backend/local" _ "github.com/ncw/rclone/backend/local"
_ "github.com/ncw/rclone/backend/mega" _ "github.com/ncw/rclone/backend/mega"
_ "github.com/ncw/rclone/backend/onedrive" _ "github.com/ncw/rclone/backend/onedrive"
@@ -25,7 +24,6 @@ import (
_ "github.com/ncw/rclone/backend/s3" _ "github.com/ncw/rclone/backend/s3"
_ "github.com/ncw/rclone/backend/sftp" _ "github.com/ncw/rclone/backend/sftp"
_ "github.com/ncw/rclone/backend/swift" _ "github.com/ncw/rclone/backend/swift"
_ "github.com/ncw/rclone/backend/union"
_ "github.com/ncw/rclone/backend/webdav" _ "github.com/ncw/rclone/backend/webdav"
_ "github.com/ncw/rclone/backend/yandex" _ "github.com/ncw/rclone/backend/yandex"
) )


@@ -98,41 +98,12 @@ func init() {
Advanced: true, Advanced: true,
}, { }, {
Name: "upload_wait_per_gb", Name: "upload_wait_per_gb",
Help: `Additional time per GB to wait after a failed complete upload to see if it appears. Help: "Additional time per GB to wait after a failed complete upload to see if it appears.",
Sometimes Amazon Drive gives an error when a file has been fully
uploaded but the file appears anyway after a little while. This
happens sometimes for files over 1GB in size and nearly every time for
files bigger than 10GB. This parameter controls the time rclone waits
for the file to appear.
The default value for this parameter is 3 minutes per GB, so by
default it will wait 3 minutes for every GB uploaded to see if the
file appears.
You can disable this feature by setting it to 0. This may cause
conflict errors as rclone retries the failed upload but the file will
most likely appear correctly eventually.
These values were determined empirically by observing lots of uploads
of big files for a range of file sizes.
Upload with the "-v" flag to see more info about what rclone is doing
in this situation.`,
Default: fs.Duration(180 * time.Second), Default: fs.Duration(180 * time.Second),
Advanced: true, Advanced: true,
}, { }, {
Name: "templink_threshold", Name: "templink_threshold",
Help: `Files >= this size will be downloaded via their tempLink. Help: "Files >= this size will be downloaded via their tempLink.",
Files this size or more will be downloaded via their "tempLink". This
is to work around a problem with Amazon Drive which blocks downloads
of files bigger than about 10GB. The default for this is 9GB which
shouldn't need to be changed.
To download files above this threshold, rclone requests a "tempLink"
which downloads the file through a temporary URL directly from the
underlying S3 storage.`,
Default: defaultTempLinkThreshold, Default: defaultTempLinkThreshold,
Advanced: true, Advanced: true,
}}, }},
@@ -312,16 +283,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil { if err != nil {
// Assume it is a file // Assume it is a file
newRoot, remote := dircache.SplitPath(root) newRoot, remote := dircache.SplitPath(root)
tempF := *f newF := *f
tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF) newF.dirCache = dircache.New(newRoot, f.trueRootID, &newF)
tempF.root = newRoot newF.root = newRoot
// Make new Fs which is the parent // Make new Fs which is the parent
err = tempF.dirCache.FindRoot(false) err = newF.dirCache.FindRoot(false)
if err != nil { if err != nil {
// No root so return old f // No root so return old f
return f, nil return f, nil
} }
_, err := tempF.newObjectWithInfo(remote, nil) _, err := newF.newObjectWithInfo(remote, nil)
if err != nil { if err != nil {
if err == fs.ErrorObjectNotFound { if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f // File doesn't exist so return old f
@@ -329,13 +300,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} }
return nil, err return nil, err
} }
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/ncw/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent // return an error with an fs which points to the parent
return f, fs.ErrorIsFile return &newF, fs.ErrorIsFile
} }
return f, nil return f, nil
} }
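
One side of the hunk above carries a comment explaining why it copies `dirCache` and `root` back onto the original `f` instead of returning the temporary `tempF`: the `features` table has already been filled with functions bound to the original `*Fs` receiver (see the referenced issue #2182). The following is a minimal, self-contained Go sketch of that pitfall, using made-up names (`Fs.describe`, `newFs`) rather than rclone's real types:

```go
package main

import "fmt"

type Fs struct {
	root     string
	describe func() string // stands in for the "features" filled with *Fs-bound functions
}

func newFs(root string) *Fs {
	f := &Fs{root: root}
	f.describe = func() string { return "root=" + f.root } // closure keeps pointing at f
	return f
}

func main() {
	f := newFs("container/dir/file.txt")

	// Pitfall: mutate a copy and hand the copy back - the captured closure
	// still reads the original struct, so it reports a stale root.
	tempF := *f
	tempF.root = "container/dir"
	fmt.Println(tempF.describe()) // prints root=container/dir/file.txt (stale)

	// Fix shown in the diff: copy the computed fields back onto the original.
	f.root = tempF.root
	fmt.Println(f.describe()) // prints root=container/dir
}
```

Because `describe` closes over the original pointer, only updating `f` in place keeps the callbacks and the returned `Fs` consistent.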
@@ -1274,38 +1240,24 @@ func (o *Object) MimeType() string {
// Automatically restarts itself in case of unexpected behaviour of the remote. // Automatically restarts itself in case of unexpected behaviour of the remote.
// //
// Close the returned channel to stop being notified. // Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) { func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
checkpoint := f.opt.Checkpoint checkpoint := f.opt.Checkpoint
quit := make(chan bool)
go func() { go func() {
var ticker *time.Ticker
var tickerC <-chan time.Time
for { for {
select {
case pollInterval, ok := <-pollIntervalChan:
if !ok {
if ticker != nil {
ticker.Stop()
}
return
}
if pollInterval == 0 {
if ticker != nil {
ticker.Stop()
ticker, tickerC = nil, nil
}
} else {
ticker = time.NewTicker(pollInterval)
tickerC = ticker.C
}
case <-tickerC:
checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint) checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil { if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
fs.Debugf(f, "Unable to save checkpoint: %v", err) fs.Debugf(f, "Unable to save checkpoint: %v", err)
} }
select {
case <-quit:
return
case <-time.After(pollInterval):
} }
} }
}() }()
return quit
} }
func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string { func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string {
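
The two `ChangeNotify` signatures in the hunk above control polling differently: one takes a fixed `pollInterval` and returns a quit channel, the other receives intervals on a `<-chan time.Duration`, letting the caller retune, pause (send 0) or stop (close the channel) polling. Below is a runnable sketch of the channel-driven ticker loop, with hypothetical names (`poller`, `doPoll`) standing in for the backend's notify machinery:

```go
package main

import (
	"fmt"
	"time"
)

// poller runs doPoll on a ticker whose interval is driven by pollIntervalChan:
// sending a duration (re)starts the ticker, sending 0 pauses it, and closing
// the channel stops the goroutine. Illustrative only, not rclone's API.
func poller(doPoll func(), pollIntervalChan <-chan time.Duration) {
	go func() {
		var ticker *time.Ticker
		var tickerC <-chan time.Time
		for {
			select {
			case interval, ok := <-pollIntervalChan:
				if !ok { // channel closed: stop polling
					if ticker != nil {
						ticker.Stop()
					}
					return
				}
				if interval == 0 { // pause
					if ticker != nil {
						ticker.Stop()
						ticker, tickerC = nil, nil
					}
				} else { // (re)start with the new interval
					ticker = time.NewTicker(interval)
					tickerC = ticker.C
				}
			case <-tickerC: // nil while paused, so this case blocks until restarted
				doPoll()
			}
		}
	}()
}

func main() {
	intervals := make(chan time.Duration)
	poller(func() { fmt.Println("polled at", time.Now().Format("15:04:05.000")) }, intervals)
	intervals <- 100 * time.Millisecond // start polling
	time.Sleep(350 * time.Millisecond)
	close(intervals) // stop polling
	time.Sleep(50 * time.Millisecond)
}
```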



@@ -7,7 +7,6 @@ package azureblob
import ( import (
"bytes" "bytes"
"context" "context"
"crypto/md5"
"encoding/base64" "encoding/base64"
"encoding/binary" "encoding/binary"
"encoding/hex" "encoding/hex"
@@ -22,7 +21,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/Azure/azure-storage-blob-go/azblob" "github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
"github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting" "github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap" "github.com/ncw/rclone/fs/config/configmap"
@@ -38,18 +37,17 @@ const (
minSleep = 10 * time.Millisecond
maxSleep = 10 * time.Second
decayConstant = 1 // bigger for slower decay, exponential
- maxListChunkSize = 5000 // number of items to read at once
+ listChunkSize = 5000 // number of items to read at once
modTimeKey = "mtime"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
maxTotalParts = 50000 // in multipart upload
storageDefaultBaseURL = "blob.core.windows.net"
// maxUncommittedSize = 9 << 30 // can't upload bigger than this
- defaultChunkSize = 4 * fs.MebiByte
+ defaultChunkSize = 4 * 1024 * 1024
- maxChunkSize = 100 * fs.MebiByte
+ maxChunkSize = 100 * 1024 * 1024
- defaultUploadCutoff = 256 * fs.MebiByte
+ defaultUploadCutoff = 256 * 1024 * 1024
- maxUploadCutoff = 256 * fs.MebiByte
+ maxUploadCutoff = 256 * 1024 * 1024
- defaultAccessTier = azblob.AccessTierNone
)
// Register with Fs
@@ -73,45 +71,14 @@ func init() {
Advanced: true, Advanced: true,
}, { }, {
Name: "upload_cutoff", Name: "upload_cutoff",
Help: "Cutoff for switching to chunked upload (<= 256MB).", Help: "Cutoff for switching to chunked upload.",
Default: fs.SizeSuffix(defaultUploadCutoff), Default: fs.SizeSuffix(defaultUploadCutoff),
Advanced: true, Advanced: true,
}, { }, {
Name: "chunk_size", Name: "chunk_size",
Help: `Upload chunk size (<= 100MB). Help: "Upload chunk size. Must fit in memory.",
Note that this is stored in memory and there may be up to
"--transfers" chunks stored at once in memory.`,
Default: fs.SizeSuffix(defaultChunkSize), Default: fs.SizeSuffix(defaultChunkSize),
Advanced: true, Advanced: true,
}, {
Name: "list_chunk",
Help: `Size of blob list.
This sets the number of blobs requested in each listing chunk. Default
is the maximum, 5000. "List blobs" requests are permitted 2 minutes
per megabyte to complete. If an operation is taking longer than 2
minutes per megabyte on average, it will time out (
[source](https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-blob-service-operations#exceptions-to-default-timeout-interval)
). This can be used to limit the number of blobs items to return, to
avoid the time out.`,
Default: maxListChunkSize,
Advanced: true,
}, {
Name: "access_tier",
Help: `Access tier of blob: hot, cool or archive.
Archived blobs can be restored by setting access tier to hot or
cool. Leave blank if you intend to use default access tier, which is
set at account level
If there is no "access tier" specified, rclone doesn't apply any tier.
rclone performs "Set Tier" operation on blobs while uploading, if objects
are not modified, specifying "access tier" to new one will have no effect.
If blobs are in "archive tier" at remote, trying to perform data transfer
operations from remote will not be allowed. User should first restore by
tiering blob to "Hot" or "Cool".`,
Advanced: true,
}}, }},
}) })
} }
@@ -124,8 +91,6 @@ type Options struct {
SASURL string `config:"sas_url"` SASURL string `config:"sas_url"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"` ChunkSize fs.SizeSuffix `config:"chunk_size"`
ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"`
} }
// Fs represents a remote azure server // Fs represents a remote azure server
@@ -185,7 +150,7 @@ func (f *Fs) Features() *fs.Features {
}
// Pattern to match a azure path
- var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
+ var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parseParse parses a azure 'url'
func parsePath(path string) (container, directory string, err error) {
@@ -199,19 +164,6 @@ func parsePath(path string) (container, directory string, err error) {
return return
} }
// validateAccessTier checks if azureblob supports user supplied tier
func validateAccessTier(tier string) bool {
switch tier {
case string(azblob.AccessTierHot),
string(azblob.AccessTierCool),
string(azblob.AccessTierArchive):
// valid cases
return true
default:
return false
}
}
// retryErrorCodes is a slice of error codes that we will retry // retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{ var retryErrorCodes = []int{
401, // Unauthorized (eg "Token has expired") 401, // Unauthorized (eg "Token has expired")
@@ -237,40 +189,6 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
return fserrors.ShouldRetry(err), err return fserrors.ShouldRetry(err), err
} }
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.Byte
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
if cs > maxChunkSize {
return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return errors.Errorf("%v must be less than or equal to %v", cs, maxUploadCutoff)
}
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
// NewFs contstructs an Fs from the path, container:path // NewFs contstructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct // Parse config into Options struct
@@ -280,16 +198,11 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
- err = checkUploadCutoff(opt.UploadCutoff)
- if err != nil {
- return nil, errors.Wrap(err, "azure: upload cutoff")
+ if opt.UploadCutoff > maxUploadCutoff {
+ return nil, errors.Errorf("azure: upload cutoff (%v) must be less than or equal to %v", opt.UploadCutoff, maxUploadCutoff)
}
- err = checkUploadChunkSize(opt.ChunkSize)
- if err != nil {
- return nil, errors.Wrap(err, "azure: chunk size")
+ if opt.ChunkSize > maxChunkSize {
+ return nil, errors.Errorf("azure: chunk size can't be greater than %v - was %v", maxChunkSize, opt.ChunkSize)
- }
- if opt.ListChunkSize > maxListChunkSize {
- return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
}
container, directory, err := parsePath(root)
if err != nil {
@@ -299,13 +212,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
opt.Endpoint = storageDefaultBaseURL opt.Endpoint = storageDefaultBaseURL
} }
if opt.AccessTier == "" {
opt.AccessTier = string(defaultAccessTier)
} else if !validateAccessTier(opt.AccessTier) {
return nil, errors.Errorf("Azure Blob: Supported access tiers are %s, %s and %s",
string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
}
var ( var (
u *url.URL u *url.URL
serviceURL azblob.ServiceURL serviceURL azblob.ServiceURL
@@ -313,11 +219,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
)
switch {
case opt.Account != "" && opt.Key != "":
- credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
+ credential := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
- if err != nil {
- return nil, errors.Wrapf(err, "Failed to parse credentials")
- }
u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
if err != nil {
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
@@ -363,8 +265,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ReadMimeType: true, ReadMimeType: true,
WriteMimeType: true, WriteMimeType: true,
BucketBased: true, BucketBased: true,
SetTier: true,
GetTier: true,
}).Fill(f) }).Fill(f)
if f.root != "" { if f.root != "" {
f.root += "/" f.root += "/"
@@ -554,7 +454,7 @@ func (f *Fs) markContainerOK() {
// listDir lists a single directory
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
- err = f.list(dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
+ err = f.list(dir, false, listChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
@@ -625,7 +525,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
- err = f.list(dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
+ err = f.list(dir, true, listChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
@@ -646,11 +546,11 @@ type listContainerFn func(*azblob.ContainerItem) error
// listContainersToFn lists the containers to the function supplied
func (f *Fs) listContainersToFn(fn listContainerFn) error {
params := azblob.ListContainersSegmentOptions{
- MaxResults: int32(f.opt.ListChunkSize),
+ MaxResults: int32(listChunkSize),
}
ctx := context.Background()
for marker := (azblob.Marker{}); marker.NotDone(); {
- var response *azblob.ListContainersSegmentResponse
+ var response *azblob.ListContainersResponse
err := f.pacer.Call(func() (bool, error) {
var err error
response, err = f.svcURL.ListContainersSegment(ctx, marker, params)
@@ -832,7 +732,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
var startCopy *azblob.BlobStartCopyFromURLResponse
err = f.pacer.Call(func() (bool, error) {
- startCopy, err = dstBlobURL.StartCopyFromURL(ctx, *source, nil, azblob.ModifiedAccessConditions{}, options)
+ startCopy, err = dstBlobURL.StartCopyFromURL(ctx, *source, nil, options, options)
return f.shouldRetry(err)
})
if err != nil {
@@ -917,8 +817,9 @@ func (o *Object) setMetadata(metadata azblob.Metadata) {
// o.md5
// o.meta
func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetPropertiesResponse) (err error) {
- // NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
- // this as base64 encoded string.
+ // NOTE - In BlobGetPropertiesResponse, Client library returns MD5 as base64 decoded string
+ // unlike BlobProperties in BlobItem (used in decodeMetadataFromBlob) which returns base64
+ // encoded bytes. Object needs to maintain this as base64 encoded string.
o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
o.mimeType = info.ContentType()
o.size = info.ContentLength()
@@ -930,9 +831,7 @@ func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetProper
}
func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
- // NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
- // this as base64 encoded string.
- o.md5 = base64.StdEncoding.EncodeToString(info.Properties.ContentMD5)
+ o.md5 = string(info.Properties.ContentMD5[:])
o.mimeType = *info.Properties.ContentType
o.size = *info.Properties.ContentLength
o.modTime = info.Properties.LastModified
@@ -1047,10 +946,6 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Offset and Count for range download // Offset and Count for range download
var offset int64 var offset int64
var count int64 var count int64
if o.AccessTier() == azblob.AccessTierArchive {
return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
}
for _, option := range options { for _, option := range options {
switch x := option.(type) { switch x := option.(type) {
case *fs.RangeOption: case *fs.RangeOption:
@@ -1149,7 +1044,7 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, blob *azblob.BlobURL,
var (
rawID uint64
blockID = "" // id in base64 encoded form
- blocks []string
+ blocks = make([]string, totalParts)
)
// increment the blockID
@@ -1206,14 +1101,11 @@ outer:
defer o.fs.uploadToken.Put()
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, totalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
- // Upload the block, with MD5 for check
- md5sum := md5.Sum(buf)
- transactionalMD5 := md5sum[:]
err = o.fs.pacer.Call(func() (bool, error) {
bufferReader := bytes.NewReader(buf)
wrappedReader := wrap(bufferReader)
rs := readSeeker{wrappedReader, bufferReader}
- _, err = blockBlobURL.StageBlock(ctx, blockID, &rs, ac, transactionalMD5)
+ _, err = blockBlobURL.StageBlock(ctx, blockID, rs, ac)
return o.fs.shouldRetry(err)
})
@@ -1290,20 +1182,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
Metadata: o.meta,
BlobHTTPHeaders: httpHeaders,
}
- // FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
- // is merged the SDK can't upload a single blob of exactly the chunk
- // size, so upload with a multpart upload to work around.
- // See: https://github.com/ncw/rclone/issues/2653
- multipartUpload := size >= int64(o.fs.opt.UploadCutoff)
- if size == int64(o.fs.opt.ChunkSize) {
- multipartUpload = true
- fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
- }
ctx := context.Background()
// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
- if multipartUpload {
+ if size >= int64(o.fs.opt.UploadCutoff) {
// If a large file upload in chunks
err = o.uploadMultipart(in, size, &blob, &httpHeaders)
} else {
@@ -1316,20 +1199,8 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
if err != nil {
return err
}
- // Refresh metadata on object
o.clearMetaData()
- err = o.readMetaData()
+ return o.readMetaData()
- if err != nil {
- return err
- }
- // If tier is not changed or not specified, do not attempt to invoke `SetBlobTier` operation
- if o.fs.opt.AccessTier == string(defaultAccessTier) || o.fs.opt.AccessTier == string(o.AccessTier()) {
- return nil
- }
- // Now, set blob tier based on configured access tier
- return o.SetTier(o.fs.opt.AccessTier)
}
// Remove an object
@@ -1349,46 +1220,6 @@ func (o *Object) MimeType() string {
return o.mimeType return o.mimeType
} }
// AccessTier of an object, default is of type none
func (o *Object) AccessTier() azblob.AccessTierType {
return o.accessTier
}
// SetTier performs changing object tier
func (o *Object) SetTier(tier string) error {
if !validateAccessTier(tier) {
return errors.Errorf("Tier %s not supported by Azure Blob Storage", tier)
}
// Check if current tier already matches with desired tier
if o.GetTier() == tier {
return nil
}
desiredAccessTier := azblob.AccessTierType(tier)
blob := o.getBlobReference()
ctx := context.Background()
err := o.fs.pacer.Call(func() (bool, error) {
_, err := blob.SetTier(ctx, desiredAccessTier, azblob.LeaseAccessConditions{})
return o.fs.shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "Failed to set Blob Tier")
}
// Set access tier on local object also, this typically
// gets updated on get blob properties
o.accessTier = desiredAccessTier
fs.Debugf(o, "Successfully changed object tier to %s", tier)
return nil
}
// GetTier returns object tier in azure as string
func (o *Object) GetTier() string {
return string(o.accessTier)
}
// Check the interfaces are satisfied // Check the interfaces are satisfied
var ( var (
_ fs.Fs = &Fs{} _ fs.Fs = &Fs{}
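A short sketch of how the SetTier/GetTier methods above might be driven from generic code. The tierSetter interface and coolDown function below are local to the sketch (only the method signatures are taken from the functions above); rclone's own optional interfaces may differ.

package example

import (
    "errors"

    "github.com/ncw/rclone/fs"
)

// tierSetter is a local stand-in whose method set matches SetTier/GetTier above.
type tierSetter interface {
    SetTier(tier string) error
    GetTier() string
}

// coolDown moves an object to the "Cool" tier if the backend supports tiers.
func coolDown(o fs.Object) error {
    ts, ok := o.(tierSetter)
    if !ok {
        return errors.New("backend does not support storage tiers")
    }
    if ts.GetTier() == "Cool" {
        return nil // already there, setting it again would have no effect
    }
    return ts.SetTier("Cool")
}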


@@ -1,18 +0,0 @@
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
package azureblob
import (
"testing"
"github.com/stretchr/testify/assert"
)
func (f *Fs) InternalTest(t *testing.T) {
// Check first feature flags are set on this
// remote
enabled := f.Features().SetTier
assert.True(t, enabled)
enabled = f.Features().GetTier
assert.True(t, enabled)
}


@@ -2,12 +2,12 @@
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
- package azureblob
+ package azureblob_test
import (
"testing"
- "github.com/ncw/rclone/fs"
+ "github.com/ncw/rclone/backend/azureblob"
"github.com/ncw/rclone/fstest/fstests"
)
@@ -15,23 +15,6 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:",
- NilObject: (*Object)(nil),
+ NilObject: (*azureblob.Object)(nil),
- TiersToTest: []string{"Hot", "Cool"},
- ChunkedUpload: fstests.ChunkedUploadConfig{
- MaxChunkSize: maxChunkSize,
- },
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)


@@ -31,6 +31,11 @@ func (e *Error) Fatal() bool {
var _ fserrors.Fataler = (*Error)(nil) var _ fserrors.Fataler = (*Error)(nil)
// Account describes a B2 account
type Account struct {
ID string `json:"accountId"` // The identifier for the account.
}
// Bucket describes a B2 bucket // Bucket describes a B2 bucket
type Bucket struct { type Bucket struct {
ID string `json:"bucketId"` ID string `json:"bucketId"`
@@ -69,7 +74,7 @@ const versionFormat = "-v2006-01-02-150405.000"
func (t Timestamp) AddVersion(remote string) string {
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
- s := time.Time(t).Format(versionFormat)
+ s := (time.Time)(t).Format(versionFormat)
// Replace the '.' with a '-'
s = strings.Replace(s, ".", "-", -1)
return base + s + ext
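A worked example of the version suffix scheme above (assuming the api package lives at backend/b2/api; the timestamp and file name are invented to make the output concrete): the formatted time is inserted before the extension and the '.' in its fractional seconds becomes a '-'.

package example

import (
    "fmt"
    "time"

    "github.com/ncw/rclone/backend/b2/api"
)

func demoAddVersion() {
    // 2018-07-18 22:35:56.123 UTC, an arbitrary timestamp for illustration.
    t := api.Timestamp(time.Date(2018, 7, 18, 22, 35, 56, 123000000, time.UTC))
    fmt.Println(t.AddVersion("dir/report.txt"))
    // prints: dir/report-v2018-07-18-223556-123.txt
}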
@@ -102,20 +107,20 @@ func RemoveVersion(remote string) (t Timestamp, newRemote string) {
// IsZero returns true if the timestamp is unitialised
func (t Timestamp) IsZero() bool {
- return time.Time(t).IsZero()
+ return (time.Time)(t).IsZero()
}
// Equal compares two timestamps
//
// If either are !IsZero then it returns false
func (t Timestamp) Equal(s Timestamp) bool {
- if time.Time(t).IsZero() {
+ if (time.Time)(t).IsZero() {
return false
}
- if time.Time(s).IsZero() {
+ if (time.Time)(s).IsZero() {
return false
}
- return time.Time(t).Equal(time.Time(s))
+ return (time.Time)(t).Equal((time.Time)(s))
}
// File is info about a file // File is info about a file
@@ -132,26 +137,10 @@ type File struct {
// AuthorizeAccountResponse is as returned from the b2_authorize_account call // AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct { type AuthorizeAccountResponse struct {
AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
AccountID string `json:"accountId"` // The identifier for the account. AccountID string `json:"accountId"` // The identifier for the account.
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
} `json:"allowed"`
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header. AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files. DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
MinimumPartSize int `json:"minimumPartSize"` // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead.
RecommendedPartSize int `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance.
}
// ListBucketsRequest is parameters for b2_list_buckets call
type ListBucketsRequest struct {
AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId,omitempty"` // When specified, the result will be a list containing just this bucket.
BucketName string `json:"bucketName,omitempty"` // When specified, the result will be a list containing just this bucket.
BucketTypes []string `json:"bucketTypes,omitempty"` // If present, B2 will use it as a filter for bucket types returned in the list buckets response.
} }
// ListBucketsResponse is as returned from the b2_list_buckets call // ListBucketsResponse is as returned from the b2_list_buckets call


@@ -48,9 +48,9 @@ const (
decayConstant = 1 // bigger for slower decay, exponential
maxParts = 10000
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
- minChunkSize = 5 * fs.MebiByte
+ minChunkSize = 5E6
- defaultChunkSize = 96 * fs.MebiByte
+ defaultChunkSize = 96 * 1024 * 1024
- defaultUploadCutoff = 200 * fs.MebiByte
+ defaultUploadCutoff = 200E6
)
// Globals
@@ -66,7 +66,7 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "account",
- Help: "Account ID or Application Key ID",
+ Help: "Account ID",
Required: true,
}, {
Name: "key",
@@ -78,23 +78,13 @@ func init() {
Advanced: true, Advanced: true,
}, { }, {
Name: "test_mode", Name: "test_mode",
Help: `A flag string for X-Bz-Test-Mode header for debugging. Help: "A flag string for X-Bz-Test-Mode header for debugging.",
This is for debugging purposes only. Setting it to one of the strings
below will cause b2 to return specific errors:
* "fail_some_uploads"
* "expire_some_account_authorization_tokens"
* "force_cap_exceeded"
These will be set in the "X-Bz-Test-Mode" header which is documented
in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`,
Default: "", Default: "",
Hide: fs.OptionHideConfigurator, Hide: fs.OptionHideConfigurator,
Advanced: true, Advanced: true,
}, { }, {
Name: "versions", Name: "versions",
Help: "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.", Help: "Include old versions in directory listings.",
Default: false, Default: false,
Advanced: true, Advanced: true,
}, { }, {
@@ -103,21 +93,12 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
Default: false, Default: false,
}, { }, {
Name: "upload_cutoff", Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload. Help: "Cutoff for switching to chunked upload.",
Files above this size will be uploaded in chunks of "--b2-chunk-size".
This value should be set no larger than 4.657GiB (== 5GB).`,
Default: fs.SizeSuffix(defaultUploadCutoff), Default: fs.SizeSuffix(defaultUploadCutoff),
Advanced: true, Advanced: true,
}, { }, {
Name: "chunk_size", Name: "chunk_size",
Help: `Upload chunk size. Must fit in memory. Help: "Upload chunk size. Must fit in memory.",
When uploading large files, chunk the file into this size. Note that
these chunks are buffered in memory and there might a maximum of
"--transfers" chunks in progress at once. 5,000,000 Bytes is the
minimim size.`,
Default: fs.SizeSuffix(defaultChunkSize), Default: fs.SizeSuffix(defaultChunkSize),
Advanced: true, Advanced: true,
}}, }},
@@ -196,7 +177,7 @@ func (f *Fs) Features() *fs.Features {
}
// Pattern to match a b2 path
- var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
+ var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parseParse parses a b2 'url'
func parsePath(path string) (bucket, directory string, err error) {
@@ -282,37 +263,6 @@ func errorHandler(resp *http.Response) error {
return errResponse return errResponse
} }
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
f.fillBufferTokens() // reset the buffer tokens
}
return
}
func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
if cs < opt.ChunkSize {
return errors.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
}
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(&f.opt, cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
// NewFs contstructs an Fs from the path, bucket:path // NewFs contstructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct // Parse config into Options struct
@@ -321,13 +271,11 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, err
}
- err = checkUploadCutoff(opt, opt.UploadCutoff)
- if err != nil {
- return nil, errors.Wrap(err, "b2: upload cutoff")
+ if opt.UploadCutoff < opt.ChunkSize {
+ return nil, errors.Errorf("b2: upload cutoff (%v) must be greater than or equal to chunk size (%v)", opt.UploadCutoff, opt.ChunkSize)
}
- err = checkUploadChunkSize(opt.ChunkSize)
- if err != nil {
- return nil, errors.Wrap(err, "b2: chunk size")
+ if opt.ChunkSize < minChunkSize {
+ return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, opt.ChunkSize)
}
bucket, directory, err := parsePath(root)
if err != nil {
@@ -349,6 +297,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: directory, root: directory,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler), srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant), pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
bufferTokens: make(chan []byte, fs.Config.Transfers),
} }
f.features = (&fs.Features{ f.features = (&fs.Features{
ReadMimeType: true, ReadMimeType: true,
@@ -361,16 +310,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.srv.SetHeader(testModeHeader, testMode)
fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
}
- f.fillBufferTokens()
+ // Fill up the buffer tokens
+ for i := 0; i < fs.Config.Transfers; i++ {
+ f.bufferTokens <- nil
+ }
err = f.authorizeAccount()
if err != nil {
return nil, errors.Wrap(err, "failed to authorize account")
}
- // If this is a key limited to a single bucket, it must exist already
- if f.bucket != "" && f.info.Allowed.BucketID != "" {
- f.markBucketOK()
- f.setBucketID(f.info.Allowed.BucketID)
- }
if f.root != "" {
f.root += "/"
// Check to see if the (bucket,directory) is actually an existing file
@@ -469,14 +416,6 @@ func (f *Fs) clearUploadURL() {
f.uploadMu.Unlock() f.uploadMu.Unlock()
} }
// Fill up (or reset) the buffer tokens
func (f *Fs) fillBufferTokens() {
f.bufferTokens = make(chan []byte, fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
f.bufferTokens <- nil
}
}
// getUploadBlock gets a block from the pool of size chunkSize // getUploadBlock gets a block from the pool of size chunkSize
func (f *Fs) getUploadBlock() []byte { func (f *Fs) getUploadBlock() []byte {
buf := <-f.bufferTokens buf := <-f.bufferTokens
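The bufferTokens channel read above is a small token pool: a buffered channel that both limits how many upload buffers exist at once and recycles them. The sketch below is not rclone code; the names are invented and it only restates the pattern, including why the channel is pre-filled with nils.

package example

// bufferPool hands out fixed-size buffers through a buffered channel.
// The channel doubles as a semaphore: at most `workers` buffers can be
// checked out at any one time.
type bufferPool struct {
    tokens chan []byte
    size   int
}

func newBufferPool(workers, size int) *bufferPool {
    p := &bufferPool{tokens: make(chan []byte, workers), size: size}
    for i := 0; i < workers; i++ {
        p.tokens <- nil // nil token: allocate lazily on first use
    }
    return p
}

// get blocks until a token is free, allocating the buffer if needed.
func (p *bufferPool) get() []byte {
    buf := <-p.tokens
    if len(buf) != p.size {
        buf = make([]byte, p.size)
    }
    return buf
}

// put returns a buffer (and its token) to the pool for reuse.
func (p *bufferPool) put(buf []byte) {
    p.tokens <- buf
}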
@@ -752,11 +691,7 @@ type listBucketFn func(*api.Bucket) error
// listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(fn listBucketFn) error {
- var account = api.ListBucketsRequest{
- AccountID: f.info.AccountID,
- BucketID: f.info.Allowed.BucketID,
- }
+ var account = api.Account{ID: f.info.AccountID}
var response api.ListBucketsResponse
opts := rest.Opts{
Method: "POST",


@@ -1,10 +1,10 @@
// Test B2 filesystem interface
- package b2
+ package b2_test
import (
"testing"
- "github.com/ncw/rclone/fs"
+ "github.com/ncw/rclone/backend/b2"
"github.com/ncw/rclone/fstest/fstests"
)
@@ -12,23 +12,6 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestB2:",
- NilObject: (*Object)(nil),
+ NilObject: (*b2.Object)(nil),
- ChunkedUpload: fstests.ChunkedUploadConfig{
- MinChunkSize: minChunkSize,
- NeedMultipleChunks: true,
- },
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)


@@ -61,7 +61,7 @@ func (e *Error) Error() string {
var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo
- var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"
+ var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status"
// Types of things in Item
const (
@@ -86,10 +86,6 @@ type Item struct {
ContentCreatedAt Time `json:"content_created_at"` ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"` ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
SharedLink struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
} `json:"shared_link"`
} }
// ModTime returns the modification time of the item // ModTime returns the modification time of the item
@@ -149,14 +145,6 @@ type CopyFile struct {
Parent Parent `json:"parent"` Parent Parent `json:"parent"`
} }
// CreateSharedLink is the request for Public Link
type CreateSharedLink struct {
SharedLink struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
} `json:"shared_link"`
}
// UploadSessionRequest is uses in Create Upload Session // UploadSessionRequest is uses in Create Upload Session
type UploadSessionRequest struct { type UploadSessionRequest struct {
FolderID string `json:"folder_id,omitempty"` // don't pass for update FolderID string `json:"folder_id,omitempty"` // don't pass for update
@@ -184,8 +172,8 @@ type UploadSessionResponse struct {
// Part defines the return from upload part call which are passed to commit upload also
type Part struct {
PartID string `json:"part_id"`
- Offset int64 `json:"offset"`
+ Offset int `json:"offset"`
- Size int64 `json:"size"`
+ Size int `json:"size"`
Sha1 string `json:"sha1"`
}


@@ -85,14 +85,9 @@ func init() {
Help: "Box App Client Secret\nLeave blank normally.", Help: "Box App Client Secret\nLeave blank normally.",
}, { }, {
Name: "upload_cutoff", Name: "upload_cutoff",
Help: "Cutoff for switching to multipart upload (>= 50MB).", Help: "Cutoff for switching to multipart upload.",
Default: fs.SizeSuffix(defaultUploadCutoff), Default: fs.SizeSuffix(defaultUploadCutoff),
Advanced: true, Advanced: true,
}, {
Name: "commit_retries",
Help: "Max number of times to try committing a multipart file.",
Default: 100,
Advanced: true,
}}, }},
}) })
} }
@@ -100,7 +95,6 @@ func init() {
// Options defines the configuration for this backend // Options defines the configuration for this backend
type Options struct { type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CommitRetries int `config:"commit_retries"`
} }
// Fs represents a remote box // Fs represents a remote box
@@ -126,7 +120,6 @@ type Object struct {
size int64 // size of the object size int64 // size of the object
modTime time.Time // modification time of the object modTime time.Time // modification time of the object
id string // ID of the object id string // ID of the object
publicLink string // Public Link for the object
sha1 string // SHA-1 of the object content sha1 string // SHA-1 of the object content
} }
@@ -283,16 +276,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
- tempF := *f
+ newF := *f
- tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
+ newF.dirCache = dircache.New(newRoot, rootID, &newF)
- tempF.root = newRoot
+ newF.root = newRoot
// Make new Fs which is the parent
- err = tempF.dirCache.FindRoot(false)
+ err = newF.dirCache.FindRoot(false)
if err != nil {
// No root so return old f
return f, nil
}
- _, err := tempF.newObjectWithInfo(remote, nil)
+ _, err := newF.newObjectWithInfo(remote, nil)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
@@ -300,14 +293,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
return nil, err
}
- f.features.Fill(&tempF)
- // XXX: update the old f here instead of returning tempF, since
- // `features` were already filled with functions having *f as a receiver.
- // See https://github.com/ncw/rclone/issues/2182
- f.dirCache = tempF.dirCache
- f.root = tempF.root
// return an error with an fs which points to the parent
- return f, fs.ErrorIsFile
+ return &newF, fs.ErrorIsFile
}
return f, nil
}
@@ -681,7 +668,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
Parameters: fieldsValue(),
}
replacedLeaf := replaceReservedChars(leaf)
- copyFile := api.CopyFile{
+ copy := api.CopyFile{
Name: replacedLeaf,
Parent: api.Parent{
ID: directoryID,
@@ -690,7 +677,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
var resp *http.Response
var info *api.Item
err = f.pacer.Call(func() (bool, error) {
- resp, err = f.srv.CallJSON(&opts, &copyFile, &info)
+ resp, err = f.srv.CallJSON(&opts, &copy, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -851,46 +838,6 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
return nil return nil
} }
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(remote string) (string, error) {
id, err := f.dirCache.FindDir(remote, false)
var opts rest.Opts
if err == nil {
fs.Debugf(f, "attempting to share directory '%s'", remote)
opts = rest.Opts{
Method: "PUT",
Path: "/folders/" + id,
Parameters: fieldsValue(),
}
} else {
fs.Debugf(f, "attempting to share single file '%s'", remote)
o, err := f.NewObject(remote)
if err != nil {
return "", err
}
if o.(*Object).publicLink != "" {
return o.(*Object).publicLink, nil
}
opts = rest.Opts{
Method: "PUT",
Path: "/files/" + o.(*Object).id,
Parameters: fieldsValue(),
}
}
shareLink := api.CreateSharedLink{}
var info api.Item
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &shareLink, &info)
return shouldRetry(resp, err)
})
return info.SharedLink.URL, err
}
// DirCacheFlush resets the directory cache - used in testing as an // DirCacheFlush resets the directory cache - used in testing as an
// optional interface // optional interface
func (f *Fs) DirCacheFlush() { func (f *Fs) DirCacheFlush() {
@@ -955,7 +902,6 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
o.sha1 = info.SHA1 o.sha1 = info.SHA1
o.modTime = info.ModTime() o.modTime = info.ModTime()
o.id = info.ID o.id = info.ID
o.publicLink = info.SharedLink.URL
return nil return nil
} }
@@ -1135,7 +1081,6 @@ var (
_ fs.Mover = (*Fs)(nil) _ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil) _ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil) _ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Object = (*Object)(nil) _ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil) _ fs.IDer = (*Object)(nil)
) )


@@ -96,9 +96,7 @@ func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.T
request.Attributes.ContentCreatedAt = api.Time(modTime)
var body []byte
var resp *http.Response
- // For discussion of this value see:
- // https://github.com/ncw/rclone/issues/2054
- maxTries := o.fs.opt.CommitRetries
+ maxTries := fs.Config.LowLevelRetries
const defaultDelay = 10
var tries int
outer:

backend/cache/cache.go vendored

@@ -6,13 +6,10 @@ import (
"context" "context"
"fmt" "fmt"
"io" "io"
"math"
"os" "os"
"os/signal" "os/signal"
"path" "path"
"path/filepath" "path/filepath"
"sort"
"strconv"
"strings" "strings"
"sync" "sync"
"syscall" "syscall"
@@ -24,7 +21,6 @@ import (
"github.com/ncw/rclone/fs/config/configmap" "github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure" "github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fspath"
"github.com/ncw/rclone/fs/hash" "github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/rc" "github.com/ncw/rclone/fs/rc"
"github.com/ncw/rclone/fs/walk" "github.com/ncw/rclone/fs/walk"
@@ -83,17 +79,9 @@ func init() {
Help: "The plex token for authentication - auto set normally", Help: "The plex token for authentication - auto set normally",
Hide: fs.OptionHideBoth, Hide: fs.OptionHideBoth,
Advanced: true, Advanced: true,
}, {
Name: "plex_insecure",
Help: "Skip all certificate verifications when connecting to the Plex server",
Advanced: true,
}, { }, {
Name: "chunk_size", Name: "chunk_size",
Help: `The size of a chunk (partial file data). Help: "The size of a chunk. Lower value good for slow connections but can affect seamless reading.",
Use lower numbers for slower connections. If the chunk size is
changed, any downloaded chunks will be invalid and cache-chunk-path
will need to be cleared or unexpected EOF errors will occur.`,
Default: DefCacheChunkSize, Default: DefCacheChunkSize,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "1m", Value: "1m",
@@ -107,9 +95,7 @@ will need to be cleared or unexpected EOF errors will occur.`,
}}, }},
}, { }, {
Name: "info_age", Name: "info_age",
Help: `How long to cache file structure information (directory listings, file size, times etc). Help: "How much time should object info (file size, file hashes etc) be stored in cache.\nUse a very high value if you don't plan on changing the source FS from outside the cache.\nAccepted units are: \"s\", \"m\", \"h\".",
If all write operations are done through the cache then you can safely make
this value very large as the cache store will also be updated in real time.`,
Default: DefCacheInfoAge, Default: DefCacheInfoAge,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "1h", Value: "1h",
@@ -123,10 +109,7 @@ this value very large as the cache store will also be updated in real time.`,
}}, }},
}, { }, {
Name: "chunk_total_size", Name: "chunk_total_size",
Help: `The total size that the chunks can take up on the local disk. Help: "The maximum size of stored chunks. When the storage grows beyond this size, the oldest chunks will be deleted.",
If the cache exceeds this value then it will start to delete the
oldest chunks until it goes under this value.`,
Default: DefCacheTotalChunkSize, Default: DefCacheTotalChunkSize,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "500M", Value: "500M",
@@ -141,143 +124,63 @@ oldest chunks until it goes under this value.`,
}, { }, {
Name: "db_path", Name: "db_path",
Default: filepath.Join(config.CacheDir, "cache-backend"), Default: filepath.Join(config.CacheDir, "cache-backend"),
Help: "Directory to store file structure metadata DB.\nThe remote name is used as the DB file name.", Help: "Directory to cache DB",
Advanced: true, Advanced: true,
}, { }, {
Name: "chunk_path", Name: "chunk_path",
Default: filepath.Join(config.CacheDir, "cache-backend"), Default: filepath.Join(config.CacheDir, "cache-backend"),
Help: `Directory to cache chunk files. Help: "Directory to cache chunk files",
Path to where partial file data (chunks) are stored locally. The remote
name is appended to the final path.
This config follows the "--cache-db-path". If you specify a custom
location for "--cache-db-path" and don't specify one for "--cache-chunk-path"
then "--cache-chunk-path" will use the same path as "--cache-db-path".`,
Advanced: true, Advanced: true,
}, { }, {
Name: "db_purge", Name: "db_purge",
Default: false, Default: false,
Help: "Clear all the cached data for this remote on start.", Help: "Purge the cache DB before",
Hide: fs.OptionHideConfigurator, Hide: fs.OptionHideConfigurator,
Advanced: true, Advanced: true,
}, { }, {
Name: "chunk_clean_interval", Name: "chunk_clean_interval",
Default: DefCacheChunkCleanInterval, Default: DefCacheChunkCleanInterval,
Help: `How often should the cache perform cleanups of the chunk storage. Help: "Interval at which chunk cleanup runs",
The default value should be ok for most people. If you find that the
cache goes over "cache-chunk-total-size" too often then try to lower
this value to force it to perform cleanups more often.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "read_retries", Name: "read_retries",
Default: DefCacheReadRetries, Default: DefCacheReadRetries,
Help: `How many times to retry a read from a cache storage. Help: "How many times to retry a read from a cache storage",
Since reading from a cache stream is independent from downloading file
data, readers can get to a point where there's no more data in the
cache. Most of the times this can indicate a connectivity issue if
cache isn't able to provide file data anymore.
For really slow connections, increase this to a point where the stream is
able to provide data but your experience will be very stuttering.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "workers", Name: "workers",
Default: DefCacheTotalWorkers, Default: DefCacheTotalWorkers,
Help: `How many workers should run in parallel to download chunks. Help: "How many workers should run in parallel to download chunks",
Higher values will mean more parallel processing (better CPU needed)
and more concurrent requests on the cloud provider. This impacts
several aspects like the cloud provider API limits, more stress on the
hardware that rclone runs on but it also means that streams will be
more fluid and data will be available much more faster to readers.
**Note**: If the optional Plex integration is enabled then this
setting will adapt to the type of reading performed and the value
specified here will be used as a maximum number of workers to use.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "chunk_no_memory", Name: "chunk_no_memory",
Default: DefCacheChunkNoMemory, Default: DefCacheChunkNoMemory,
Help: `Disable the in-memory cache for storing chunks during streaming. Help: "Disable the in-memory cache for storing chunks during streaming",
By default, cache will keep file data during streaming in RAM as well
to provide it to readers as fast as possible.
This transient data is evicted as soon as it is read and the number of
chunks stored doesn't exceed the number of workers. However, depending
on other settings like "cache-chunk-size" and "cache-workers" this footprint
can increase if there are parallel streams too (multiple files being read
at the same time).
If the hardware permits it, use this feature to provide an overall better
performance during streaming but it can also be disabled if RAM is not
available on the local machine.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "rps", Name: "rps",
Default: int(DefCacheRps), Default: int(DefCacheRps),
Help: `Limits the number of requests per second to the source FS (-1 to disable) Help: "Limits the number of requests per second to the source FS. -1 disables the rate limiter",
This setting places a hard limit on the number of requests per second
that cache will be doing to the cloud provider remote and try to
respect that value by setting waits between reads.
If you find that you're getting banned or limited on the cloud
provider through cache and know that a smaller number of requests per
second will allow you to work with it then you can use this setting
for that.
A good balance of all the other settings should make this setting
useless but it is available to set for more special cases.
**NOTE**: This will limit the number of requests during streams but
other API calls to the cloud provider like directory listings will
still pass.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "writes", Name: "writes",
Default: DefCacheWrites, Default: DefCacheWrites,
Help: `Cache file data on writes through the FS Help: "Will cache file data on writes through the FS",
If you need to read files immediately after you upload them through
cache you can enable this flag to have their data stored in the
cache store at the same time during upload.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "tmp_upload_path", Name: "tmp_upload_path",
Default: "", Default: "",
Help: `Directory to keep temporary files until they are uploaded. Help: "Directory to keep temporary files until they are uploaded to the cloud storage",
This is the path where cache will use as a temporary storage for new
files that need to be uploaded to the cloud provider.
Specifying a value will enable this feature. Without it, it is
completely disabled and files will be uploaded directly to the cloud
provider`,
Advanced: true, Advanced: true,
}, { }, {
Name: "tmp_wait_time", Name: "tmp_wait_time",
Default: DefCacheTmpWaitTime, Default: DefCacheTmpWaitTime,
Help: `How long should files be stored in local cache before being uploaded Help: "How long should files be stored in local cache before being uploaded",
This is the duration that a file must wait in the temporary location
_cache-tmp-upload-path_ before it is selected for upload.
Note that only one file is uploaded at a time and it can take longer
to start the upload if a queue formed for this purpose.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "db_wait_time", Name: "db_wait_time",
Default: DefCacheDbWaitTime, Default: DefCacheDbWaitTime,
Help: `How long to wait for the DB to be available - 0 is unlimited Help: "How long to wait for the DB to be available - 0 is unlimited",
Only one process can have the DB open at any one time, so rclone waits
for this duration for the DB to become available before it gives an
error.
If you set it to 0 then it will wait forever.`,
Advanced: true, Advanced: true,
}}, }},
}) })
@@ -290,7 +193,6 @@ type Options struct {
PlexUsername string `config:"plex_username"` PlexUsername string `config:"plex_username"`
PlexPassword string `config:"plex_password"` PlexPassword string `config:"plex_password"`
PlexToken string `config:"plex_token"` PlexToken string `config:"plex_token"`
PlexInsecure bool `config:"plex_insecure"`
ChunkSize fs.SizeSuffix `config:"chunk_size"` ChunkSize fs.SizeSuffix `config:"chunk_size"`
InfoAge fs.Duration `config:"info_age"` InfoAge fs.Duration `config:"info_age"`
ChunkTotalSize fs.SizeSuffix `config:"chunk_total_size"` ChunkTotalSize fs.SizeSuffix `config:"chunk_total_size"`
@@ -346,7 +248,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
- return nil, errors.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
+ return nil, errors.Errorf("don't set cache-total-chunk-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
}
@@ -359,15 +261,10 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath) return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
} }
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(opt.Remote) remotePath := path.Join(opt.Remote, rpath)
if err != nil { wrappedFs, wrapErr := fs.NewFs(remotePath)
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", opt.Remote)
}
remotePath := fspath.JoinRootPath(wPath, rootPath)
wrappedFs, wrapErr := wInfo.NewFs(wName, remotePath, wConfig)
if wrapErr != nil && wrapErr != fs.ErrorIsFile { if wrapErr != nil && wrapErr != fs.ErrorIsFile {
return nil, errors.Wrapf(wrapErr, "failed to make remote %s:%s to wrap", wName, remotePath) return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
} }
var fsErr error var fsErr error
fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath) fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
@@ -393,7 +290,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
f.plexConnector = &plexConnector{} f.plexConnector = &plexConnector{}
if opt.PlexURL != "" { if opt.PlexURL != "" {
if opt.PlexToken != "" { if opt.PlexToken != "" {
f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken, opt.PlexInsecure) f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL) return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
} }
@@ -403,7 +300,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
if err != nil { if err != nil {
decPass = opt.PlexPassword decPass = opt.PlexPassword
} }
f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) { f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, func(token string) {
m.Set("plex_token", token) m.Set("plex_token", token)
}) })
if err != nil { if err != nil {
@@ -471,7 +368,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
fs.Infof(name, "Chunk Clean Interval: %v", f.opt.ChunkCleanInterval) fs.Infof(name, "Chunk Clean Interval: %v", f.opt.ChunkCleanInterval)
fs.Infof(name, "Workers: %v", f.opt.TotalWorkers) fs.Infof(name, "Workers: %v", f.opt.TotalWorkers)
fs.Infof(name, "File Age: %v", f.opt.InfoAge) fs.Infof(name, "File Age: %v", f.opt.InfoAge)
if f.opt.StoreWrites { if !f.opt.StoreWrites {
fs.Infof(name, "Cache Writes: enabled") fs.Infof(name, "Cache Writes: enabled")
} }
@@ -506,9 +403,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
}()
if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
- pollInterval := make(chan time.Duration, 1)
- pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
- doChangeNotify(f.receiveChangeNotify, pollInterval)
+ doChangeNotify(f.receiveChangeNotify, time.Duration(f.opt.ChunkCleanInterval))
}
f.features = (&fs.Features{
@@ -560,39 +455,6 @@ Eg
Title: "Get cache stats", Title: "Get cache stats",
Help: ` Help: `
Show statistics for the cache remote. Show statistics for the cache remote.
`,
})
rc.Add(rc.Call{
Path: "cache/fetch",
Fn: f.rcFetch,
Title: "Fetch file chunks",
Help: `
Ensure the specified file chunks are cached on disk.
The chunks= parameter specifies the file chunks to check.
It takes a comma separated list of array slice indices.
The slice indices are similar to Python slices: start[:end]
start is the 0 based chunk number from the beginning of the file
to fetch inclusive. end is 0 based chunk number from the beginning
of the file to fetch exclisive.
Both values can be negative, in which case they count from the back
of the file. The value "-5:" represents the last 5 chunks of a file.
Some valid examples are:
":5,-5:" -> the first and last five chunks
"0,-2" -> the first and the second last chunk
"0:10" -> the first ten chunks
Any parameter with a key that starts with "file" can be used to
specify files to fetch, eg
rclone rc cache/fetch chunks=0 file=hello file2=home/goodbye
File names will automatically be encrypted when a crypt remote
is used on top of the cache.
`, `,
}) })
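As a worked illustration of the start[:end] slice syntax documented above (an editorial sketch, not part of the diff): for a file that splits into 100 chunks, ":5" selects chunks 0-4, "-5:" selects chunks 95-99, and "0,-2" selects chunks 0 and 98. A minimal, self-contained Go sketch of that arithmetic, assuming the same negative-index convention; the helper name and the chunk count are illustrative only:

package main

import "fmt"

// expand mirrors the documented chunk-slice semantics: start is inclusive,
// end is exclusive, and negative values count back from the end of the file.
func expand(start, end, chunks int64) (out []int64) {
	if start < 0 {
		start += chunks
	}
	if end < 0 {
		end += chunks
	}
	if start < 0 {
		start = 0
	}
	if end > chunks {
		end = chunks
	}
	for i := start; i < end; i++ {
		out = append(out, i)
	}
	return out
}

func main() {
	const chunks = 100
	fmt.Println(expand(0, 5, chunks))       // ":5"  -> [0 1 2 3 4]
	fmt.Println(expand(-5, chunks, chunks)) // "-5:" -> [95 96 97 98 99]
	fmt.Println(expand(-2, -1, chunks))     // "-2"  -> [98]
}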
@@ -610,22 +472,6 @@ func (f *Fs) httpStats(in rc.Params) (out rc.Params, err error) {
return out, nil return out, nil
} }
func (f *Fs) unwrapRemote(remote string) string {
remote = cleanPath(remote)
if remote != "" {
// if it's wrapped by crypt we need to check what format we got
if cryptFs, yes := f.isWrappedByCrypt(); yes {
_, err := cryptFs.DecryptFileName(remote)
// if it failed to decrypt then it is a decrypted format and we need to encrypt it
if err != nil {
return cryptFs.EncryptFileName(remote)
}
// else it's an encrypted format and we can use it as it is
}
}
return remote
}
func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) { func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
out = make(rc.Params) out = make(rc.Params)
remoteInt, ok := in["remote"] remoteInt, ok := in["remote"]
@@ -639,10 +485,21 @@ func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
withData = true withData = true
} }
remote = f.unwrapRemote(remote) if cleanPath(remote) != "" {
// if it's wrapped by crypt we need to check what format we got
if cryptFs, yes := f.isWrappedByCrypt(); yes {
_, err := cryptFs.DecryptFileName(remote)
// if it failed to decrypt then it is a decrypted format and we need to encrypt it
if err != nil {
remote = cryptFs.EncryptFileName(remote)
}
// else it's an encrypted format and we can use it as it is
}
if !f.cache.HasEntry(path.Join(f.Root(), remote)) { if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
return out, errors.Errorf("%s doesn't exist in cache", remote) return out, errors.Errorf("%s doesn't exist in cache", remote)
} }
}
co := NewObject(f, remote) co := NewObject(f, remote)
err = f.cache.GetObject(co) err = f.cache.GetObject(co)
@@ -671,141 +528,6 @@ func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
return out, nil return out, nil
} }
func (f *Fs) rcFetch(in rc.Params) (rc.Params, error) {
type chunkRange struct {
start, end int64
}
parseChunks := func(ranges string) (crs []chunkRange, err error) {
for _, part := range strings.Split(ranges, ",") {
var start, end int64 = 0, math.MaxInt64
switch ints := strings.Split(part, ":"); len(ints) {
case 1:
start, err = strconv.ParseInt(ints[0], 10, 64)
if err != nil {
return nil, errors.Errorf("invalid range: %q", part)
}
end = start + 1
case 2:
if ints[0] != "" {
start, err = strconv.ParseInt(ints[0], 10, 64)
if err != nil {
return nil, errors.Errorf("invalid range: %q", part)
}
}
if ints[1] != "" {
end, err = strconv.ParseInt(ints[1], 10, 64)
if err != nil {
return nil, errors.Errorf("invalid range: %q", part)
}
}
default:
return nil, errors.Errorf("invalid range: %q", part)
}
crs = append(crs, chunkRange{start: start, end: end})
}
return
}
walkChunkRange := func(cr chunkRange, size int64, cb func(chunk int64)) {
if size <= 0 {
return
}
chunks := (size-1)/f.ChunkSize() + 1
start, end := cr.start, cr.end
if start < 0 {
start += chunks
}
if end <= 0 {
end += chunks
}
if end <= start {
return
}
switch {
case start < 0:
start = 0
case start >= chunks:
return
}
switch {
case end <= start:
end = start + 1
case end >= chunks:
end = chunks
}
for i := start; i < end; i++ {
cb(i)
}
}
walkChunkRanges := func(crs []chunkRange, size int64, cb func(chunk int64)) {
for _, cr := range crs {
walkChunkRange(cr, size, cb)
}
}
v, ok := in["chunks"]
if !ok {
return nil, errors.New("missing chunks parameter")
}
s, ok := v.(string)
if !ok {
return nil, errors.New("invalid chunks parameter")
}
delete(in, "chunks")
crs, err := parseChunks(s)
if err != nil {
return nil, errors.Wrap(err, "invalid chunks parameter")
}
var files [][2]string
for k, v := range in {
if !strings.HasPrefix(k, "file") {
return nil, errors.Errorf("invalid parameter %s=%s", k, v)
}
switch v := v.(type) {
case string:
files = append(files, [2]string{v, f.unwrapRemote(v)})
default:
return nil, errors.Errorf("invalid parameter %s=%s", k, v)
}
}
type fileStatus struct {
Error string
FetchedChunks int
}
fetchedChunks := make(map[string]fileStatus, len(files))
for _, pair := range files {
file, remote := pair[0], pair[1]
var status fileStatus
o, err := f.NewObject(remote)
if err != nil {
fetchedChunks[file] = fileStatus{Error: err.Error()}
continue
}
co := o.(*Object)
err = co.refreshFromSource(true)
if err != nil {
fetchedChunks[file] = fileStatus{Error: err.Error()}
continue
}
handle := NewObjectHandle(co, f)
handle.UseMemory = false
handle.scaleWorkers(1)
walkChunkRanges(crs, co.Size(), func(chunk int64) {
_, err := handle.getChunk(chunk * f.ChunkSize())
if err != nil {
if status.Error == "" {
status.Error = err.Error()
}
} else {
status.FetchedChunks++
}
})
fetchedChunks[file] = status
}
return rc.Params{"status": fetchedChunks}, nil
}
// receiveChangeNotify is a wrapper for notifications sent from the wrapped FS about changed files // receiveChangeNotify is a wrapper for notifications sent from the wrapped FS about changed files
func (f *Fs) receiveChangeNotify(forgetPath string, entryType fs.EntryType) { func (f *Fs) receiveChangeNotify(forgetPath string, entryType fs.EntryType) {
if crypt, yes := f.isWrappedByCrypt(); yes { if crypt, yes := f.isWrappedByCrypt(); yes {
@@ -873,15 +595,12 @@ func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) {
// ChangeNotify can subscribe multiple callers // ChangeNotify can subscribe multiple callers
// this is coupled with the wrapped fs ChangeNotify (if it supports it) // this is coupled with the wrapped fs ChangeNotify (if it supports it)
// and also notifies other caches (i.e VFS) to clear out whenever something changes // and also notifies other caches (i.e VFS) to clear out whenever something changes
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) { func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
f.parentsForgetMu.Lock() f.parentsForgetMu.Lock()
defer f.parentsForgetMu.Unlock() defer f.parentsForgetMu.Unlock()
fs.Debugf(f, "subscribing to ChangeNotify") fs.Debugf(f, "subscribing to ChangeNotify")
f.parentsForgetFn = append(f.parentsForgetFn, notifyFunc) f.parentsForgetFn = append(f.parentsForgetFn, notifyFunc)
go func() { return make(chan bool)
for range pollInterval {
}
}()
} }
// Name of the remote (as passed into NewFs) // Name of the remote (as passed into NewFs)
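The hunk above is one of several in this diff that switch between the two ChangeNotify signatures: one form takes the poll interval as a plain time.Duration and returns a bool channel, the other receives interval updates over a channel and returns nothing. A minimal, editorial sketch of how a caller might drive the channel-based form, mirroring the doChangeNotify usage shown earlier in the diff; the remote name and intervals are placeholders, not taken from the diff:

package main

import (
	"time"

	"github.com/ncw/rclone/fs"
)

func watchChanges() error {
	f, err := fs.NewFs("remote:")
	if err != nil {
		return err
	}
	doChangeNotify := f.Features().ChangeNotify
	if doChangeNotify == nil {
		return nil // backend does not support change notifications
	}
	pollInterval := make(chan time.Duration, 1)
	pollInterval <- 30 * time.Second // initial polling interval
	doChangeNotify(func(path string, entryType fs.EntryType) {
		fs.Debugf(nil, "change at %q (%v)", path, entryType)
	}, pollInterval)
	// The interval can be retuned later; closing the channel signals
	// the backend to stop polling in this scheme.
	pollInterval <- time.Minute
	close(pollInterval)
	return nil
}

func main() {
	if err := watchChanges(); err != nil {
		fs.Errorf(nil, "watch failed: %v", err)
	}
}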
@@ -982,6 +701,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
fs.Debugf(dir, "list: cached entries: %v", entries) fs.Debugf(dir, "list: cached entries: %v", entries)
return entries, nil return entries, nil
} }
// FIXME need to clean existing cached listing
// we first search any temporary files stored locally // we first search any temporary files stored locally
var cachedEntries fs.DirEntries var cachedEntries fs.DirEntries
@@ -1007,42 +727,27 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
} }
// search from the source // search from the source
sourceEntries, err := f.Fs.List(dir) entries, err = f.Fs.List(dir)
if err != nil { if err != nil {
return nil, err return nil, err
} }
fs.Debugf(dir, "list: read %v from source", len(sourceEntries)) fs.Debugf(dir, "list: read %v from source", len(entries))
fs.Debugf(dir, "list: source entries: %v", sourceEntries) fs.Debugf(dir, "list: source entries: %v", entries)
sort.Sort(sourceEntries)
for _, entry := range entries {
entryRemote := entry.Remote()
i := sort.Search(len(sourceEntries), func(i int) bool { return sourceEntries[i].Remote() >= entryRemote })
if i < len(sourceEntries) && sourceEntries[i].Remote() == entryRemote {
continue
}
fp := path.Join(f.Root(), entryRemote)
switch entry.(type) {
case fs.Object:
_ = f.cache.RemoveObject(fp)
case fs.Directory:
_ = f.cache.RemoveDir(fp)
}
fs.Debugf(dir, "list: remove entry: %v", entryRemote)
}
entries = nil
// and then iterate over the ones from source (temp Objects will override source ones) // and then iterate over the ones from source (temp Objects will override source ones)
var batchDirectories []*Directory var batchDirectories []*Directory
sort.Sort(cachedEntries) for _, entry := range entries {
tmpCnt := len(cachedEntries)
for _, entry := range sourceEntries {
switch o := entry.(type) { switch o := entry.(type) {
case fs.Object: case fs.Object:
// skip over temporary objects (might be uploading) // skip over temporary objects (might be uploading)
oRemote := o.Remote() found := false
i := sort.Search(tmpCnt, func(i int) bool { return cachedEntries[i].Remote() >= oRemote }) for _, t := range cachedEntries {
if i < tmpCnt && cachedEntries[i].Remote() == oRemote { if t.Remote() == o.Remote() {
found = true
break
}
}
if found {
continue continue
} }
co := ObjectFromOriginal(f, o).persist() co := ObjectFromOriginal(f, o).persist()


@@ -4,9 +4,6 @@ package cache_test
import ( import (
"bytes" "bytes"
"encoding/base64"
goflag "flag"
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"log" "log"
@@ -15,12 +12,21 @@ import (
"path" "path"
"path/filepath" "path/filepath"
"runtime" "runtime"
"runtime/debug"
"strconv" "strconv"
"strings" "strings"
"testing" "testing"
"time" "time"
"github.com/pkg/errors"
"encoding/base64"
goflag "flag"
"fmt"
"runtime/debug"
"encoding/json"
"net/http"
"github.com/ncw/rclone/backend/cache" "github.com/ncw/rclone/backend/cache"
"github.com/ncw/rclone/backend/crypt" "github.com/ncw/rclone/backend/crypt"
_ "github.com/ncw/rclone/backend/drive" _ "github.com/ncw/rclone/backend/drive"
@@ -30,11 +36,10 @@ import (
"github.com/ncw/rclone/fs/config/configmap" "github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/object" "github.com/ncw/rclone/fs/object"
"github.com/ncw/rclone/fs/rc" "github.com/ncw/rclone/fs/rc"
"github.com/ncw/rclone/fs/rc/rcflags"
"github.com/ncw/rclone/fstest" "github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/vfs" "github.com/ncw/rclone/vfs"
"github.com/ncw/rclone/vfs/vfsflags" "github.com/ncw/rclone/vfs/vfsflags"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@@ -690,8 +695,8 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
} }
func TestInternalChangeSeenAfterRc(t *testing.T) { func TestInternalChangeSeenAfterRc(t *testing.T) {
cacheExpire := rc.Calls.Get("cache/expire") rcflags.Opt.Enabled = true
assert.NotNil(t, cacheExpire) rc.Start(&rcflags.Opt)
id := fmt.Sprintf("ticsarc%v", time.Now().Unix()) id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
@@ -724,8 +729,13 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.NotEqual(t, o.ModTime().String(), co.ModTime().String()) require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
// Call the rc function m := make(map[string]string)
m, err := cacheExpire.Fn(rc.Params{"remote": "data.bin"}) res, err := http.Post(fmt.Sprintf("http://localhost:5572/cache/expire?remote=%s", "data.bin"), "application/json; charset=utf-8", strings.NewReader(""))
require.NoError(t, err)
defer func() {
_ = res.Body.Close()
}()
_ = json.NewDecoder(res.Body).Decode(&m)
require.Contains(t, m, "status") require.Contains(t, m, "status")
require.Contains(t, m, "message") require.Contains(t, m, "message")
require.Equal(t, "ok", m["status"]) require.Equal(t, "ok", m["status"])
@@ -745,8 +755,13 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
li1, err = runInstance.list(t, rootFs, "") li1, err = runInstance.list(t, rootFs, "")
require.Len(t, li1, 1) require.Len(t, li1, 1)
// Call the rc function m = make(map[string]string)
m, err = cacheExpire.Fn(rc.Params{"remote": "/"}) res2, err := http.Post("http://localhost:5572/cache/expire?remote=/", "application/json; charset=utf-8", strings.NewReader(""))
require.NoError(t, err)
defer func() {
_ = res2.Body.Close()
}()
_ = json.NewDecoder(res2.Body).Decode(&m)
require.Contains(t, m, "status") require.Contains(t, m, "status")
require.Contains(t, m, "message") require.Contains(t, m, "message")
require.Equal(t, "ok", m["status"]) require.Equal(t, "ok", m["status"])


@@ -3,7 +3,6 @@
package cache_test package cache_test
import ( import (
"fmt"
"math/rand" "math/rand"
"os" "os"
"path" "path"
@@ -11,6 +10,8 @@ import (
"testing" "testing"
"time" "time"
"fmt"
"github.com/ncw/rclone/backend/cache" "github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/drive" _ "github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs"

backend/cache/cache_upload_test.go.orig (new file, 455 lines)

@@ -0,0 +1,455 @@
// +build !plan9
package cache_test
import (
"math/rand"
"os"
"path"
"strconv"
"testing"
"time"
"fmt"
"github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/fs"
"github.com/stretchr/testify/require"
)
func TestInternalUploadTempDirCreated(t *testing.T) {
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id)})
defer runInstance.cleanupFs(t, rootFs, boltDb)
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
require.NoError(t, err)
}
func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltDb *cache.Persistent) {
// create some rand test data
testSize := int64(524288000)
testReader := runInstance.randomReader(t, testSize)
bu := runInstance.listenForBackgroundUpload(t, rootFs, "one")
runInstance.writeRemoteReader(t, rootFs, "one", testReader)
// validate that it exists in temp fs
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.NoError(t, err)
if runInstance.rootIsCrypt {
require.Equal(t, int64(524416032), ti.Size())
} else {
require.Equal(t, testSize, ti.Size())
}
de1, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, de1, 1)
runInstance.completeBackgroundUpload(t, "one", bu)
// check if it was removed from temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.True(t, os.IsNotExist(err))
// check if it can be read
data2, err := runInstance.readDataFromRemote(t, rootFs, "one", 0, int64(1024), false)
require.NoError(t, err)
require.Len(t, data2, 1024)
}
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "0s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
func TestInternalUploadMoveExistingFile(t *testing.T) {
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("one")
require.NoError(t, err)
err = rootFs.Mkdir("one/test")
require.NoError(t, err)
err = rootFs.Mkdir("second")
require.NoError(t, err)
// create some rand test data
testSize := int64(10485760)
testReader := runInstance.randomReader(t, testSize)
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
de1, err := runInstance.list(t, rootFs, "one/test")
require.NoError(t, err)
require.Len(t, de1, 1)
time.Sleep(time.Second * 5)
//_ = os.Remove(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
//require.NoError(t, err)
err = runInstance.dirMove(t, rootFs, "one/test", "second/test")
require.NoError(t, err)
// check if it can be read
de1, err = runInstance.list(t, rootFs, "second/test")
require.NoError(t, err)
require.Len(t, de1, 1)
}
func TestInternalUploadTempPathCleaned(t *testing.T) {
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("one")
require.NoError(t, err)
err = rootFs.Mkdir("one/test")
require.NoError(t, err)
err = rootFs.Mkdir("second")
require.NoError(t, err)
// create some rand test data
testSize := int64(1048576)
testReader := runInstance.randomReader(t, testSize)
testReader2 := runInstance.randomReader(t, testSize)
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
runInstance.writeObjectReader(t, rootFs, "second/data.bin", testReader2)
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
require.True(t, os.IsNotExist(err))
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.True(t, os.IsNotExist(err))
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second")))
require.False(t, os.IsNotExist(err))
runInstance.completeAllBackgroundUploads(t, rootFs, "second/data.bin")
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/data.bin")))
require.True(t, os.IsNotExist(err))
de1, err := runInstance.list(t, rootFs, "one/test")
require.NoError(t, err)
require.Len(t, de1, 1)
// check if it can be read
de1, err = runInstance.list(t, rootFs, "second")
require.NoError(t, err)
require.Len(t, de1, 1)
}
func TestInternalUploadQueueMoreFiles(t *testing.T) {
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("test")
require.NoError(t, err)
minSize := 5242880
maxSize := 10485760
totalFiles := 10
rand.Seed(time.Now().Unix())
lastFile := ""
for i := 0; i < totalFiles; i++ {
size := int64(rand.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin"
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
// validate that it exists in temp fs
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, remote)))
require.NoError(t, err)
require.Equal(t, size, runInstance.cleanSize(t, ti.Size()))
if runInstance.wrappedIsExternal && i < totalFiles-1 {
time.Sleep(time.Second * 3)
}
lastFile = remote
}
// check if cache lists all files, likely temp upload didn't finish yet
de1, err := runInstance.list(t, rootFs, "test")
require.NoError(t, err)
require.Len(t, de1, totalFiles)
// wait for background uploader to do its thing
runInstance.completeAllBackgroundUploads(t, rootFs, lastFile)
// retry until we have no more temp files and fail if they don't go down to 0
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test")))
require.True(t, os.IsNotExist(err))
// check if cache lists all files
de1, err = runInstance.list(t, rootFs, "test")
require.NoError(t, err)
require.Len(t, de1, totalFiles)
}
func TestInternalUploadTempFileOperations(t *testing.T) {
id := "tiutfo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()
// create some rand test data
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// check if it can be read
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data1)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
// test DirMove - allowed
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject("second/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
require.NoError(t, err)
_, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
require.Error(t, err)
var started bool
started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one")))
require.NoError(t, err)
require.False(t, started)
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
}
// test Rmdir - allowed
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
require.Contains(t, err.Error(), "directory not empty")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
require.False(t, started)
require.NoError(t, err)
// test Move/Rename -- allowed
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
if err != errNotSupported {
require.NoError(t, err)
// try to read from it
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject("test/second")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
require.NoError(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
}
// test Copy -- allowed
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
require.NoError(t, err)
}
// test Remove -- allowed
err = runInstance.rm(t, rootFs, "test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// test Update -- allowed
firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
require.NoError(t, err)
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
require.NoError(t, err)
obj2, err := rootFs.NewObject("test/one")
require.NoError(t, err)
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
require.Equal(t, "one content updated", string(data2))
tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
if runInstance.rootIsCrypt {
require.Equal(t, int64(67), tmpInfo.Size())
} else {
require.Equal(t, int64(len(data2)), tmpInfo.Size())
}
// test SetModTime -- allowed
secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
require.NoError(t, err)
require.NotEqual(t, secondModTime, firstModTime)
require.NotEqual(t, time.Time{}, firstModTime)
require.NotEqual(t, time.Time{}, secondModTime)
}
func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()
// create some rand test data
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// check if it can be read
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data1)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
err = boltDb.SetPendingUploadToStarted(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "test/one")))
require.NoError(t, err)
// test DirMove
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
require.Error(t, err)
}
// test Rmdir
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
// test Move/Rename
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
if err != errNotSupported {
require.Error(t, err)
// try to read from it
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/second")
require.Error(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
require.Error(t, err)
}
// test Copy -- allowed
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
require.NoError(t, err)
}
// test Remove
err = runInstance.rm(t, rootFs, "test/one")
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// test Update - this seems to work. Why? FIXME
//firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
//require.NoError(t, err)
//err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated", func() {
// data2 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len("one content updated")), true)
// require.Equal(t, "one content", string(data2))
//
// tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
// require.NoError(t, err)
// if runInstance.rootIsCrypt {
// require.Equal(t, int64(67), tmpInfo.Size())
// } else {
// require.Equal(t, int64(len(data2)), tmpInfo.Size())
// }
//})
//require.Error(t, err)
// test SetModTime -- seems to work cause of previous
//secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
//require.NoError(t, err)
//require.Equal(t, secondModTime, firstModTime)
//require.NotEqual(t, time.Time{}, firstModTime)
//require.NotEqual(t, time.Time{}, secondModTime)
}

backend/cache/cache_upload_test.go.rej (new file, 12 lines)

@@ -0,0 +1,12 @@
--- cache_upload_test.go
+++ cache_upload_test.go
@@ -1500,9 +1469,6 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
}
r.tempFiles = nil
debug.FreeOSMemory()
- for k, v := range r.runDefaultFlagMap {
- _ = flag.Set(k, v)
- }
}
func (r *run) randomBytes(t *testing.T, size int64) []byte {


@@ -3,15 +3,16 @@
package cache package cache
import ( import (
"path"
"time" "time"
"path"
"github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs"
) )
// Directory is a generic dir that stores basic information about it // Directory is a generic dir that stores basic information about it
type Directory struct { type Directory struct {
Directory fs.Directory `json:"-"` // can be nil fs.Directory `json:"-"`
CacheFs *Fs `json:"-"` // cache fs CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory Name string `json:"name"` // name of the directory
@@ -124,14 +125,6 @@ func (d *Directory) Items() int64 {
return d.CacheItems return d.CacheItems
} }
// ID returns the ID of the cached directory if known
func (d *Directory) ID() string {
if d.Directory == nil {
return ""
}
return d.Directory.ID()
}
var ( var (
_ fs.Directory = (*Directory)(nil) _ fs.Directory = (*Directory)(nil)
) )


@@ -5,11 +5,12 @@ package cache
import ( import (
"fmt" "fmt"
"io" "io"
"sync"
"time"
"path" "path"
"runtime" "runtime"
"strings" "strings"
"sync"
"time"
"github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations" "github.com/ncw/rclone/fs/operations"
@@ -48,11 +49,10 @@ type Handle struct {
offset int64 offset int64
seenOffsets map[int64]bool seenOffsets map[int64]bool
mu sync.Mutex mu sync.Mutex
workersWg sync.WaitGroup
confirmReading chan bool confirmReading chan bool
workers int
maxWorkerID int
UseMemory bool UseMemory bool
workers []*worker
closed bool closed bool
reading bool reading bool
} }
@@ -95,7 +95,7 @@ func (r *Handle) String() string {
// startReadWorkers will start the worker pool // startReadWorkers will start the worker pool
func (r *Handle) startReadWorkers() { func (r *Handle) startReadWorkers() {
if r.workers > 0 { if r.hasAtLeastOneWorker() {
return return
} }
totalWorkers := r.cacheFs().opt.TotalWorkers totalWorkers := r.cacheFs().opt.TotalWorkers
@@ -117,27 +117,26 @@ func (r *Handle) startReadWorkers() {
// scaleOutWorkers will increase the worker pool count by the provided amount // scaleOutWorkers will increase the worker pool count by the provided amount
func (r *Handle) scaleWorkers(desired int) { func (r *Handle) scaleWorkers(desired int) {
current := r.workers current := len(r.workers)
if current == desired { if current == desired {
return return
} }
if current > desired { if current > desired {
// scale in gracefully // scale in gracefully
for r.workers > desired { for i := 0; i < current-desired; i++ {
r.preloadQueue <- -1 r.preloadQueue <- -1
r.workers--
} }
} else { } else {
// scale out // scale out
for r.workers < desired { for i := 0; i < desired-current; i++ {
w := &worker{ w := &worker{
r: r, r: r,
id: r.maxWorkerID, ch: r.preloadQueue,
id: current + i,
} }
r.workersWg.Add(1)
r.workers++
r.maxWorkerID++
go w.run() go w.run()
r.workers = append(r.workers, w)
} }
} }
// ignore first scale out from 0 // ignore first scale out from 0
@@ -149,7 +148,7 @@ func (r *Handle) scaleWorkers(desired int) {
func (r *Handle) confirmExternalReading() { func (r *Handle) confirmExternalReading() {
// if we have a max value of workers // if we have a max value of workers
// then we skip this step // then we skip this step
if r.workers > 1 || if len(r.workers) > 1 ||
!r.cacheFs().plexConnector.isConfigured() { !r.cacheFs().plexConnector.isConfigured() {
return return
} }
@@ -179,7 +178,7 @@ func (r *Handle) queueOffset(offset int64) {
} }
} }
for i := 0; i < r.workers; i++ { for i := 0; i < len(r.workers); i++ {
o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i) o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
if o < 0 || o >= r.cachedObject.Size() { if o < 0 || o >= r.cachedObject.Size() {
continue continue
@@ -194,6 +193,16 @@ func (r *Handle) queueOffset(offset int64) {
} }
} }
func (r *Handle) hasAtLeastOneWorker() bool {
oneWorker := false
for i := 0; i < len(r.workers); i++ {
if r.workers[i].isRunning() {
oneWorker = true
}
}
return oneWorker
}
// getChunk is called by the FS to retrieve a specific chunk of known start and size from where it can find it // getChunk is called by the FS to retrieve a specific chunk of known start and size from where it can find it
// it can be from transient or persistent cache // it can be from transient or persistent cache
// it will also build the chunk from the cache's specific chunk boundaries and build the final desired chunk in a buffer // it will also build the chunk from the cache's specific chunk boundaries and build the final desired chunk in a buffer
@@ -234,7 +243,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
// not found in ram or // not found in ram or
// the worker didn't manage to download the chunk in time so we abort and close the stream // the worker didn't manage to download the chunk in time so we abort and close the stream
if err != nil || len(data) == 0 || !found { if err != nil || len(data) == 0 || !found {
if r.workers == 0 { if !r.hasAtLeastOneWorker() {
fs.Errorf(r, "out of workers") fs.Errorf(r, "out of workers")
return nil, io.ErrUnexpectedEOF return nil, io.ErrUnexpectedEOF
} }
@@ -295,7 +304,14 @@ func (r *Handle) Close() error {
close(r.preloadQueue) close(r.preloadQueue)
r.closed = true r.closed = true
// wait for workers to complete their jobs before returning // wait for workers to complete their jobs before returning
r.workersWg.Wait() waitCount := 3
for i := 0; i < len(r.workers); i++ {
waitIdx := 0
for r.workers[i].isRunning() && waitIdx < waitCount {
time.Sleep(time.Second)
waitIdx++
}
}
r.memory.db.Flush() r.memory.db.Flush()
fs.Debugf(r, "cache reader closed %v", r.offset) fs.Debugf(r, "cache reader closed %v", r.offset)
@@ -333,8 +349,11 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
type worker struct { type worker struct {
r *Handle r *Handle
ch <-chan int64
rc io.ReadCloser rc io.ReadCloser
id int id int
running bool
mu sync.Mutex
} }
// String is a representation of this worker // String is a representation of this worker
@@ -379,19 +398,33 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
}) })
} }
func (w *worker) isRunning() bool {
w.mu.Lock()
defer w.mu.Unlock()
return w.running
}
func (w *worker) setRunning(f bool) {
w.mu.Lock()
defer w.mu.Unlock()
w.running = f
}
// run is the main loop for the worker which receives offsets to preload // run is the main loop for the worker which receives offsets to preload
func (w *worker) run() { func (w *worker) run() {
var err error var err error
var data []byte var data []byte
defer w.setRunning(false)
defer func() { defer func() {
if w.rc != nil { if w.rc != nil {
_ = w.rc.Close() _ = w.rc.Close()
w.setRunning(false)
} }
w.r.workersWg.Done()
}() }()
for { for {
chunkStart, open := <-w.r.preloadQueue chunkStart, open := <-w.ch
w.setRunning(true)
if chunkStart < 0 || !open { if chunkStart < 0 || !open {
break break
} }


@@ -208,17 +208,11 @@ func (o *Object) SetModTime(t time.Time) error {
// Open is used to request a specific part of the file using fs.RangeOption // Open is used to request a specific part of the file using fs.RangeOption
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) { func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
var err error if err := o.refreshFromSource(true); err != nil {
if o.Object == nil {
err = o.refreshFromSource(true)
} else {
err = o.refresh()
}
if err != nil {
return nil, err return nil, err
} }
var err error
cacheReader := NewObjectHandle(o, o.CacheFs) cacheReader := NewObjectHandle(o, o.CacheFs)
var offset, limit int64 = 0, -1 var offset, limit int64 = 0, -1
for _, option := range options { for _, option := range options {
@@ -359,13 +353,6 @@ func (o *Object) tempFileStartedUpload() bool {
return started return started
} }
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
return o.Object
}
var ( var (
_ fs.Object = (*Object)(nil) _ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
) )

backend/cache/plex.go (38 changed lines)

@@ -3,17 +3,18 @@
package cache package cache
import ( import (
"bytes"
"crypto/tls"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"strings" "strings"
"sync"
"time" "time"
"sync"
"bytes"
"io/ioutil"
"github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/patrickmn/go-cache" "github.com/patrickmn/go-cache"
"golang.org/x/net/websocket" "golang.org/x/net/websocket"
@@ -53,7 +54,6 @@ type plexConnector struct {
username string username string
password string password string
token string token string
insecure bool
f *Fs f *Fs
mu sync.Mutex mu sync.Mutex
running bool running bool
@@ -63,7 +63,7 @@ type plexConnector struct {
} }
// newPlexConnector connects to a Plex server and generates a token // newPlexConnector connects to a Plex server and generates a token
func newPlexConnector(f *Fs, plexURL, username, password string, insecure bool, saveToken func(string)) (*plexConnector, error) { func newPlexConnector(f *Fs, plexURL, username, password string, saveToken func(string)) (*plexConnector, error) {
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/")) u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
if err != nil { if err != nil {
return nil, err return nil, err
@@ -75,7 +75,6 @@ func newPlexConnector(f *Fs, plexURL, username, password string, insecure bool,
username: username, username: username,
password: password, password: password,
token: "", token: "",
insecure: insecure,
stateCache: cache.New(time.Hour, time.Minute), stateCache: cache.New(time.Hour, time.Minute),
saveToken: saveToken, saveToken: saveToken,
} }
@@ -84,7 +83,7 @@ func newPlexConnector(f *Fs, plexURL, username, password string, insecure bool,
} }
// newPlexConnector connects to a Plex server and generates a token // newPlexConnector connects to a Plex server and generates a token
func newPlexConnectorWithToken(f *Fs, plexURL, token string, insecure bool) (*plexConnector, error) { func newPlexConnectorWithToken(f *Fs, plexURL, token string) (*plexConnector, error) {
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/")) u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
if err != nil { if err != nil {
return nil, err return nil, err
@@ -94,7 +93,6 @@ func newPlexConnectorWithToken(f *Fs, plexURL, token string, insecure bool) (*pl
f: f, f: f,
url: u, url: u,
token: token, token: token,
insecure: insecure,
stateCache: cache.New(time.Hour, time.Minute), stateCache: cache.New(time.Hour, time.Minute),
} }
pc.listenWebsocket() pc.listenWebsocket()
@@ -109,26 +107,14 @@ func (p *plexConnector) closeWebsocket() {
p.running = false p.running = false
} }
func (p *plexConnector) websocketDial() (*websocket.Conn, error) {
u := strings.TrimRight(strings.Replace(strings.Replace(
p.url.String(), "http://", "ws://", 1), "https://", "wss://", 1), "/")
url := fmt.Sprintf(defPlexNotificationURL, u, p.token)
config, err := websocket.NewConfig(url, "http://localhost")
if err != nil {
return nil, err
}
if p.insecure {
config.TlsConfig = &tls.Config{InsecureSkipVerify: true}
}
return websocket.DialConfig(config)
}
func (p *plexConnector) listenWebsocket() { func (p *plexConnector) listenWebsocket() {
p.runningMu.Lock() p.runningMu.Lock()
defer p.runningMu.Unlock() defer p.runningMu.Unlock()
conn, err := p.websocketDial() u := strings.Replace(p.url.String(), "http://", "ws://", 1)
u = strings.Replace(u, "https://", "wss://", 1)
conn, err := websocket.Dial(fmt.Sprintf(defPlexNotificationURL, strings.TrimRight(u, "/"), p.token),
"", "http://localhost")
if err != nil { if err != nil {
fs.Errorf("plex", "%v", err) fs.Errorf("plex", "%v", err)
return return
@@ -224,9 +210,7 @@ func (p *plexConnector) authenticate() error {
} }
p.token = token p.token = token
if p.token != "" { if p.token != "" {
if p.saveToken != nil {
p.saveToken(p.token) p.saveToken(p.token)
}
fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String()) fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String())
} }
p.listenWebsocket() p.listenWebsocket()


@@ -3,17 +3,20 @@
package cache package cache
import ( import (
"time"
"bytes" "bytes"
"encoding/binary" "encoding/binary"
"encoding/json" "encoding/json"
"fmt"
"io/ioutil"
"os" "os"
"path" "path"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
"time"
"io/ioutil"
"fmt"
bolt "github.com/coreos/bbolt" bolt "github.com/coreos/bbolt"
"github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs"


@@ -17,9 +17,11 @@ import (
"github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting" "github.com/ncw/rclone/fs/accounting"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rfjakob/eme"
"golang.org/x/crypto/nacl/secretbox" "golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/scrypt" "golang.org/x/crypto/scrypt"
"github.com/rfjakob/eme"
) )
// Constants // Constants


@@ -24,7 +24,7 @@ func TestNewNameEncryptionMode(t *testing.T) {
{"off", NameEncryptionOff, ""}, {"off", NameEncryptionOff, ""},
{"standard", NameEncryptionStandard, ""}, {"standard", NameEncryptionStandard, ""},
{"obfuscate", NameEncryptionObfuscated, ""}, {"obfuscate", NameEncryptionObfuscated, ""},
{"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""}, {"potato", NameEncryptionMode(0), "Unknown file name encryption mode \"potato\""},
} { } {
actual, actualErr := NewNameEncryptionMode(test.in) actual, actualErr := NewNameEncryptionMode(test.in)
assert.Equal(t, actual, test.expected) assert.Equal(t, actual, test.expected)


@@ -4,15 +4,14 @@ package crypt
import ( import (
"fmt" "fmt"
"io" "io"
"path"
"strings" "strings"
"time" "time"
"github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap" "github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure" "github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fspath"
"github.com/ncw/rclone/fs/hash" "github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@@ -68,15 +67,7 @@ func init() {
IsPassword: true, IsPassword: true,
}, { }, {
Name: "show_mapping", Name: "show_mapping",
Help: `For all files listed show how the names encrypt. Help: "For all files listed show how the names encrypt.",
If this flag is set then for each file that the remote is asked to
list, it will log (at level INFO) a line stating the decrypted file
name and the encrypted file name.
This is so you can work out which encrypted names are which decrypted
names just in case you need to do something with the encrypted file
names, or for debugging purposes.`,
Default: false, Default: false,
Hide: fs.OptionHideConfigurator, Hide: fs.OptionHideConfigurator,
Advanced: true, Advanced: true,
@@ -138,20 +129,16 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
if strings.HasPrefix(remote, name+":") { if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting") return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
} }
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
}
// Look for a file first // Look for a file first
remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath)) remotePath := path.Join(remote, cipher.EncryptFileName(rpath))
wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig) wrappedFs, err := fs.NewFs(remotePath)
// if that didn't produce a file, look for a directory // if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile { if err != fs.ErrorIsFile {
remotePath = fspath.JoinRootPath(wPath, cipher.EncryptDirName(rpath)) remotePath = path.Join(remote, cipher.EncryptDirName(rpath))
wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig) wrappedFs, err = fs.NewFs(remotePath)
} }
if err != fs.ErrorIsFile && err != nil { if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath) return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remotePath)
} }
f := &Fs{ f := &Fs{
Fs: wrappedFs, Fs: wrappedFs,
@@ -173,7 +160,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
doChangeNotify := wrappedFs.Features().ChangeNotify doChangeNotify := wrappedFs.Features().ChangeNotify
if doChangeNotify != nil { if doChangeNotify != nil {
f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) { f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
wrappedNotifyFunc := func(path string, entryType fs.EntryType) { wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
decrypted, err := f.DecryptFileName(path) decrypted, err := f.DecryptFileName(path)
if err != nil { if err != nil {
@@ -182,7 +169,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
} }
notifyFunc(decrypted, entryType) notifyFunc(decrypted, entryType)
} }
doChangeNotify(wrappedNotifyFunc, pollInterval) return doChangeNotify(wrappedNotifyFunc, pollInterval)
} }
} }
@@ -344,13 +331,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
if err != nil { if err != nil {
return nil, err return nil, err
} }
// unwrap the accounting
var wrap accounting.WrapFn
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
// add the hasher
wrappedIn = io.TeeReader(wrappedIn, hasher) wrappedIn = io.TeeReader(wrappedIn, hasher)
// wrap the accounting back on
wrappedIn = wrap(wrappedIn)
} }
// Transfer the data // Transfer the data
@@ -723,15 +704,15 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
// newDir returns a dir with the Name decrypted // newDir returns a dir with the Name decrypted
func (f *Fs) newDir(dir fs.Directory) fs.Directory { func (f *Fs) newDir(dir fs.Directory) fs.Directory {
newDir := fs.NewDirCopy(dir) new := fs.NewDirCopy(dir)
remote := dir.Remote() remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote) decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil { if err != nil {
fs.Debugf(remote, "Undecryptable dir name: %v", err) fs.Debugf(remote, "Undecryptable dir name: %v", err)
} else { } else {
newDir.SetRemote(decryptedRemote) new.SetRemote(decryptedRemote)
} }
return newDir return new
} }
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source // ObjectInfo describes a wrapped fs.ObjectInfo for being the source


@@ -7,30 +7,13 @@ import (
"testing" "testing"
"github.com/ncw/rclone/backend/crypt" "github.com/ncw/rclone/backend/crypt"
_ "github.com/ncw/rclone/backend/drive" // for integration tests
_ "github.com/ncw/rclone/backend/local" _ "github.com/ncw/rclone/backend/local"
_ "github.com/ncw/rclone/backend/swift" // for integration tests
"github.com/ncw/rclone/fs/config/obscure" "github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil),
})
}
// TestStandard runs integration tests against the remote // TestStandard runs integration tests against the remote
func TestStandard(t *testing.T) { func TestStandard(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard") tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt" name := "TestCrypt"
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
@@ -47,9 +30,6 @@ func TestStandard(t *testing.T) {
// TestOff runs integration tests against the remote // TestOff runs integration tests against the remote
func TestOff(t *testing.T) { func TestOff(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off") tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off")
name := "TestCrypt2" name := "TestCrypt2"
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
@@ -66,9 +46,6 @@ func TestOff(t *testing.T) {
// TestObfuscate runs integration tests against the remote // TestObfuscate runs integration tests against the remote
func TestObfuscate(t *testing.T) { func TestObfuscate(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate") tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt3" name := "TestCrypt3"
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{

File diff suppressed because it is too large


@@ -1,55 +1,63 @@
package drive package drive
import ( import (
"bytes"
"encoding/json" "encoding/json"
"io"
"io/ioutil"
"mime"
"path/filepath"
"strings"
"testing" "testing"
_ "github.com/ncw/rclone/backend/local" "google.golang.org/api/drive/v3"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fstest/fstests"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/api/drive/v3"
) )
/* const exampleExportFormats = `{
var additionalMimeTypes = map[string]string{ "application/vnd.google-apps.document": [
"application/vnd.ms-excel.sheet.macroenabled.12": ".xlsm", "application/rtf",
"application/vnd.ms-excel.template.macroenabled.12": ".xltm", "application/vnd.oasis.opendocument.text",
"application/vnd.ms-powerpoint.presentation.macroenabled.12": ".pptm", "text/html",
"application/vnd.ms-powerpoint.slideshow.macroenabled.12": ".ppsm", "application/pdf",
"application/vnd.ms-powerpoint.template.macroenabled.12": ".potm", "application/epub+zip",
"application/vnd.ms-powerpoint": ".ppt", "application/zip",
"application/vnd.ms-word.document.macroenabled.12": ".docm", "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"application/vnd.ms-word.template.macroenabled.12": ".dotm", "text/plain"
"application/vnd.openxmlformats-officedocument.presentationml.template": ".potx", ],
"application/vnd.openxmlformats-officedocument.spreadsheetml.template": ".xltx", "application/vnd.google-apps.spreadsheet": [
"application/vnd.openxmlformats-officedocument.wordprocessingml.template": ".dotx", "application/x-vnd.oasis.opendocument.spreadsheet",
"application/vnd.sun.xml.writer": ".sxw", "text/tab-separated-values",
"text/richtext": ".rtf", "application/pdf",
} "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
*/ "text/csv",
"application/zip",
"application/vnd.oasis.opendocument.spreadsheet"
],
"application/vnd.google-apps.jam": [
"application/pdf"
],
"application/vnd.google-apps.script": [
"application/vnd.google-apps.script+json"
],
"application/vnd.google-apps.presentation": [
"application/vnd.oasis.opendocument.presentation",
"application/pdf",
"application/vnd.openxmlformats-officedocument.presentationml.presentation",
"text/plain"
],
"application/vnd.google-apps.form": [
"application/zip"
],
"application/vnd.google-apps.drawing": [
"image/svg+xml",
"image/png",
"application/pdf",
"image/jpeg"
]
}`
var exportFormats map[string][]string
// Load the example export formats into exportFormats for testing // Load the example export formats into exportFormats for testing
func TestInternalLoadExampleFormats(t *testing.T) { func TestInternalLoadExampleExportFormats(t *testing.T) {
fetchFormatsOnce.Do(func() {}) assert.NoError(t, json.Unmarshal([]byte(exampleExportFormats), &exportFormats))
buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
var about struct {
ExportFormats map[string][]string `json:"exportFormats,omitempty"`
ImportFormats map[string][]string `json:"importFormats,omitempty"`
}
require.NoError(t, err)
require.NoError(t, json.Unmarshal(buf, &about))
_exportFormats = fixMimeTypeMap(about.ExportFormats)
_importFormats = fixMimeTypeMap(about.ImportFormats)
} }
func TestInternalParseExtensions(t *testing.T) {
@@ -58,195 +66,47 @@ func TestInternalParseExtensions(t *testing.T) {
		want    []string
		wantErr error
	}{
-		{"doc", []string{".doc"}, nil},
-		{" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
-		{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
-		{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
+		{"doc", []string{"doc"}, nil},
+		{" docx ,XLSX, pptx,svg", []string{"docx", "xlsx", "pptx", "svg"}, nil},
+		{"docx,svg,Docx", []string{"docx", "svg"}, nil},
+		{"docx,potato,docx", []string{"docx"}, errors.New(`couldn't find mime type for extension "potato"`)},
	} {
-		extensions, _, gotErr := parseExtensions(test.in)
+		f := new(Fs)
+		gotErr := f.parseExtensions(test.in)
		if test.wantErr == nil {
			assert.NoError(t, gotErr)
		} else {
			assert.EqualError(t, gotErr, test.wantErr.Error())
		}
-		assert.Equal(t, test.want, extensions)
+		assert.Equal(t, test.want, f.extensions)
	}
	// Test it is appending
-	extensions, _, gotErr := parseExtensions("docx,svg", "docx,svg,xlsx")
-	assert.NoError(t, gotErr)
-	assert.Equal(t, []string{".docx", ".svg", ".xlsx"}, extensions)
+	f := new(Fs)
+	assert.Nil(t, f.parseExtensions("docx,svg"))
+	assert.Nil(t, f.parseExtensions("docx,svg,xlsx"))
+	assert.Equal(t, []string{"docx", "svg", "xlsx"}, f.extensions)
}
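// The table-driven test above exercises extension parsing. A rough, self-contained
// sketch of the same normalization (trim, lower-case, add a leading dot, de-duplicate,
// reject unknown extensions) -- illustrative only, with a stub lookup map instead of
// rclone's real MIME tables:
//
//	func normalizeExtensions(csv string, knownMIME map[string]string) ([]string, error) {
//		seen := map[string]bool{}
//		var out []string
//		for _, ext := range strings.Split(csv, ",") {
//			ext = strings.ToLower(strings.TrimSpace(ext))
//			if ext == "" {
//				continue
//			}
//			if !strings.HasPrefix(ext, ".") {
//				ext = "." + ext
//			}
//			if seen[ext] {
//				continue
//			}
//			if _, ok := knownMIME[ext]; !ok {
//				return out, fmt.Errorf("couldn't find MIME type for extension %q", ext)
//			}
//			seen[ext] = true
//			out = append(out, ext)
//		}
//		return out, nil
//	}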
func TestInternalFindExportFormat(t *testing.T) {
-	item := &drive.File{
-		Name:     "file",
-		MimeType: "application/vnd.google-apps.document",
-	}
+	item := new(drive.File)
+	item.MimeType = "application/vnd.google-apps.document"
	for _, test := range []struct {
		extensions    []string
		wantExtension string
		wantMimeType  string
	}{
		{[]string{}, "", ""},
-		{[]string{".pdf"}, ".pdf", "application/pdf"},
-		{[]string{".pdf", ".rtf", ".xls"}, ".pdf", "application/pdf"},
-		{[]string{".xls", ".rtf", ".pdf"}, ".rtf", "application/rtf"},
-		{[]string{".xls", ".csv", ".svg"}, "", ""},
+		{[]string{"pdf"}, "pdf", "application/pdf"},
+		{[]string{"pdf", "rtf", "xls"}, "pdf", "application/pdf"},
+		{[]string{"xls", "rtf", "pdf"}, "rtf", "application/rtf"},
+		{[]string{"xls", "csv", "svg"}, "", ""},
	} {
		f := new(Fs)
-		f.exportExtensions = test.extensions
-		gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
+		f.extensions = test.extensions
+		gotExtension, gotMimeType := f.findExportFormat("file", exportFormats[item.MimeType])
		assert.Equal(t, test.wantExtension, gotExtension)
-		if test.wantExtension != "" {
-			assert.Equal(t, item.Name+gotExtension, gotFilename)
-		} else {
-			assert.Equal(t, "", gotFilename)
-		}
		assert.Equal(t, test.wantMimeType, gotMimeType)
-		assert.Equal(t, true, gotIsDocument)
	}
}
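// findExportFormat (in both versions above) boils down to a preference search:
// take the user's extensions in order and return the first one that the document's
// export MIME types can satisfy. A minimal sketch of that lookup, assuming a map
// from export MIME type to extension (names here are illustrative):
//
//	func pickExportFormat(prefs []string, exportMimeTypes []string, extOf map[string]string) (ext, mimeType string) {
//		for _, want := range prefs {
//			for _, mt := range exportMimeTypes {
//				if extOf[mt] == want {
//					return want, mt
//				}
//			}
//		}
//		return "", "" // not exportable to any preferred format
//	}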
func TestMimeTypesToExtension(t *testing.T) {
for mimeType, extension := range _mimeTypeToExtension {
extensions, err := mime.ExtensionsByType(mimeType)
assert.NoError(t, err)
assert.Contains(t, extensions, extension)
}
}
func TestExtensionToMimeType(t *testing.T) {
for mimeType, extension := range _mimeTypeToExtension {
gotMimeType := mime.TypeByExtension(extension)
mediatype, _, err := mime.ParseMediaType(gotMimeType)
assert.NoError(t, err)
assert.Equal(t, mimeType, mediatype)
}
}
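// The two tests above only pass if Go's standard mime package agrees with the
// extension table in this file. In isolation the round trip they check is:
//
//	mt := mime.TypeByExtension(".pdf")          // "application/pdf"
//	mediatype, _, _ := mime.ParseMediaType(mt)  // strips any parameters
//	exts, _ := mime.ExtensionsByType(mediatype) // contains ".pdf"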
func TestExtensionsForExportFormats(t *testing.T) {
if _exportFormats == nil {
t.Error("exportFormats == nil")
}
for fromMT, toMTs := range _exportFormats {
for _, toMT := range toMTs {
if !isInternalMimeType(toMT) {
extensions, err := mime.ExtensionsByType(toMT)
assert.NoError(t, err, "invalid MIME type %q", toMT)
assert.NotEmpty(t, extensions, "No extension found for %q (from: %q)", fromMT, toMT)
}
}
}
}
func TestExtensionsForImportFormats(t *testing.T) {
t.Skip()
if _importFormats == nil {
t.Error("_importFormats == nil")
}
for fromMT := range _importFormats {
if !isInternalMimeType(fromMT) {
extensions, err := mime.ExtensionsByType(fromMT)
assert.NoError(t, err, "invalid MIME type %q", fromMT)
assert.NotEmpty(t, extensions, "No extension found for %q", fromMT)
}
}
}
func (f *Fs) InternalTestDocumentImport(t *testing.T) {
oldAllow := f.opt.AllowImportNameChange
f.opt.AllowImportNameChange = true
defer func() {
f.opt.AllowImportNameChange = oldAllow
}()
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
require.NoError(t, err)
testFilesFs, err := fs.NewFs(testFilesPath)
require.NoError(t, err)
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
require.NoError(t, err)
err = operations.CopyFile(f, testFilesFs, "example2.doc", "example2.doc")
require.NoError(t, err)
}
func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
require.NoError(t, err)
testFilesFs, err := fs.NewFs(testFilesPath)
require.NoError(t, err)
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
require.NoError(t, err)
err = operations.CopyFile(f, testFilesFs, "example2.xlsx", "example1.ods")
require.NoError(t, err)
}
func (f *Fs) InternalTestDocumentExport(t *testing.T) {
var buf bytes.Buffer
var err error
f.exportExtensions, _, err = parseExtensions("txt")
require.NoError(t, err)
obj, err := f.NewObject("example2.txt")
require.NoError(t, err)
rc, err := obj.Open()
require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }()
_, err = io.Copy(&buf, rc)
require.NoError(t, err)
text := buf.String()
for _, excerpt := range []string{
"Lorem ipsum dolor sit amet, consectetur",
"porta at ultrices in, consectetur at augue.",
} {
require.Contains(t, text, excerpt)
}
}
func (f *Fs) InternalTestDocumentLink(t *testing.T) {
var buf bytes.Buffer
var err error
f.exportExtensions, _, err = parseExtensions("link.html")
require.NoError(t, err)
obj, err := f.NewObject("example2.link.html")
require.NoError(t, err)
rc, err := obj.Open()
require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }()
_, err = io.Copy(&buf, rc)
require.NoError(t, err)
text := buf.String()
require.True(t, strings.HasPrefix(text, "<html>"))
require.True(t, strings.HasSuffix(text, "</html>\n"))
for _, excerpt := range []string{
`<meta http-equiv="refresh"`,
`Loading <a href="`,
} {
require.Contains(t, text, excerpt)
}
}
func (f *Fs) InternalTest(t *testing.T) {
t.Run("DocumentImport", f.InternalTestDocumentImport)
t.Run("DocumentUpdate", f.InternalTestDocumentUpdate)
t.Run("DocumentExport", f.InternalTestDocumentExport)
t.Run("DocumentLink", f.InternalTestDocumentLink)
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -1,10 +1,10 @@
// Test Drive filesystem interface
-package drive
+package drive_test
import (
	"testing"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/backend/drive"
	"github.com/ncw/rclone/fstest/fstests"
)
@@ -12,23 +12,6 @@ import (
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestDrive:",
-		NilObject:  (*Object)(nil),
-		ChunkedUpload: fstests.ChunkedUploadConfig{
-			MinChunkSize:  minChunkSize,
-			CeilChunkSize: fstests.NextPowerOfTwo,
-		},
+		NilObject:  (*drive.Object)(nil),
	})
}
-func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setUploadChunkSize(cs)
-}
-func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setUploadCutoff(cs)
-}
-var (
-	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
-	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
-)

View File

@@ -1,178 +0,0 @@
{
"importFormats": {
"text/tab-separated-values": [
"application/vnd.google-apps.spreadsheet"
],
"application/x-vnd.oasis.opendocument.presentation": [
"application/vnd.google-apps.presentation"
],
"image/jpeg": [
"application/vnd.google-apps.document"
],
"image/bmp": [
"application/vnd.google-apps.document"
],
"image/gif": [
"application/vnd.google-apps.document"
],
"application/vnd.ms-excel.sheet.macroenabled.12": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.openxmlformats-officedocument.wordprocessingml.template": [
"application/vnd.google-apps.document"
],
"application/vnd.ms-powerpoint.presentation.macroenabled.12": [
"application/vnd.google-apps.presentation"
],
"application/vnd.ms-word.template.macroenabled.12": [
"application/vnd.google-apps.document"
],
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": [
"application/vnd.google-apps.document"
],
"image/pjpeg": [
"application/vnd.google-apps.document"
],
"application/vnd.google-apps.script+text/plain": [
"application/vnd.google-apps.script"
],
"application/vnd.ms-excel": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.sun.xml.writer": [
"application/vnd.google-apps.document"
],
"application/vnd.ms-word.document.macroenabled.12": [
"application/vnd.google-apps.document"
],
"application/vnd.ms-powerpoint.slideshow.macroenabled.12": [
"application/vnd.google-apps.presentation"
],
"text/rtf": [
"application/vnd.google-apps.document"
],
"text/plain": [
"application/vnd.google-apps.document"
],
"application/vnd.oasis.opendocument.spreadsheet": [
"application/vnd.google-apps.spreadsheet"
],
"application/x-vnd.oasis.opendocument.spreadsheet": [
"application/vnd.google-apps.spreadsheet"
],
"image/png": [
"application/vnd.google-apps.document"
],
"application/x-vnd.oasis.opendocument.text": [
"application/vnd.google-apps.document"
],
"application/msword": [
"application/vnd.google-apps.document"
],
"application/pdf": [
"application/vnd.google-apps.document"
],
"application/json": [
"application/vnd.google-apps.script"
],
"application/x-msmetafile": [
"application/vnd.google-apps.drawing"
],
"application/vnd.openxmlformats-officedocument.spreadsheetml.template": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.ms-powerpoint": [
"application/vnd.google-apps.presentation"
],
"application/vnd.ms-excel.template.macroenabled.12": [
"application/vnd.google-apps.spreadsheet"
],
"image/x-bmp": [
"application/vnd.google-apps.document"
],
"application/rtf": [
"application/vnd.google-apps.document"
],
"application/vnd.openxmlformats-officedocument.presentationml.template": [
"application/vnd.google-apps.presentation"
],
"image/x-png": [
"application/vnd.google-apps.document"
],
"text/html": [
"application/vnd.google-apps.document"
],
"application/vnd.oasis.opendocument.text": [
"application/vnd.google-apps.document"
],
"application/vnd.openxmlformats-officedocument.presentationml.presentation": [
"application/vnd.google-apps.presentation"
],
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.google-apps.script+json": [
"application/vnd.google-apps.script"
],
"application/vnd.openxmlformats-officedocument.presentationml.slideshow": [
"application/vnd.google-apps.presentation"
],
"application/vnd.ms-powerpoint.template.macroenabled.12": [
"application/vnd.google-apps.presentation"
],
"text/csv": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.oasis.opendocument.presentation": [
"application/vnd.google-apps.presentation"
],
"image/jpg": [
"application/vnd.google-apps.document"
],
"text/richtext": [
"application/vnd.google-apps.document"
]
},
"exportFormats": {
"application/vnd.google-apps.document": [
"application/rtf",
"application/vnd.oasis.opendocument.text",
"text/html",
"application/pdf",
"application/epub+zip",
"application/zip",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"text/plain"
],
"application/vnd.google-apps.spreadsheet": [
"application/x-vnd.oasis.opendocument.spreadsheet",
"text/tab-separated-values",
"application/pdf",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"text/csv",
"application/zip",
"application/vnd.oasis.opendocument.spreadsheet"
],
"application/vnd.google-apps.jam": [
"application/pdf"
],
"application/vnd.google-apps.script": [
"application/vnd.google-apps.script+json"
],
"application/vnd.google-apps.presentation": [
"application/vnd.oasis.opendocument.presentation",
"application/pdf",
"application/vnd.openxmlformats-officedocument.presentationml.presentation",
"text/plain"
],
"application/vnd.google-apps.form": [
"application/zip"
],
"application/vnd.google-apps.drawing": [
"image/svg+xml",
"image/png",
"application/pdf",
"image/jpeg"
]
}
}

View File

@@ -50,12 +50,11 @@ type resumableUpload struct {
}
// Upload the io.Reader in of size bytes with contentType and info
-func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
-	params := url.Values{
-		"alt":        {"json"},
-		"uploadType": {"resumable"},
-		"fields":     {partialFields},
-	}
+func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string, info *drive.File, remote string) (*drive.File, error) {
+	params := make(url.Values)
+	params.Set("alt", "json")
+	params.Set("uploadType", "resumable")
+	params.Set("fields", partialFields)
	if f.isTeamDrive {
		params.Set("supportsTeamDrives", "true")
	}

View File

@@ -79,8 +79,8 @@ const (
	// Choose 48MB which is 91% of Maximum speed. rclone by
	// default does 4 transfers so this should use 4*48MB = 192MB
	// by default.
-	defaultChunkSize = 48 * fs.MebiByte
-	maxChunkSize     = 150 * fs.MebiByte
+	defaultChunkSize = 48 * 1024 * 1024
+	maxChunkSize     = 150 * 1024 * 1024
)
var (
@@ -121,14 +121,7 @@ func init() {
Help: "Dropbox App Client Secret\nLeave blank normally.", Help: "Dropbox App Client Secret\nLeave blank normally.",
}, { }, {
Name: "chunk_size", Name: "chunk_size",
Help: fmt.Sprintf(`Upload chunk size. (< %v). Help: fmt.Sprintf("Upload chunk size. Max %v.", fs.SizeSuffix(maxChunkSize)),
Any files larger than this will be uploaded in chunks of this size.
Note that chunks are buffered in memory (one at a time) so rclone can
deal with retries. Setting this larger will increase the speed
slightly (at most 10%% for 128MB in tests) at the cost of using more
memory. It can be set smaller if you are tight on memory.`, fs.SizeSuffix(maxChunkSize)),
Default: fs.SizeSuffix(defaultChunkSize), Default: fs.SizeSuffix(defaultChunkSize),
Advanced: true, Advanced: true,
}}, }},
@@ -202,25 +195,6 @@ func shouldRetry(err error) (bool, error) {
	return fserrors.ShouldRetry(err), err
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.Byte
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
if cs > maxChunkSize {
return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
@@ -229,9 +203,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	if err != nil {
		return nil, err
	}
-	err = checkUploadChunkSize(opt.ChunkSize)
-	if err != nil {
-		return nil, errors.Wrap(err, "dropbox: chunk size")
-	}
+	if opt.ChunkSize > maxChunkSize {
+		return nil, errors.Errorf("chunk size too big, must be < %v", maxChunkSize)
+	}
	// Convert the old token if it exists.  The old token was just

View File

@@ -1,10 +1,10 @@
// Test Dropbox filesystem interface
-package dropbox
+package dropbox_test
import (
	"testing"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/backend/dropbox"
	"github.com/ncw/rclone/fstest/fstests"
)
@@ -12,15 +12,6 @@ import (
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestDropbox:",
-		NilObject:  (*Object)(nil),
-		ChunkedUpload: fstests.ChunkedUploadConfig{
-			MaxChunkSize: maxChunkSize,
-		},
+		NilObject:  (*dropbox.Object)(nil),
	})
}
-func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setUploadChunkSize(cs)
-}
-var _ fstests.SetUploadChunkSizer = (*Fs)(nil)

View File

@@ -704,11 +704,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
	path := path.Join(o.fs.root, o.remote)
	// remove the file if upload failed
	remove := func() {
-		// Give the FTP server a chance to get its internal state in order after the error.
-		// The error may have been local in which case we closed the connection.  The server
-		// may still be dealing with it for a moment.  A sleep isn't ideal but I haven't been
-		// able to think of a better method to find out if the server has finished - ncw
-		time.Sleep(1 * time.Second)
		removeErr := o.Remove()
		if removeErr != nil {
			fs.Debugf(o, "Failed to remove: %v", removeErr)
@@ -722,7 +717,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
	}
	err = c.Stor(path, in)
	if err != nil {
-		_ = c.Quit() // toss this connection to avoid sync errors
+		_ = c.Quit()
		remove()
		return errors.Wrap(err, "update stor")
	}

View File

@@ -345,7 +345,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	}
	// try loading service account credentials from env variable, then from a file
-	if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
+	if opt.ServiceAccountCredentials != "" && opt.ServiceAccountFile != "" {
		loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
		if err != nil {
			return nil, errors.Wrap(err, "error opening service account credentials file")

View File

@@ -2,9 +2,7 @@ package hubic
import (
	"net/http"
-	"time"
-	"github.com/ncw/rclone/fs"
	"github.com/ncw/swift"
)
@@ -23,18 +21,13 @@ func newAuth(f *Fs) *auth {
// Request constructs a http.Request for authentication
//
// returns nil for not needed
-func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
-	const retries = 10
-	for try := 1; try <= retries; try++ {
-		err = a.f.getCredentials()
-		if err == nil {
-			break
-		}
-		time.Sleep(100 * time.Millisecond)
-		fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
-	}
-	return nil, err
-}
+func (a *auth) Request(*swift.Connection) (*http.Request, error) {
+	err := a.f.getCredentials()
+	if err != nil {
+		return nil, err
+	}
+	return nil, nil
+}
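// A bounded retry with a fixed pause, as used in one version of Request above, is
// a common pattern. A generic, self-contained sketch (illustrative helper, not
// rclone's API):
//
//	func retry(attempts int, pause time.Duration, fn func() error) (err error) {
//		for i := 0; i < attempts; i++ {
//			if err = fn(); err == nil {
//				return nil
//			}
//			time.Sleep(pause)
//		}
//		return err // last error after all attempts
//	}
//
//	// usage: err := retry(10, 100*time.Millisecond, a.f.getCredentials)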
// Response parses the result of an http request
func (a *auth) Response(resp *http.Response) error {

View File

@@ -60,13 +60,13 @@ func init() {
log.Fatalf("Failed to configure token: %v", err) log.Fatalf("Failed to configure token: %v", err)
} }
}, },
Options: append([]fs.Option{{ Options: []fs.Option{{
Name: config.ConfigClientID, Name: config.ConfigClientID,
Help: "Hubic Client Id\nLeave blank normally.", Help: "Hubic Client Id\nLeave blank normally.",
}, { }, {
Name: config.ConfigClientSecret, Name: config.ConfigClientSecret,
Help: "Hubic Client Secret\nLeave blank normally.", Help: "Hubic Client Secret\nLeave blank normally.",
}}, swift.SharedOptions...), }},
}) })
} }

View File

@@ -1,267 +0,0 @@
package api
import (
"encoding/xml"
"fmt"
"time"
"github.com/pkg/errors"
)
const (
timeFormat = "2006-01-02-T15:04:05Z0700"
)
// Time represents time values in the Jottacloud API. It uses a custom RFC3339 like format.
type Time time.Time
// UnmarshalXML turns XML into a Time
func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
var v string
if err := d.DecodeElement(&v, &start); err != nil {
return err
}
if v == "" {
*t = Time(time.Time{})
return nil
}
newTime, err := time.Parse(timeFormat, v)
if err == nil {
*t = Time(newTime)
}
return err
}
// MarshalXML turns a Time into XML
func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
return e.EncodeElement(t.String(), start)
}
// Return Time string in Jottacloud format
func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// Flag is a hacky type for checking if an attribute is present
type Flag bool
// UnmarshalXMLAttr sets Flag to true if the attribute is present
func (f *Flag) UnmarshalXMLAttr(attr xml.Attr) error {
*f = true
return nil
}
// MarshalXMLAttr : Do not use
func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
attr := xml.Attr{
Name: name,
Value: "false",
}
return attr, errors.New("unimplemented")
}
/*
GET http://www.jottacloud.com/JFS/<account>
<user time="2018-07-18-T21:39:10Z" host="dn-132">
<username>12qh1wsht8cssxdtwl15rqh9</username>
<account-type>free</account-type>
<locked>false</locked>
<capacity>5368709120</capacity>
<max-devices>-1</max-devices>
<max-mobile-devices>-1</max-mobile-devices>
<usage>0</usage>
<read-locked>false</read-locked>
<write-locked>false</write-locked>
<quota-write-locked>false</quota-write-locked>
<enable-sync>true</enable-sync>
<enable-foldershare>true</enable-foldershare>
<devices>
<device>
<name xml:space="preserve">Jotta</name>
<display_name xml:space="preserve">Jotta</display_name>
<type>JOTTA</type>
<sid>5c458d01-9eaf-4f23-8d3c-2486fd9704d8</sid>
<size>0</size>
<modified>2018-07-15-T22:04:59Z</modified>
</device>
</devices>
</user>
*/
// AccountInfo represents a Jottacloud account
type AccountInfo struct {
Username string `xml:"username"`
AccountType string `xml:"account-type"`
Locked bool `xml:"locked"`
Capacity int64 `xml:"capacity"`
MaxDevices int `xml:"max-devices"`
MaxMobileDevices int `xml:"max-mobile-devices"`
Usage int64 `xml:"usage"`
ReadLocked bool `xml:"read-locked"`
WriteLocked bool `xml:"write-locked"`
QuotaWriteLocked bool `xml:"quota-write-locked"`
EnableSync bool `xml:"enable-sync"`
EnableFolderShare bool `xml:"enable-foldershare"`
Devices []JottaDevice `xml:"devices>device"`
}
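// Decoding the <user> example above into AccountInfo is a plain encoding/xml
// unmarshal; userXML here stands for that example document as a []byte
// (illustrative, not part of the package):
//
//	var info AccountInfo
//	if err := xml.Unmarshal(userXML, &info); err != nil {
//		return err
//	}
//	// info.Username == "12qh1wsht8cssxdtwl15rqh9", len(info.Devices) == 1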
/*
GET http://www.jottacloud.com/JFS/<account>/<device>
<device time="2018-07-23-T20:21:50Z" host="dn-158">
<name xml:space="preserve">Jotta</name>
<display_name xml:space="preserve">Jotta</display_name>
<type>JOTTA</type>
<sid>5c458d01-9eaf-4f23-8d3c-2486fd9704d8</sid>
<size>0</size>
<modified>2018-07-15-T22:04:59Z</modified>
<user>12qh1wsht8cssxdtwl15rqh9</user>
<mountPoints>
<mountPoint>
<name xml:space="preserve">Archive</name>
<size>0</size>
<modified>2018-07-15-T22:04:59Z</modified>
</mountPoint>
<mountPoint>
<name xml:space="preserve">Shared</name>
<size>0</size>
<modified></modified>
</mountPoint>
<mountPoint>
<name xml:space="preserve">Sync</name>
<size>0</size>
<modified></modified>
</mountPoint>
</mountPoints>
<metadata first="" max="" total="3" num_mountpoints="3"/>
</device>
*/
// JottaDevice represents a Jottacloud Device
type JottaDevice struct {
Name string `xml:"name"`
DisplayName string `xml:"display_name"`
Type string `xml:"type"`
Sid string `xml:"sid"`
Size int64 `xml:"size"`
User string `xml:"user"`
MountPoints []JottaMountPoint `xml:"mountPoints>mountPoint"`
}
/*
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>
<mountPoint time="2018-07-24-T20:35:02Z" host="dn-157">
<name xml:space="preserve">Sync</name>
<path xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta</path>
<abspath xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta</abspath>
<size>0</size>
<modified></modified>
<device>Jotta</device>
<user>12qh1wsht8cssxdtwl15rqh9</user>
<folders>
<folder name="test"/>
</folders>
<metadata first="" max="" total="1" num_folders="1" num_files="0"/>
</mountPoint>
*/
// JottaMountPoint represents a Jottacloud mountpoint
type JottaMountPoint struct {
Name string `xml:"name"`
Size int64 `xml:"size"`
Device string `xml:"device"`
Folders []JottaFolder `xml:"folders>folder"`
Files []JottaFile `xml:"files>file"`
}
/*
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/<folder>
<folder name="test" time="2018-07-24-T20:41:37Z" host="dn-158">
<path xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta/Sync</path>
<abspath xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta/Sync</abspath>
<folders>
<folder name="t2"/>c
</folders>
<files>
<file name="block.csv" uuid="f6553cd4-1135-48fe-8e6a-bb9565c50ef2">
<currentRevision>
<number>1</number>
<state>COMPLETED</state>
<created>2018-07-05-T15:08:02Z</created>
<modified>2018-07-05-T15:08:02Z</modified>
<mime>application/octet-stream</mime>
<size>30827730</size>
<md5>1e8a7b728ab678048df00075c9507158</md5>
<updated>2018-07-24-T20:41:10Z</updated>
</currentRevision>
</file>
</files>
<metadata first="" max="" total="2" num_folders="1" num_files="1"/>
</folder>
*/
// JottaFolder represents a JottacloudFolder
type JottaFolder struct {
XMLName xml.Name
Name string `xml:"name,attr"`
Deleted Flag `xml:"deleted,attr"`
Path string `xml:"path"`
CreatedAt Time `xml:"created"`
ModifiedAt Time `xml:"modified"`
Updated Time `xml:"updated"`
Folders []JottaFolder `xml:"folders>folder"`
Files []JottaFile `xml:"files>file"`
}
/*
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/.../<file>
<file name="block.csv" uuid="f6553cd4-1135-48fe-8e6a-bb9565c50ef2">
<currentRevision>
<number>1</number>
<state>COMPLETED</state>
<created>2018-07-05-T15:08:02Z</created>
<modified>2018-07-05-T15:08:02Z</modified>
<mime>application/octet-stream</mime>
<size>30827730</size>
<md5>1e8a7b728ab678048df00075c9507158</md5>
<updated>2018-07-24-T20:41:10Z</updated>
</currentRevision>
</file>
*/
// JottaFile represents a Jottacloud file
type JottaFile struct {
XMLName xml.Name
Name string `xml:"name,attr"`
Deleted Flag `xml:"deleted,attr"`
PublicSharePath string `xml:"publicSharePath"`
State string `xml:"currentRevision>state"`
CreatedAt Time `xml:"currentRevision>created"`
ModifiedAt Time `xml:"currentRevision>modified"`
Updated Time `xml:"currentRevision>updated"`
Size int64 `xml:"currentRevision>size"`
MimeType string `xml:"currentRevision>mime"`
MD5 string `xml:"currentRevision>md5"`
}
// Error is a custom Error for wrapping Jottacloud error responses
type Error struct {
StatusCode int `xml:"code"`
Message string `xml:"message"`
Reason string `xml:"reason"`
Cause string `xml:"cause"`
}
// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("error %d", e.StatusCode)
if e.Message != "" {
out += ": " + e.Message
}
if e.Reason != "" {
out += fmt.Sprintf(" (%+v)", e.Reason)
}
return out
}

View File

@@ -1,29 +0,0 @@
package api
import (
"encoding/xml"
"testing"
"time"
)
func TestMountpointEmptyModificationTime(t *testing.T) {
mountpoint := `
<mountPoint time="2018-08-12-T09:58:24Z" host="dn-157">
<name xml:space="preserve">Sync</name>
<path xml:space="preserve">/foo/Jotta</path>
<abspath xml:space="preserve">/foo/Jotta</abspath>
<size>0</size>
<modified></modified>
<device>Jotta</device>
<user>foo</user>
<metadata first="" max="" total="0" num_folders="0" num_files="0"/>
</mountPoint>
`
var jf JottaFolder
if err := xml.Unmarshal([]byte(mountpoint), &jf); err != nil {
t.Fatal(err)
}
if !time.Time(jf.ModifiedAt).IsZero() {
t.Errorf("got non-zero time, want zero")
}
}

File diff suppressed because it is too large

View File

@@ -1,42 +0,0 @@
package jottacloud
import (
"crypto/md5"
"fmt"
"io"
"testing"
"github.com/ncw/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestReadMD5(t *testing.T) {
// Check readMD5 for different size and threshold
for _, size := range []int64{0, 1024, 10 * 1024, 100 * 1024} {
t.Run(fmt.Sprintf("%d", size), func(t *testing.T) {
hasher := md5.New()
n, err := io.Copy(hasher, readers.NewPatternReader(size))
require.NoError(t, err)
assert.Equal(t, n, size)
wantMD5 := fmt.Sprintf("%x", hasher.Sum(nil))
for _, threshold := range []int64{512, 1024, 10 * 1024, 20 * 1024} {
t.Run(fmt.Sprintf("%d", threshold), func(t *testing.T) {
in := readers.NewPatternReader(size)
gotMD5, out, cleanup, err := readMD5(in, size, threshold)
defer cleanup()
require.NoError(t, err)
assert.Equal(t, wantMD5, gotMD5)
// check md5hash of out
hasher := md5.New()
n, err := io.Copy(hasher, out)
require.NoError(t, err)
assert.Equal(t, n, size)
outMD5 := fmt.Sprintf("%x", hasher.Sum(nil))
assert.Equal(t, wantMD5, outMD5)
})
}
})
}
}
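// readMD5 itself lives in the suppressed jottacloud.go diff. The idea the test
// exercises: Jottacloud wants the MD5 before the upload starts, so the input is
// hashed while being buffered (in memory for small files, spooled to a temp file
// above the threshold) and then re-read for the actual upload. A rough sketch of
// the in-memory half only (assumed shape, not the real implementation):
//
//	func hashAndBuffer(in io.Reader) (md5sum string, out io.Reader, err error) {
//		h := md5.New()
//		var buf bytes.Buffer
//		if _, err = io.Copy(io.MultiWriter(h, &buf), in); err != nil {
//			return "", nil, err
//		}
//		return fmt.Sprintf("%x", h.Sum(nil)), &buf, nil
//	}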

View File

@@ -1,17 +0,0 @@
// Test Box filesystem interface
package jottacloud_test
import (
"testing"
"github.com/ncw/rclone/backend/jottacloud"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestJottacloud:",
NilObject: (*jottacloud.Object)(nil),
})
}

View File

@@ -1,77 +0,0 @@
/*
Translate file names for JottaCloud adapted from OneDrive
The following characters are Jottacloud reserved characters, and can't
be used in JottaCloud folder and file names.
jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"
*/
package jottacloud
import (
"regexp"
"strings"
)
// charMap holds replacements for characters
//
// Onedrive has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '', // FULLWIDTH REVERSE SOLIDUS
'*': '', // FULLWIDTH ASTERISK
'<': '', // FULLWIDTH LESS-THAN SIGN
'>': '', // FULLWIDTH GREATER-THAN SIGN
'?': '', // FULLWIDTH QUESTION MARK
':': '', // FULLWIDTH COLON
';': '', // FULLWIDTH SEMICOLON
'|': '', // FULLWIDTH VERTICAL LINE
'"': '', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
fixEndingWithSpace = regexp.MustCompile(` (/|$)`)
)
func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}
// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// Filenames can't start with space
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Filenames can't end with space
in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != ' ' {
return replacement
}
return c
}, in)
}
// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}

View File

@@ -1,28 +0,0 @@
package jottacloud
import "testing"
func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{`\*<>?:;|"`, ``},
{`\*<>?:;|"\*<>?:;|"`, ``},
{" leading space", "␠leading space"},
{"trailing space ", "trailing space␠"},
{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
{"trailing space /trailing space /trailing space ", "trailing space␠/trailing space␠/trailing space␠"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}

View File

@@ -16,10 +16,8 @@ import (
"unicode/utf8" "unicode/utf8"
"github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap" "github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash" "github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/readers" "github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors" "github.com/pkg/errors"
@@ -50,32 +48,18 @@ func init() {
			Advanced: true,
		}, {
			Name: "skip_links",
-			Help: `Don't warn about skipped symlinks.
-This flag disables warning messages on skipped symlinks or junction
-points, as you explicitly acknowledge that they should be skipped.`,
+			Help:     "Don't warn about skipped symlinks.",
			Default:  false,
			NoPrefix: true,
			Advanced: true,
		}, {
			Name: "no_unicode_normalization",
-			Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
-This flag is deprecated now. Rclone no longer normalizes unicode file
-names, but it compares them with unicode normalization in the sync
-routine instead.`,
+			Help:     "Don't apply unicode normalization to paths and filenames",
			Default:  false,
			Advanced: true,
		}, {
			Name: "no_check_updated",
-			Help: `Don't check to see if the files change during upload
-Normally rclone checks the size and modification time of files as they
-are being uploaded and aborts with a message which starts "can't copy
-- source file is being updated" if the file changes during upload.
-However on some file systems this modification time check may fail (eg
-[Glusterfs #2206](https://github.com/ncw/rclone/issues/2206)) so this
-check can be disabled with this flag.`,
+			Help:     "Don't check to see if the files change during upload",
			Default:  false,
			Advanced: true,
		}, {
@@ -167,7 +151,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	}
	if err == nil && fi.Mode().IsRegular() {
		// It is a file, so use the parent as the root
-		f.root = filepath.Dir(f.root)
+		f.root, _ = getDirFile(f.root)
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
@@ -296,13 +280,6 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
			// Follow symlinks if required
			if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
				fi, err = os.Stat(newPath)
-				if os.IsNotExist(err) {
-					// Skip bad symlinks
-					err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
-					fs.Errorf(newRemote, "Listing error: %v", err)
-					accounting.Stats.Error(err)
-					continue
-				}
				if err != nil {
					return nil, err
				}
@@ -588,7 +565,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
	}
	// Create parent of destination
-	dstParentPath := filepath.Dir(dstPath)
+	dstParentPath, _ := getDirFile(dstPath)
	err = os.MkdirAll(dstParentPath, 0777)
	if err != nil {
		return err
@@ -807,7 +784,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// mkdirAll makes all the directories needed to store the object
func (o *Object) mkdirAll() error {
-	dir := filepath.Dir(o.path)
+	dir, _ := getDirFile(o.path)
	return os.MkdirAll(dir, 0777)
}
@@ -831,12 +808,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
		return err
	}
-	// Pre-allocate the file for performance reasons
-	err = preAllocate(src.Size(), out)
-	if err != nil {
-		fs.Debugf(o, "Failed to pre-allocate: %v", err)
-	}
	// Calculate the hash of the object we are reading as we go along
	hash, err := hash.NewMultiHasherTypes(hashes)
	if err != nil {
@@ -901,6 +872,17 @@ func (o *Object) Remove() error {
	return remove(o.path)
}
+// Return the directory and file from an OS path. Assumes
+// os.PathSeparator is used.
+func getDirFile(s string) (string, string) {
+	i := strings.LastIndex(s, string(os.PathSeparator))
+	dir, file := s[:i], s[i+1:]
+	if dir == "" {
+		dir = string(os.PathSeparator)
+	}
+	return dir, file
+}
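// One version above uses filepath.Dir from the standard library instead of this
// helper; for a Unix-style path the two agree on the directory part:
//
//	dir, _ := getDirFile("/home/user/file.txt") // "/home/user"
//	dir2 := filepath.Dir("/home/user/file.txt") // "/home/user"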
// cleanPathFragment cleans an OS path fragment which is part of a
// bigger path and not necessarily absolute
func cleanPathFragment(s string) string {

View File

@@ -1,10 +0,0 @@
//+build !windows,!linux
package local
import "os"
// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
return nil
}

View File

@@ -1,22 +0,0 @@
//+build linux
package local
import (
"os"
"golang.org/x/sys/unix"
)
// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
if size <= 0 {
return nil
}
err := unix.Fallocate(int(out.Fd()), unix.FALLOC_FL_KEEP_SIZE, 0, size)
// FIXME could be doing something here
// if err == unix.ENOSPC {
// log.Printf("No space")
// }
return err
}

View File

@@ -1,79 +0,0 @@
//+build windows
package local
import (
"os"
"syscall"
"unsafe"
"github.com/pkg/errors"
"golang.org/x/sys/windows"
)
var (
ntdll = windows.NewLazySystemDLL("ntdll.dll")
ntQueryVolumeInformationFile = ntdll.NewProc("NtQueryVolumeInformationFile")
ntSetInformationFile = ntdll.NewProc("NtSetInformationFile")
)
type fileAllocationInformation struct {
AllocationSize uint64
}
type fileFsSizeInformation struct {
TotalAllocationUnits uint64
AvailableAllocationUnits uint64
SectorsPerAllocationUnit uint32
BytesPerSector uint32
}
type ioStatusBlock struct {
Status, Information uintptr
}
// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
if size <= 0 {
return nil
}
var (
iosb ioStatusBlock
fsSizeInfo fileFsSizeInformation
allocInfo fileAllocationInformation
)
// Query info about the block sizes on the file system
_, _, e1 := ntQueryVolumeInformationFile.Call(
uintptr(out.Fd()),
uintptr(unsafe.Pointer(&iosb)),
uintptr(unsafe.Pointer(&fsSizeInfo)),
uintptr(unsafe.Sizeof(fsSizeInfo)),
uintptr(3), // FileFsSizeInformation
)
if e1 != nil && e1 != syscall.Errno(0) {
return errors.Wrap(e1, "preAllocate NtQueryVolumeInformationFile failed")
}
// Calculate the allocation size
clusterSize := uint64(fsSizeInfo.BytesPerSector) * uint64(fsSizeInfo.SectorsPerAllocationUnit)
if clusterSize <= 0 {
return errors.Errorf("preAllocate clusterSize %d <= 0", clusterSize)
}
allocInfo.AllocationSize = (1 + uint64(size-1)/clusterSize) * clusterSize
// Ask for the allocation
_, _, e1 = ntSetInformationFile.Call(
uintptr(out.Fd()),
uintptr(unsafe.Pointer(&iosb)),
uintptr(unsafe.Pointer(&allocInfo)),
uintptr(unsafe.Sizeof(allocInfo)),
uintptr(19), // FileAllocationInformation
)
if e1 != nil && e1 != syscall.Errno(0) {
return errors.Wrap(e1, "preAllocate NtSetInformationFile failed")
}
return nil
}
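// A usage sketch for a pre-allocation helper like the ones above: call it right
// after creating the destination file and before copying data in, and treat a
// failure as merely worth logging (illustrative only, not the rclone code):
//
//	out, err := os.Create(dstPath)
//	if err != nil {
//		return err
//	}
//	if err := preAllocate(srcSize, out); err != nil {
//		log.Printf("pre-allocate failed (continuing): %v", err)
//	}
//	_, err = io.Copy(out, src)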

View File

@@ -40,6 +40,7 @@ const (
	maxSleep      = 2 * time.Second
	eventWaitTime = 500 * time.Millisecond
	decayConstant = 2 // bigger for slower decay, exponential
+	useTrash      = true // FIXME make configurable - rclone global
)
var (
@@ -64,19 +65,7 @@ func init() {
			IsPassword: true,
		}, {
			Name: "debug",
-			Help: `Output more debug from Mega.
-If this flag is set (along with -vv) it will print further debugging
-information from the mega backend.`,
-			Default:  false,
-			Advanced: true,
-		}, {
-			Name: "hard_delete",
-			Help: `Delete files permanently rather than putting them into the trash.
-Normally the mega backend will put all deletions into the trash rather
-than permanently deleting them.  If you specify this then rclone will
-permanently delete objects instead.`,
+			Help:     "If set then output more debug from mega.",
			Default:  false,
			Advanced: true,
		}},
@@ -88,7 +77,6 @@ type Options struct {
	User  string `config:"user"`
	Pass  string `config:"pass"`
	Debug bool   `config:"debug"`
-	HardDelete bool `config:"hard_delete"`
}
// Fs represents a remote mega
@@ -569,7 +557,7 @@ func (f *Fs) Mkdir(dir string) error {
// deleteNode removes a file or directory, observing useTrash
func (f *Fs) deleteNode(node *mega.Node) (err error) {
	err = f.pacer.Call(func() (bool, error) {
-		err = f.srv.Delete(node, f.opt.HardDelete)
+		err = f.srv.Delete(node, !useTrash)
		return shouldRetry(err)
	})
	return err

View File

@@ -9,9 +9,6 @@ import (
const ( const (
timeFormat = `"` + time.RFC3339 + `"` timeFormat = `"` + time.RFC3339 + `"`
// PackageTypeOneNote is the package type value for OneNote files
PackageTypeOneNote = "oneNote"
) )
// Error is returned from one drive when things go wrong // Error is returned from one drive when things go wrong
@@ -97,7 +94,6 @@ type ItemReference struct {
DriveID string `json:"driveId"` // Unique identifier for the Drive that contains the item. Read-only. DriveID string `json:"driveId"` // Unique identifier for the Drive that contains the item. Read-only.
ID string `json:"id"` // Unique identifier for the item. Read/Write. ID string `json:"id"` // Unique identifier for the item. Read/Write.
Path string `json:"path"` // Path that used to navigate to the item. Read/Write. Path string `json:"path"` // Path that used to navigate to the item. Read/Write.
DriveType string `json:"driveType"` // Type of the drive, Read-Only
} }
// RemoteItemFacet groups data needed to reference a OneDrive remote item // RemoteItemFacet groups data needed to reference a OneDrive remote item
@@ -110,7 +106,6 @@ type RemoteItemFacet struct {
LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // Date and time the item was last modified. Read-only. LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // Date and time the item was last modified. Read-only.
Folder *FolderFacet `json:"folder"` // Folder metadata, if the item is a folder. Read-only. Folder *FolderFacet `json:"folder"` // Folder metadata, if the item is a folder. Read-only.
File *FileFacet `json:"file"` // File metadata, if the item is a file. Read-only. File *FileFacet `json:"file"` // File metadata, if the item is a file. Read-only.
Package *PackageFacet `json:"package"` // If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others. Read-only.
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write. FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
ParentReference *ItemReference `json:"parentReference"` // Parent information, if the item has a parent. Read-write. ParentReference *ItemReference `json:"parentReference"` // Parent information, if the item has a parent. Read-write.
Size int64 `json:"size"` // Size of the item in bytes. Read-only. Size int64 `json:"size"` // Size of the item in bytes. Read-only.
@@ -151,13 +146,6 @@ type FileSystemInfoFacet struct {
type DeletedFacet struct { type DeletedFacet struct {
} }
// PackageFacet indicates that a DriveItem is the top level item
// in a "package" or a collection of items that should be treated as a collection instead of individual items.
// `oneNote` is the only currently defined value.
type PackageFacet struct {
Type string `json:"type"`
}
// Item represents metadata for an item in OneDrive // Item represents metadata for an item in OneDrive
type Item struct { type Item struct {
ID string `json:"id"` // The unique identifier of the item within the Drive. Read-only. ID string `json:"id"` // The unique identifier of the item within the Drive. Read-only.
@@ -181,7 +169,6 @@ type Item struct {
// Audio *AudioFacet `json:"audio"` // Audio metadata, if the item is an audio file. Read-only. // Audio *AudioFacet `json:"audio"` // Audio metadata, if the item is an audio file. Read-only.
// Video *VideoFacet `json:"video"` // Video metadata, if the item is a video. Read-only. // Video *VideoFacet `json:"video"` // Video metadata, if the item is a video. Read-only.
// Location *LocationFacet `json:"location"` // Location metadata, if the item has location data. Read-only. // Location *LocationFacet `json:"location"` // Location metadata, if the item has location data. Read-only.
Package *PackageFacet `json:"package"` // If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others. Read-only.
Deleted *DeletedFacet `json:"deleted"` // Information about the deleted state of the item. Read-only. Deleted *DeletedFacet `json:"deleted"` // Information about the deleted state of the item. Read-only.
} }
@@ -250,28 +237,6 @@ type MoveItemRequest struct {
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo,omitempty"` // File system information on client. Read-write. FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo,omitempty"` // File system information on client. Read-write.
} }
//CreateShareLinkRequest is the request to create a sharing link
//Always Type:view and Scope:anonymous for public sharing
type CreateShareLinkRequest struct {
Type string `json:"type"` //Link type in View, Edit or Embed
Scope string `json:"scope,omitempty"` //Optional. Scope in anonymous, organization
}
//CreateShareLinkResponse is the response from CreateShareLinkRequest
type CreateShareLinkResponse struct {
ID string `json:"id"`
Roles []string `json:"roles"`
Link struct {
Type string `json:"type"`
Scope string `json:"scope"`
WebURL string `json:"webUrl"`
Application struct {
ID string `json:"id"`
DisplayName string `json:"displayName"`
} `json:"application"`
} `json:"link"`
}
// AsyncOperationStatus provides information on the status of an asynchronous job progress. // AsyncOperationStatus provides information on the status of an asynchronous job progress.
// //
// The following API calls return AsyncOperationStatus resources: // The following API calls return AsyncOperationStatus resources:
@@ -279,6 +244,7 @@ type CreateShareLinkResponse struct {
// Copy Item // Copy Item
// Upload From URL // Upload From URL
type AsyncOperationStatus struct { type AsyncOperationStatus struct {
Operation string `json:"operation"` // The type of job being run.
PercentageComplete float64 `json:"percentageComplete"` // A float value between 0 and 100 that indicates the percentage complete. PercentageComplete float64 `json:"percentageComplete"` // A float value between 0 and 100 that indicates the percentage complete.
Status string `json:"status"` // A string value that maps to an enumeration of possible values about the status of the job. "notStarted | inProgress | completed | updating | failed | deletePending | deleteFailed | waiting" Status string `json:"status"` // A string value that maps to an enumeration of possible values about the status of the job. "notStarted | inProgress | completed | updating | failed | deletePending | deleteFailed | waiting"
} }
@@ -315,24 +281,6 @@ func (i *Item) GetFolder() *FolderFacet {
return i.Folder return i.Folder
} }
// GetPackage returns a normalized Package of the item
func (i *Item) GetPackage() *PackageFacet {
if i.IsRemote() && i.RemoteItem.Package != nil {
return i.RemoteItem.Package
}
return i.Package
}
// GetPackageType returns the package type of the item if available,
// otherwise ""
func (i *Item) GetPackageType() string {
pack := i.GetPackage()
if pack == nil {
return ""
}
return pack.Type
}
// GetFile returns a normalized File of the item // GetFile returns a normalized File of the item
func (i *Item) GetFile() *FileFacet { func (i *Item) GetFile() *FileFacet {
if i.IsRemote() && i.RemoteItem.File != nil { if i.IsRemote() && i.RemoteItem.File != nil {

View File

@@ -10,6 +10,7 @@ import (
"io" "io"
"log" "log"
"net/http" "net/http"
"net/url"
"path" "path"
"strings" "strings"
"time" "time"
@@ -32,34 +33,48 @@ import (
) )
const ( const (
rcloneClientID = "b15665d9-eda6-4092-8539-0eec376afd59" rclonePersonalClientID = "0000000044165769"
rcloneEncryptedClientSecret = "_JUdzh3LnKNqSPcf4Wu5fgMFIQOI8glZu_akYgR8yf6egowNBg-R" rclonePersonalEncryptedClientSecret = "ugVWLNhKkVT1-cbTRO-6z1MlzwdW6aMwpKgNaFG-qXjEn_WfDnG9TVyRA5yuoliU"
rcloneBusinessClientID = "52857fec-4bc2-483f-9f1b-5fe28e97532c"
rcloneBusinessEncryptedClientSecret = "6t4pC8l6L66SFYVIi8PgECDyjXy_ABo1nsTaE-Lr9LpzC6yT4vNOwHsakwwdEui0O6B0kX8_xbBLj91J"
minSleep = 10 * time.Millisecond minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential decayConstant = 2 // bigger for slower decay, exponential
graphURL = "https://graph.microsoft.com/v1.0" rootURLPersonal = "https://api.onedrive.com/v1.0/drive" // root URL for requests
configDriveID = "drive_id" discoveryServiceURL = "https://api.office.com/discovery/"
configDriveType = "drive_type" configResourceURL = "resource_url"
driveTypePersonal = "personal"
driveTypeBusiness = "business"
driveTypeSharepoint = "documentLibrary"
defaultChunkSize = 10 * fs.MebiByte
chunkSizeMultiple = 320 * fs.KibiByte
) )
// Globals // Globals
var ( var (
// Description of how to auth for this app for a business account // Description of how to auth for this app for a personal account
oauthConfig = &oauth2.Config{ oauthPersonalConfig = &oauth2.Config{
Endpoint: oauth2.Endpoint{ Scopes: []string{
AuthURL: "https://login.microsoftonline.com/common/oauth2/v2.0/authorize", "wl.signin", // Allow single sign-on capabilities
TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token", "wl.offline_access", // Allow receiving a refresh token
"onedrive.readwrite", // r/w perms to all of a user's OneDrive files
}, },
Scopes: []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"}, Endpoint: oauth2.Endpoint{
ClientID: rcloneClientID, AuthURL: "https://login.live.com/oauth20_authorize.srf",
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), TokenURL: "https://login.live.com/oauth20_token.srf",
},
ClientID: rclonePersonalClientID,
ClientSecret: obscure.MustReveal(rclonePersonalEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL, RedirectURL: oauthutil.RedirectLocalhostURL,
} }
// Description of how to auth for this app for a business account
oauthBusinessConfig = &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: "https://login.microsoftonline.com/common/oauth2/authorize",
TokenURL: "https://login.microsoftonline.com/common/oauth2/token",
},
ClientID: rcloneBusinessClientID,
ClientSecret: obscure.MustReveal(rcloneBusinessEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
oauthBusinessResource = oauth2.SetAuthURLParam("resource", discoveryServiceURL)
sharedURL = "https://api.onedrive.com/v1.0/drives" // root URL for remote shared resources
) )
// Register with Fs // Register with Fs
@@ -69,7 +84,28 @@ func init() {
Description: "Microsoft OneDrive", Description: "Microsoft OneDrive",
NewFs: NewFs, NewFs: NewFs,
Config: func(name string, m configmap.Mapper) { Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("onedrive", name, m, oauthConfig) // choose account type
fmt.Printf("Choose OneDrive account type?\n")
fmt.Printf(" * Say b for a OneDrive business account\n")
fmt.Printf(" * Say p for a personal OneDrive account\n")
isPersonal := config.Command([]string{"bBusiness", "pPersonal"}) == 'p'
if isPersonal {
// for personal accounts we don't save a field about the account
err := oauthutil.Config("onedrive", name, m, oauthPersonalConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
} else {
err := oauthutil.ConfigErrorCheck("onedrive", name, m, func(req *http.Request) oauthutil.AuthError {
var resp oauthutil.AuthError
resp.Name = req.URL.Query().Get("error")
resp.Code = strings.Split(req.URL.Query().Get("error_description"), ":")[0] // error_description begins with XXXXXXXXXXXX:
resp.Description = strings.Join(strings.Split(req.URL.Query().Get("error_description"), ":")[1:], ":")
resp.HelpURL = "https://rclone.org/onedrive/#troubleshooting"
return resp
}, oauthBusinessConfig, oauthBusinessResource)
if err != nil { if err != nil {
log.Fatalf("Failed to configure token: %v", err) log.Fatalf("Failed to configure token: %v", err)
return return
@@ -81,131 +117,114 @@ func init() {
return return
} }
type driveResource struct { type serviceResource struct {
DriveID string `json:"id"` ServiceAPIVersion string `json:"serviceApiVersion"`
DriveName string `json:"name"` ServiceEndpointURI string `json:"serviceEndpointUri"`
DriveType string `json:"driveType"` ServiceResourceID string `json:"serviceResourceId"`
} }
type drivesResponse struct { type serviceResponse struct {
Drives []driveResource `json:"value"` Services []serviceResource `json:"value"`
} }
type siteResource struct { oAuthClient, _, err := oauthutil.NewClient(name, m, oauthBusinessConfig)
SiteID string `json:"id"`
SiteName string `json:"displayName"`
SiteURL string `json:"webUrl"`
}
type siteResponse struct {
Sites []siteResource `json:"value"`
}
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil { if err != nil {
log.Fatalf("Failed to configure OneDrive: %v", err) log.Fatalf("Failed to configure OneDrive: %v", err)
return
} }
srv := rest.NewClient(oAuthClient) srv := rest.NewClient(oAuthClient)
var opts rest.Opts opts := rest.Opts{
var finalDriveID string
var siteID string
switch config.Choose("Your choice",
[]string{"onedrive", "sharepoint", "driveid", "siteid", "search"},
[]string{"OneDrive Personal or Business", "Root Sharepoint site", "Type in driveID", "Type in SiteID", "Search a Sharepoint site"},
false) {
case "onedrive":
opts = rest.Opts{
Method: "GET", Method: "GET",
RootURL: graphURL, RootURL: discoveryServiceURL,
Path: "/me/drives", Path: "/v2.0/me/services",
} }
case "sharepoint": services := serviceResponse{}
opts = rest.Opts{ resp, err := srv.CallJSON(&opts, nil, &services)
Method: "GET",
RootURL: graphURL,
Path: "/sites/root/drives",
}
case "driveid":
fmt.Printf("Paste your Drive ID here> ")
finalDriveID = config.ReadLine()
case "siteid":
fmt.Printf("Paste your Site ID here> ")
siteID = config.ReadLine()
case "search":
fmt.Printf("What to search for> ")
searchTerm := config.ReadLine()
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites?search=" + searchTerm,
}
sites := siteResponse{}
_, err := srv.CallJSON(&opts, nil, &sites)
if err != nil { if err != nil {
log.Fatalf("Failed to query available sites: %v", err) fs.Errorf(nil, "Failed to query available services: %v", err)
return
}
if resp.StatusCode != 200 {
fs.Errorf(nil, "Failed to query available services: Got HTTP error code %d", resp.StatusCode)
return
} }
if len(sites.Sites) == 0 { var resourcesURL []string
log.Fatalf("Search for '%s' returned no results", searchTerm) var resourcesID []string
for _, service := range services.Services {
if service.ServiceAPIVersion == "v2.0" {
resourcesID = append(resourcesID, service.ServiceResourceID)
resourcesURL = append(resourcesURL, service.ServiceEndpointURI)
}
// we only support 2.0 API
fs.Infof(nil, "Skipping API %s endpoint %s", service.ServiceAPIVersion, service.ServiceEndpointURI)
}
var foundService string
if len(resourcesID) == 0 {
fs.Errorf(nil, "No Service found")
return
} else if len(resourcesID) == 1 {
foundService = resourcesID[0]
} else { } else {
fmt.Printf("Found %d sites, please select the one you want to use:\n", len(sites.Sites)) foundService = config.Choose("Choose resource URL", resourcesID, resourcesURL, false)
for index, site := range sites.Sites {
fmt.Printf("%d: %s (%s) id=%s\n", index, site.SiteName, site.SiteURL, site.SiteID)
}
siteID = sites.Sites[config.ChooseNumber("Chose drive to use:", 0, len(sites.Sites)-1)].SiteID
}
} }
// if we have a siteID we need to ask for the drives m.Set(configResourceURL, foundService)
if siteID != "" { oauthBusinessResource = oauth2.SetAuthURLParam("resource", foundService)
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/" + siteID + "/drives",
}
}
// We don't have the final ID yet? // get the token from the initial config
// query Microsoft Graph // we need to update the token with a resource
if finalDriveID == "" { // specific token we will query now
drives := drivesResponse{} token, err := oauthutil.GetToken(name, m)
_, err := srv.CallJSON(&opts, nil, &drives)
if err != nil { if err != nil {
log.Fatalf("Failed to query available drives: %v", err) fs.Errorf(nil, "Error while getting token: %s", err)
return
} }
if len(drives.Drives) == 0 { // values for the token query
log.Fatalf("No drives found") values := url.Values{}
} else { values.Set("refresh_token", token.RefreshToken)
fmt.Printf("Found %d drives, please select the one you want to use:\n", len(drives.Drives)) values.Set("grant_type", "refresh_token")
for index, drive := range drives.Drives { values.Set("resource", foundService)
fmt.Printf("%d: %s (%s) id=%s\n", index, drive.DriveName, drive.DriveType, drive.DriveID) values.Set("client_id", oauthBusinessConfig.ClientID)
} values.Set("client_secret", oauthBusinessConfig.ClientSecret)
finalDriveID = drives.Drives[config.ChooseNumber("Chose drive to use:", 0, len(drives.Drives)-1)].DriveID
}
}
// Test the driveID and get drive type
opts = rest.Opts{ opts = rest.Opts{
Method: "GET", Method: "POST",
RootURL: graphURL, RootURL: oauthBusinessConfig.Endpoint.TokenURL,
Path: "/drives/" + finalDriveID + "/root"} ContentType: "application/x-www-form-urlencoded",
var rootItem api.Item Body: strings.NewReader(values.Encode()),
_, err = srv.CallJSON(&opts, nil, &rootItem) }
// tokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
// we are only interested in the new tokens, all other fields we don't care
type tokenJSON struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
}
jsonToken := tokenJSON{}
resp, err = srv.CallJSON(&opts, nil, &jsonToken)
if err != nil { if err != nil {
log.Fatalf("Failed to query root for drive %s: %v", finalDriveID, err) fs.Errorf(nil, "Failed to get resource token: %v", err)
return
}
if resp.StatusCode != 200 {
fs.Errorf(nil, "Failed to get resource token: Got HTTP error code %d", resp.StatusCode)
return
} }
fmt.Printf("Found drive '%s' of type '%s', URL: %s\nIs that okay?\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL) // update the tokens
// This does not work, YET :) token.AccessToken = jsonToken.AccessToken
if !config.Confirm() { token.RefreshToken = jsonToken.RefreshToken
log.Fatalf("Cancelled by user")
}
m.Set(configDriveID, finalDriveID) // finally save them in the config
m.Set(configDriveType, rootItem.ParentReference.DriveType) err = oauthutil.PutToken(name, m, token, true)
config.SaveConfig() if err != nil {
fs.Errorf(nil, "Error while setting token: %s", err)
}
}
}, },
Options: []fs.Option{{ Options: []fs.Option{{
Name: config.ConfigClientID, Name: config.ConfigClientID,
@@ -215,32 +234,8 @@ func init() {
Help: "Microsoft App Client Secret\nLeave blank normally.", Help: "Microsoft App Client Secret\nLeave blank normally.",
}, { }, {
Name: "chunk_size", Name: "chunk_size",
Help: `Chunk size to upload files with - must be multiple of 320k. Help: "Chunk size to upload files with - must be multiple of 320k.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
Above this size files will be chunked - must be multiple of 320k. Note
that the chunks will be buffered into memory.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "drive_id",
Help: "The ID of the drive to use",
Default: "",
Advanced: true,
}, {
Name: "drive_type",
Help: "The type of the drive ( personal | business | documentLibrary )",
Default: "",
Advanced: true,
}, {
Name: "expose_onenote_files",
Help: `Set to make OneNote files show up in directory listings.
By default rclone will hide OneNote files in directory listings because
operations like "Open" and "Update" won't work on them. But this
behaviour may also prevent you from deleting them. If you want to
delete OneNote files or otherwise want them to show up in directory
listing, set this option.`,
Default: false,
Advanced: true, Advanced: true,
}}, }},
}) })
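
Both sides of the chunk_size option above require upload chunks to be a whole multiple of 320 KiB, and the longer help text notes that each chunk is buffered in memory. A minimal standalone sketch of that validation follows; it is an illustration only, not rclone's own checkUploadChunkSize helper, and the example sizes are made up.

package main

import "fmt"

const chunkMultiple = 320 * 1024 // chunks must be a whole multiple of 320 KiB

// validChunkSize reports whether size can be used as an upload chunk size
// and, if not, the nearest smaller size that can.
func validChunkSize(size int64) (ok bool, suggestion int64) {
	if size >= chunkMultiple && size%chunkMultiple == 0 {
		return true, size
	}
	rounded := (size / chunkMultiple) * chunkMultiple
	if rounded < chunkMultiple {
		rounded = chunkMultiple
	}
	return false, rounded
}

func main() {
	// The 10 MB default shown in this hunk is 32 whole multiples of 320 KiB.
	for _, size := range []int64{10 * 1024 * 1024, 5 * 1000 * 1000} {
		ok, suggestion := validChunkSize(size)
		fmt.Printf("%d bytes: valid=%v, nearest valid=%d\n", size, ok, suggestion)
	}
}
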
@@ -249,9 +244,7 @@ listing, set this option.`,
// Options defines the configuration for this backend // Options defines the configuration for this backend
type Options struct { type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"` ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"` ResourceURL string `config:"resource_url"`
DriveType string `config:"drive_type"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
} }
// Fs represents a remote one drive // Fs represents a remote one drive
@@ -264,8 +257,7 @@ type Fs struct {
dirCache *dircache.DirCache // Map of directory path to directory id dirCache *dircache.DirCache // Map of directory path to directory id
pacer *pacer.Pacer // pacer for API calls pacer *pacer.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry tokenRenewer *oauthutil.Renew // renew the token on expiry
driveID string // ID to use for querying Microsoft Graph isBusiness bool // true if this is an OneDrive Business account
driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
} }
// Object describes a one drive object // Object describes a one drive object
@@ -275,7 +267,6 @@ type Object struct {
fs *Fs // what this object is part of fs *Fs // what this object is part of
remote string // The remote path remote string // The remote path
hasMetaData bool // whether info below has been set hasMetaData bool // whether info below has been set
isOneNoteFile bool // Whether the object is a OneNote file
size int64 // size of the object size int64 // size of the object
modTime time.Time // modification time of the object modTime time.Time // modification time of the object
id string // ID of the object id string // ID of the object
@@ -336,18 +327,10 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// readMetaDataForPath reads the metadata from the path // readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) { func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
var opts rest.Opts opts := rest.Opts{
if len(path) == 0 {
opts = rest.Opts{
Method: "GET",
Path: "/root",
}
} else {
opts = rest.Opts{
Method: "GET", Method: "GET",
Path: "/root:/" + rest.URLPathEscape(replaceReservedChars(path)), Path: "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
} }
}
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &info) resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err) return shouldRetry(resp, err)
@@ -370,25 +353,6 @@ func errorHandler(resp *http.Response) error {
return errResponse return errResponse
} }
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.Byte
if cs%chunkSizeMultiple != 0 {
return errors.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
}
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
// NewFs constructs an Fs from the path, container:path // NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct // Parse config into Options struct
@@ -397,16 +361,26 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
if opt.ChunkSize%(320*1024) != 0 {
err = checkUploadChunkSize(opt.ChunkSize) return nil, errors.Errorf("chunk size %d is not a multiple of 320k", opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "onedrive: chunk size")
} }
// if we have a resource URL it's a business account otherwise a personal one
isBusiness := opt.ResourceURL != ""
var rootURL string
var oauthConfig *oauth2.Config
if !isBusiness {
// personal account setup
oauthConfig = oauthPersonalConfig
rootURL = rootURLPersonal
} else {
// business account setup
oauthConfig = oauthBusinessConfig
rootURL = opt.ResourceURL + "_api/v2.0/drives/me"
sharedURL = opt.ResourceURL + "_api/v2.0/drives"
if opt.DriveID == "" || opt.DriveType == "" { // update the URL in the AuthOptions
log.Fatalf("Unable to get drive_id and drive_type. If you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend.") oauthBusinessResource = oauth2.SetAuthURLParam("resource", opt.ResourceURL)
} }
root = parsePath(root) root = parsePath(root)
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig) oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil { if err != nil {
@@ -417,14 +391,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
name: name, name: name,
root: root, root: root,
opt: *opt, opt: *opt,
driveID: opt.DriveID, srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
driveType: opt.DriveType,
srv: rest.NewClient(oAuthClient).SetRoot(graphURL + "/drives/" + opt.DriveID),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant), pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
isBusiness: isBusiness,
} }
f.features = (&fs.Features{ f.features = (&fs.Features{
CaseInsensitive: true, CaseInsensitive: true,
ReadMimeType: true, // OneDrive for business doesn't support mime types properly
// so we disable it until resolved
// https://github.com/OneDrive/onedrive-api-docs/issues/643
ReadMimeType: !f.isBusiness,
CanHaveEmptyDirectories: true, CanHaveEmptyDirectories: true,
}).Fill(f) }).Fill(f)
f.srv.SetErrorHandler(errorHandler) f.srv.SetErrorHandler(errorHandler)
@@ -448,16 +424,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil { if err != nil {
// Assume it is a file // Assume it is a file
newRoot, remote := dircache.SplitPath(root) newRoot, remote := dircache.SplitPath(root)
tempF := *f newF := *f
tempF.dirCache = dircache.New(newRoot, rootInfo.ID, &tempF) newF.dirCache = dircache.New(newRoot, rootInfo.ID, &newF)
tempF.root = newRoot newF.root = newRoot
// Make new Fs which is the parent // Make new Fs which is the parent
err = tempF.dirCache.FindRoot(false) err = newF.dirCache.FindRoot(false)
if err != nil { if err != nil {
// No root so return old f // No root so return old f
return f, nil return f, nil
} }
_, err := tempF.newObjectWithInfo(remote, nil) _, err := newF.newObjectWithInfo(remote, nil)
if err != nil { if err != nil {
if err == fs.ErrorObjectNotFound { if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f // File doesn't exist so return old f
@@ -465,13 +441,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} }
return nil, err return nil, err
} }
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/ncw/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent // return an error with an fs which points to the parent
return f, fs.ErrorIsFile return &newF, fs.ErrorIsFile
} }
return f, nil return f, nil
} }
@@ -532,9 +503,6 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
} }
return "", false, err return "", false, err
} }
if info.GetPackageType() == api.PackageTypeOneNote {
return "", false, errors.New("found OneNote file when looking for folder")
}
if info.GetFolder() == nil { if info.GetFolder() == nil {
return "", false, errors.New("found file when looking for folder") return "", false, errors.New("found file when looking for folder")
} }
@@ -578,7 +546,8 @@ type listAllFn func(*api.Item) bool
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) { func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
// Top parameter asks for bigger pages of data // Top parameter asks for bigger pages of data
// https://dev.onedrive.com/odata/optional-query-parameters.htm // https://dev.onedrive.com/odata/optional-query-parameters.htm
opts := newOptsCall(dirID, "GET", "/children?$top=1000") opts := newOptsCall(dirID, "GET", "/children?top=1000")
OUTER: OUTER:
for { for {
var result api.ListChildrenResponse var result api.ListChildrenResponse
@@ -643,11 +612,6 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
} }
var iErr error var iErr error
_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool { _, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
if !f.opt.ExposeOneNoteFiles && info.GetPackageType() == api.PackageTypeOneNote {
fs.Debugf(info.Name, "OneNote file not shown in directory listing")
return false
}
remote := path.Join(dir, info.GetName()) remote := path.Join(dir, info.GetName())
folder := info.GetFolder() folder := info.GetFolder()
if folder != nil { if folder != nil {
@@ -754,17 +718,15 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
if err != nil { if err != nil {
return err return err
} }
if check { item, _, err := f.readMetaDataForPath(root)
// check to see if there are any items
found, err := f.listAll(rootID, false, false, func(item *api.Item) bool {
return true
})
if err != nil { if err != nil {
return err return err
} }
if found { if item.Folder == nil {
return fs.ErrorDirectoryNotEmpty return errors.New("not a folder")
} }
if check && item.Folder.ChildCount != 0 {
return errors.New("folder not empty")
} }
err = f.deleteObject(rootID) err = f.deleteObject(rootID)
if err != nil { if err != nil {
@@ -793,11 +755,16 @@ func (f *Fs) Precision() time.Duration {
func (f *Fs) waitForJob(location string, o *Object) error { func (f *Fs) waitForJob(location string, o *Object) error {
deadline := time.Now().Add(fs.Config.Timeout) deadline := time.Now().Add(fs.Config.Timeout)
for time.Now().Before(deadline) { for time.Now().Before(deadline) {
opts := rest.Opts{
Method: "GET",
RootURL: location,
IgnoreStatus: true, // Ignore the http status response since it seems to return valid info on 500 errors
}
var resp *http.Response var resp *http.Response
var err error var err error
var body []byte var body []byte
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = http.Get(location) resp, err = f.srv.Call(&opts)
if err != nil { if err != nil {
return fserrors.ShouldRetry(err), err return fserrors.ShouldRetry(err), err
} }
@@ -813,18 +780,19 @@ func (f *Fs) waitForJob(location string, o *Object) error {
if err != nil { if err != nil {
return errors.Wrapf(err, "async status result not JSON: %q", body) return errors.Wrapf(err, "async status result not JSON: %q", body)
} }
// See if we decoded anything...
switch status.Status { if !(status.Operation == "" && status.PercentageComplete == 0 && status.Status == "") {
case "failed": if status.Status == "failed" || status.Status == "deleteFailed" {
case "deleteFailed": return errors.Errorf("%s: async operation %q returned %q", o.remote, status.Operation, status.Status)
{
return errors.Errorf("%s: async operation returned %q", o.remote, status.Status)
} }
case "completed": } else if resp.StatusCode == 200 {
err = o.readMetaData() var info api.Item
return errors.Wrapf(err, "async operation completed but readMetaData failed") err = json.Unmarshal(body, &info)
if err != nil {
return errors.Wrapf(err, "async item result not JSON: %q", body)
}
return o.setMetaData(&info)
} }
time.Sleep(1 * time.Second) time.Sleep(1 * time.Second)
} }
return errors.Errorf("async operation didn't complete after %v", fs.Config.Timeout) return errors.Errorf("async operation didn't complete after %v", fs.Config.Timeout)
@@ -863,23 +831,22 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
} }
// Copy the object // Copy the object
opts := newOptsCall(srcObj.id, "POST", "/copy") opts := newOptsCall(srcObj.id, "POST", "/action.copy")
opts.ExtraHeaders = map[string]string{"Prefer": "respond-async"} opts.ExtraHeaders = map[string]string{"Prefer": "respond-async"}
opts.NoResponse = true opts.NoResponse = true
id, _, _ := parseDirID(directoryID) id, _, _ := parseDirID(directoryID)
replacedLeaf := replaceReservedChars(leaf) replacedLeaf := replaceReservedChars(leaf)
copyReq := api.CopyItemRequest{ copy := api.CopyItemRequest{
Name: &replacedLeaf, Name: &replacedLeaf,
ParentReference: api.ItemReference{ ParentReference: api.ItemReference{
DriveID: f.driveID,
ID: id, ID: id,
}, },
} }
var resp *http.Response var resp *http.Response
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &copyReq, nil) resp, err = f.srv.CallJSON(&opts, &copy, nil)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
@@ -973,110 +940,6 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
return dstObj, nil return dstObj, nil
} }
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := path.Join(srcFs.root, srcRemote)
dstPath := path.Join(f.root, dstRemote)
// Refuse to move to or from the root
if srcPath == "" || dstPath == "" {
fs.Debugf(src, "DirMove error: Can't move root")
return errors.New("can't move root directory")
}
// find the root src directory
err := srcFs.dirCache.FindRoot(false)
if err != nil {
return err
}
// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(true)
if err != nil {
return err
}
} else {
if f.dirCache.FoundRoot() {
return fs.ErrorDirExists
}
}
// Find ID of dst parent, creating subdirs if necessary
var leaf, dstDirectoryID string
findPath := dstRemote
if dstRemote == "" {
findPath = f.root
}
leaf, dstDirectoryID, err = f.dirCache.FindPath(findPath, true)
if err != nil {
return err
}
parsedDstDirID, _, _ := parseDirID(dstDirectoryID)
// Check destination does not exist
if dstRemote != "" {
_, err = f.dirCache.FindDir(dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
}
}
// Find ID of src
srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
if err != nil {
return err
}
// Get timestamps of src so they can be preserved
srcInfo, _, err := srcFs.readMetaDataForPath(srcPath)
if err != nil {
return err
}
// Do the move
opts := newOptsCall(srcID, "PATCH", "")
move := api.MoveItemRequest{
Name: replaceReservedChars(leaf),
ParentReference: &api.ItemReference{
ID: parsedDstDirID,
},
// We set the mod time too as it gets reset otherwise
FileSystemInfo: &api.FileSystemInfoFacet{
CreatedDateTime: srcInfo.CreatedDateTime,
LastModifiedDateTime: srcInfo.LastModifiedDateTime,
},
}
var resp *http.Response
var info api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &move, &info)
return shouldRetry(resp, err)
})
if err != nil {
return err
}
srcFs.dirCache.FlushDir(srcRemote)
return nil
}
// DirCacheFlush resets the directory cache - used in testing as an // DirCacheFlush resets the directory cache - used in testing as an
// optional interface // optional interface
func (f *Fs) DirCacheFlush() { func (f *Fs) DirCacheFlush() {
@@ -1110,36 +973,10 @@ func (f *Fs) About() (usage *fs.Usage, err error) {
// Hashes returns the supported hash sets. // Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set { func (f *Fs) Hashes() hash.Set {
if f.driveType == driveTypePersonal { if f.isBusiness {
return hash.Set(hash.SHA1)
}
return hash.Set(hash.QuickXorHash) return hash.Set(hash.QuickXorHash)
} }
return hash.Set(hash.SHA1)
// PublicLink returns a link for downloading without an account.
func (f *Fs) PublicLink(remote string) (link string, err error) {
info, _, err := f.readMetaDataForPath(f.srvPath(remote))
if err != nil {
return "", err
}
opts := newOptsCall(info.ID, "POST", "/createLink")
share := api.CreateShareLinkRequest{
Type: "view",
Scope: "anonymous",
}
var resp *http.Response
var result api.CreateShareLinkResponse
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &share, &result)
return shouldRetry(resp, err)
})
if err != nil {
fmt.Println(err)
return "", err
}
return result.Link.WebURL, nil
} }
// ------------------------------------------------------------ // ------------------------------------------------------------
@@ -1162,29 +999,24 @@ func (o *Object) Remote() string {
return o.remote return o.remote
} }
// srvPath returns a path for use in server given a remote
func (f *Fs) srvPath(remote string) string {
return replaceReservedChars(f.rootSlash() + remote)
}
// srvPath returns a path for use in server // srvPath returns a path for use in server
func (o *Object) srvPath() string { func (o *Object) srvPath() string {
return o.fs.srvPath(o.remote) return replaceReservedChars(o.fs.rootSlash() + o.remote)
} }
// Hash returns the SHA-1 of an object returning a lowercase hex string // Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) { func (o *Object) Hash(t hash.Type) (string, error) {
if o.fs.driveType == driveTypePersonal { if o.fs.isBusiness {
if t == hash.SHA1 { if t != hash.QuickXorHash {
return o.sha1, nil return "", hash.ErrUnsupported
} }
} else {
if t == hash.QuickXorHash {
return o.quickxorhash, nil return o.quickxorhash, nil
} }
} if t != hash.SHA1 {
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
return o.sha1, nil
}
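
Both variants of Hashes/Hash above choose the checksum by account type: SHA-1 for OneDrive personal, QuickXorHash for OneDrive for Business. A standalone sketch of checking a personal drive's SHA-1 (reported as lowercase hex) against a local file; the path and remote value are placeholders.

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

// fileSHA1 returns the lowercase hex SHA-1 of a local file, the form in
// which a OneDrive personal drive reports its hash.
func fileSHA1(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	h := sha1.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	// Placeholder path and remote hash, for illustration only.
	local, err := fileSHA1("example.txt")
	if err != nil {
		fmt.Println("hash error:", err)
		return
	}
	remote := "da39a3ee5e6b4b0d3255bfef95601890afd80709"
	fmt.Println("match:", strings.EqualFold(local, remote))
}
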
// Size returns the size of an object in bytes // Size returns the size of an object in bytes
func (o *Object) Size() int64 { func (o *Object) Size() int64 {
@@ -1204,8 +1036,6 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
o.hasMetaData = true o.hasMetaData = true
o.size = info.GetSize() o.size = info.GetSize()
o.isOneNoteFile = info.GetPackageType() == api.PackageTypeOneNote
// Docs: https://docs.microsoft.com/en-us/onedrive/developer/rest-api/resources/hashes // Docs: https://docs.microsoft.com/en-us/onedrive/developer/rest-api/resources/hashes
// //
// We use SHA1 for onedrive personal and QuickXorHash for onedrive for business // We use SHA1 for onedrive personal and QuickXorHash for onedrive for business
@@ -1317,10 +1147,6 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.id == "" { if o.id == "" {
return nil, errors.New("can't download - no id") return nil, errors.New("can't download - no id")
} }
if o.isOneNoteFile {
return nil, errors.New("can't open a OneNote file")
}
fs.FixRangeOption(options, o.size) fs.FixRangeOption(options, o.size)
var resp *http.Response var resp *http.Response
opts := newOptsCall(o.id, "GET", "/content") opts := newOptsCall(o.id, "GET", "/content")
@@ -1350,12 +1176,12 @@ func (o *Object) createUploadSession(modTime time.Time) (response *api.CreateUpl
opts = rest.Opts{ opts = rest.Opts{
Method: "POST", Method: "POST",
RootURL: rootURL, RootURL: rootURL,
Path: "/" + drive + "/items/" + id + ":/" + rest.URLPathEscape(replaceReservedChars(leaf)) + ":/createUploadSession", Path: "/" + drive + "/items/" + id + ":/" + rest.URLPathEscape(leaf) + ":/upload.createSession",
} }
} else { } else {
opts = rest.Opts{ opts = rest.Opts{
Method: "POST", Method: "POST",
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/createUploadSession", Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/upload.createSession",
} }
} }
createRequest := api.CreateUploadRequest{} createRequest := api.CreateUploadRequest{}
@@ -1364,12 +1190,6 @@ func (o *Object) createUploadSession(modTime time.Time) (response *api.CreateUpl
var resp *http.Response var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, &createRequest, &response) resp, err = o.fs.srv.CallJSON(&opts, &createRequest, &response)
if apiErr, ok := err.(*api.Error); ok {
if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
// Make the error more user-friendly
err = errors.New(err.Error() + " (is it a OneNote file?)")
}
}
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
return response, err return response, err
@@ -1423,10 +1243,6 @@ func (o *Object) cancelUploadSession(url string) (err error) {
// uploadMultipart uploads a file using multipart upload // uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) { func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
if size <= 0 {
panic("size passed into uploadMultipart must be > 0")
}
// Create upload session // Create upload session
fs.Debugf(o, "Starting multipart upload") fs.Debugf(o, "Starting multipart upload")
session, err := o.createUploadSession(modTime) session, err := o.createUploadSession(modTime)
@@ -1467,14 +1283,8 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (i
return info, nil return info, nil
} }
// Update the content of a remote file within 4MB size in one single request // uploadSinglepart uploads a file as a single part
// This function will set modtime after uploading, which will create a new version for the remote file
func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) { func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
panic("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
}
fs.Debugf(o, "Starting singlepart upload")
var resp *http.Response var resp *http.Response
var opts rest.Opts var opts rest.Opts
_, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false) _, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
@@ -1495,19 +1305,13 @@ func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (
Body: in, Body: in,
} }
} }
// for go1.8 (see release notes) we must nil the Body if we want a
// "Content-Length: 0" header which onedrive requires for all files.
if size == 0 { if size == 0 {
opts.Body = nil opts.Body = nil
} }
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, nil, &info) resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
if apiErr, ok := err.(*api.Error); ok {
if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
// Make the error more user-friendly
err = errors.New(err.Error() + " (is it a OneNote file?)")
}
}
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
@@ -1526,10 +1330,6 @@ func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
if o.hasMetaData && o.isOneNoteFile {
return errors.New("can't upload content to a OneNote file")
}
o.fs.tokenRenewer.Start() o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop() defer o.fs.tokenRenewer.Stop()
@@ -1537,17 +1337,15 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
modTime := src.ModTime() modTime := src.ModTime()
var info *api.Item var info *api.Item
if size > 0 { if size <= 0 {
info, err = o.uploadMultipart(in, size, modTime) // This is for 0 length files, or files with an unknown size
} else if size == 0 {
info, err = o.uploadSinglepart(in, size, modTime) info, err = o.uploadSinglepart(in, size, modTime)
} else { } else {
panic("src file size must be >= 0") info, err = o.uploadMultipart(in, size, modTime)
} }
if err != nil { if err != nil {
return err return err
} }
return o.setMetaData(info) return o.setMetaData(info)
} }
@@ -1585,7 +1383,7 @@ func newOptsCall(id string, method string, route string) (opts rest.Opts) {
func parseDirID(ID string) (string, string, string) { func parseDirID(ID string) (string, string, string) {
if strings.Index(ID, "#") >= 0 { if strings.Index(ID, "#") >= 0 {
s := strings.Split(ID, "#") s := strings.Split(ID, "#")
return s[1], s[0], graphURL + "/drives" return s[1], s[0], sharedURL
} }
return ID, "", "" return ID, "", ""
} }
@@ -1596,10 +1394,9 @@ var (
_ fs.Purger = (*Fs)(nil) _ fs.Purger = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil) _ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil) _ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil) // _ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil) _ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil) _ fs.Abouter = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Object = (*Object)(nil) _ fs.Object = (*Object)(nil)
_ fs.MimeTyper = &Object{} _ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{} _ fs.IDer = &Object{}


@@ -1,10 +1,10 @@
// Test OneDrive filesystem interface // Test OneDrive filesystem interface
package onedrive package onedrive_test
import ( import (
"testing" "testing"
"github.com/ncw/rclone/fs" "github.com/ncw/rclone/backend/onedrive"
"github.com/ncw/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
@@ -12,15 +12,6 @@ import (
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: "TestOneDrive:", RemoteName: "TestOneDrive:",
NilObject: (*Object)(nil), NilObject: (*onedrive.Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
CeilChunkSize: fstests.NextMultipleOf(chunkSizeMultiple),
},
}) })
} }
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)


@@ -140,7 +140,7 @@ func TestQuickXorHashByBlock(t *testing.T) {
got := h.Sum(nil) got := h.Sum(nil)
want, err := base64.StdEncoding.DecodeString(test.out) want, err := base64.StdEncoding.DecodeString(test.out)
require.NoError(t, err, what) require.NoError(t, err, what)
assert.Equal(t, want, got, test.size, what) assert.Equal(t, want, got[:], test.size, what)
} }
} }
} }


@@ -6,7 +6,6 @@ import (
"io" "io"
"mime/multipart" "mime/multipart"
"net/http" "net/http"
"net/url"
"path" "path"
"strconv" "strconv"
"strings" "strings"
@@ -21,7 +20,6 @@ import (
"github.com/ncw/rclone/fs/hash" "github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache" "github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/pacer" "github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/readers"
"github.com/ncw/rclone/lib/rest" "github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@@ -179,17 +177,17 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil { if err != nil {
// Assume it is a file // Assume it is a file
newRoot, remote := dircache.SplitPath(root) newRoot, remote := dircache.SplitPath(root)
tempF := *f newF := *f
tempF.dirCache = dircache.New(newRoot, "0", &tempF) newF.dirCache = dircache.New(newRoot, "0", &newF)
tempF.root = newRoot newF.root = newRoot
// Make new Fs which is the parent // Make new Fs which is the parent
err = tempF.dirCache.FindRoot(false) err = newF.dirCache.FindRoot(false)
if err != nil { if err != nil {
// No root so return old f // No root so return old f
return f, nil return f, nil
} }
_, err := tempF.newObjectWithInfo(remote, nil) _, err := newF.newObjectWithInfo(remote, nil)
if err != nil { if err != nil {
if err == fs.ErrorObjectNotFound { if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f // File doesn't exist so return old f
@@ -197,13 +195,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} }
return nil, err return nil, err
} }
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/ncw/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent // return an error with an fs which points to the parent
return f, fs.ErrorIsFile return &newF, fs.ErrorIsFile
} }
return f, nil return f, nil
} }
@@ -932,9 +925,8 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
// resp.Body.Close() // resp.Body.Close()
// fs.Debugf(nil, "PostOpen: %#v", openResponse) // fs.Debugf(nil, "PostOpen: %#v", openResponse)
// 10 MB chunks size // 1 MB chunks size
chunkSize := int64(1024 * 1024 * 10) chunkSize := int64(1024 * 1024 * 10)
buf := make([]byte, int(chunkSize))
chunkOffset := int64(0) chunkOffset := int64(0)
remainingBytes := size remainingBytes := size
chunkCounter := 0 chunkCounter := 0
@@ -947,19 +939,14 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
remainingBytes -= currentChunkSize remainingBytes -= currentChunkSize
fs.Debugf(o, "Uploading chunk %d, size=%d, remain=%d", chunkCounter, currentChunkSize, remainingBytes) fs.Debugf(o, "Uploading chunk %d, size=%d, remain=%d", chunkCounter, currentChunkSize, remainingBytes)
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, currentChunkSize)
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, err
}
var formBody bytes.Buffer var formBody bytes.Buffer
w := multipart.NewWriter(&formBody) w := multipart.NewWriter(&formBody)
fw, err := w.CreateFormFile("file_data", o.remote) fw, err := w.CreateFormFile("file_data", o.remote)
if err != nil { if err != nil {
return false, err return false, err
} }
if _, err = io.Copy(fw, chunk); err != nil { if _, err = io.CopyN(fw, in, currentChunkSize); err != nil {
return false, err return false, err
} }
// Add session_id // Add session_id
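
One side of this hunk wraps each 10 MB chunk in readers.NewRepeatableLimitReaderBuffer so a failed part can be resent from a buffer, which the plain io.CopyN variant on the other side cannot do with a non-seekable source. A small standalone sketch of that buffering idea, with a tiny chunk size and a fake upload, for illustration only.

package main

import (
	"fmt"
	"io"
	"strings"
)

// upload stands in for the HTTP POST of one multipart form chunk.
func upload(chunk, attempt int, payload []byte) {
	fmt.Printf("chunk %d attempt %d: sending %d bytes: %q\n", chunk, attempt, len(payload), payload)
}

func main() {
	const chunkSize = 8 // illustrative only; the backend uses 10 MB chunks
	in := strings.NewReader("some data uploaded in fixed size chunks")
	buf := make([]byte, chunkSize)

	for chunk := 0; ; chunk++ {
		// Read the next chunk of the source stream into the buffer once.
		n, err := io.ReadFull(in, buf)
		if n == 0 {
			break
		}
		payload := buf[:n]

		// The chunk lives in the buffer, so a retry resends the same bytes
		// without having to rewind the (possibly non-seekable) source.
		upload(chunk, 1, payload)
		upload(chunk, 2, payload) // simulated retry of the same chunk

		if err != nil { // io.EOF or io.ErrUnexpectedEOF: stream exhausted
			break
		}
	}
}
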
@@ -1090,7 +1077,7 @@ func (o *Object) readMetaData() (err error) {
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
opts := rest.Opts{ opts := rest.Opts{
Method: "GET", Method: "GET",
Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + url.QueryEscape(replaceReservedChars(leaf)), Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + rest.URLPathEscape(replaceReservedChars(leaf)),
} }
resp, err = o.fs.srv.CallJSON(&opts, nil, &folderList) resp, err = o.fs.srv.CallJSON(&opts, nil, &folderList)
return o.fs.shouldRetry(resp, err) return o.fs.shouldRetry(resp, err)


@@ -276,16 +276,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil { if err != nil {
// Assume it is a file // Assume it is a file
newRoot, remote := dircache.SplitPath(root) newRoot, remote := dircache.SplitPath(root)
tempF := *f newF := *f
tempF.dirCache = dircache.New(newRoot, rootID, &tempF) newF.dirCache = dircache.New(newRoot, rootID, &newF)
tempF.root = newRoot newF.root = newRoot
// Make new Fs which is the parent // Make new Fs which is the parent
err = tempF.dirCache.FindRoot(false) err = newF.dirCache.FindRoot(false)
if err != nil { if err != nil {
// No root so return old f // No root so return old f
return f, nil return f, nil
} }
_, err := tempF.newObjectWithInfo(remote, nil) _, err := newF.newObjectWithInfo(remote, nil)
if err != nil { if err != nil {
if err == fs.ErrorObjectNotFound { if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f // File doesn't exist so return old f
@@ -293,13 +293,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} }
return nil, err return nil, err
} }
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/ncw/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent // return an error with an fs which points to the parent
return f, fs.ErrorIsFile return &newF, fs.ErrorIsFile
} }
return f, nil return f, nil
} }
@@ -1112,12 +1107,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
// sometimes pcloud leaves a half complete file on
// error, so delete it if it exists
delObj, delErr := o.fs.NewObject(o.remote)
if delErr == nil && delObj != nil {
_ = delObj.Remove()
}
return err return err
} }
if len(result.Items) != 1 { if len(result.Items) != 1 {


@@ -132,12 +132,10 @@ type Object struct {
// ------------------------------------------------------------ // ------------------------------------------------------------
// Pattern to match a qingstor path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
// parseParse parses a qingstor 'url' // parseParse parses a qingstor 'url'
func qsParsePath(path string) (bucket, key string, err error) { func qsParsePath(path string) (bucket, key string, err error) {
// Pattern to match a qingstor path // Pattern to match a qingstor path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
parts := matcher.FindStringSubmatch(path) parts := matcher.FindStringSubmatch(path)
if parts == nil { if parts == nil {
err = errors.Errorf("Couldn't parse bucket out of qingstor path %q", path) err = errors.Errorf("Couldn't parse bucket out of qingstor path %q", path)


@@ -39,11 +39,9 @@ import (
"github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap" "github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp" "github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash" "github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk" "github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest" "github.com/ncw/rclone/lib/rest"
"github.com/ncw/swift" "github.com/ncw/swift"
"github.com/pkg/errors" "github.com/pkg/errors"
@@ -448,12 +446,7 @@ func init() {
Provider: "!AWS,IBMCOS", Provider: "!AWS,IBMCOS",
}, { }, {
Name: "acl", Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects. Help: "Canned ACL used when creating buckets and/or storing objects in S3.\nFor more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl",
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied when server side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.`,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "private", Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).", Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
@@ -505,24 +498,10 @@ doesn't copy the ACL from the source but rather writes a fresh one.`,
}, { }, {
Value: "AES256", Value: "AES256",
Help: "AES256", Help: "AES256",
}, {
Value: "aws:kms",
Help: "aws:kms",
}},
}, {
Name: "sse_kms_key_id",
Help: "If using KMS ID you must provide the ARN of Key.",
Provider: "AWS",
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}, {
Value: "arn:aws:kms:us-east-1:*",
Help: "arn:aws:kms:*",
}}, }},
}, { }, {
Name: "storage_class", Name: "storage_class",
Help: "The storage class to use when storing new objects in S3.", Help: "The storage class to use when storing objects in S3.",
Provider: "AWS", Provider: "AWS",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "", Value: "",
@@ -542,17 +521,8 @@ doesn't copy the ACL from the source but rather writes a fresh one.`,
}}, }},
}, { }, {
Name: "chunk_size", Name: "chunk_size",
Help: `Chunk size to use for uploading. Help: "Chunk size to use for uploading",
Default: fs.SizeSuffix(s3manager.MinUploadPartSize),
Any files larger than this will be uploaded in chunks of this
size. The default is 5MB. The minimum is 5MB.
Note that "--s3-upload-concurrency" chunks of this size are buffered
in memory per transfer.
If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.`,
Default: minChunkSize,
Advanced: true, Advanced: true,
}, { }, {
Name: "disable_checksum", Name: "disable_checksum",
@@ -562,41 +532,13 @@ enough memory, then increasing this will speed up the transfers.`,
}, { }, {
Name: "session_token", Name: "session_token",
Help: "An AWS session token", Help: "An AWS session token",
Hide: fs.OptionHideBoth,
Advanced: true, Advanced: true,
}, { }, {
Name: "upload_concurrency", Name: "upload_concurrency",
Help: `Concurrency for multipart uploads. Help: "Concurrency for multipart uploads.",
This is the number of chunks of the same file that are uploaded
concurrently.
If you are uploading small numbers of large file over high speed link
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: 2, Default: 2,
Advanced: true, Advanced: true,
}, {
Name: "force_path_style",
Help: `If true use path style access if false use virtual hosted style.
If this is true (the default) then rclone will use path style access,
if false then rclone will use virtual path style. See [the AWS S3
docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
for more info.
Some providers (eg Aliyun OSS or Netease COS) require this set to false.`,
Default: true,
Advanced: true,
}, {
Name: "v2_auth",
Help: `If true use v2 authentication.
If this is false (the default) then rclone will use v4 authentication.
If it is set then rclone will use v2 authentication.
Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
Default: false,
Advanced: true,
}}, }},
}) })
} }
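
The expanded chunk_size and upload_concurrency help in this hunk explains that one chunk per concurrent part is buffered in memory, so peak buffer use per transfer is roughly chunk_size multiplied by upload_concurrency. A small worked example of that arithmetic with the 5 MB / 2-way defaults quoted here; the larger figures are illustrative.

package main

import "fmt"

// approxBufferBytes estimates per-transfer upload buffer use: one chunk is
// held in memory for each concurrently uploaded part.
func approxBufferBytes(chunkSize, concurrency int64) int64 {
	return chunkSize * concurrency
}

func main() {
	const mb = 1024 * 1024
	fmt.Println(approxBufferBytes(5*mb, 2)/mb, "MB with the 5M / 2-way defaults")
	fmt.Println(approxBufferBytes(64*mb, 8)/mb, "MB with 64M chunks and 8-way concurrency")
}
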
@@ -609,8 +551,6 @@ const (
maxRetries = 10 // number of retries to make of operations maxRetries = 10 // number of retries to make of operations
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
minChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize)
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
) )
// Options defines the configuration for this backend // Options defines the configuration for this backend
@@ -624,14 +564,11 @@ type Options struct {
LocationConstraint string `config:"location_constraint"` LocationConstraint string `config:"location_constraint"`
ACL string `config:"acl"` ACL string `config:"acl"`
ServerSideEncryption string `config:"server_side_encryption"` ServerSideEncryption string `config:"server_side_encryption"`
SSEKMSKeyID string `config:"sse_kms_key_id"`
StorageClass string `config:"storage_class"` StorageClass string `config:"storage_class"`
ChunkSize fs.SizeSuffix `config:"chunk_size"` ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableChecksum bool `config:"disable_checksum"` DisableChecksum bool `config:"disable_checksum"`
SessionToken string `config:"session_token"` SessionToken string `config:"session_token"`
UploadConcurrency int `config:"upload_concurrency"` UploadConcurrency int `config:"upload_concurrency"`
ForcePathStyle bool `config:"force_path_style"`
V2Auth bool `config:"v2_auth"`
} }
// Fs represents a remote s3 server // Fs represents a remote s3 server
@@ -646,7 +583,6 @@ type Fs struct {
bucketOKMu sync.Mutex // mutex to protect bucket OK bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket bucketOK bool // true if we have created the bucket
bucketDeleted bool // true if we have deleted the bucket bucketDeleted bool // true if we have deleted the bucket
pacer *pacer.Pacer // To pace the API calls
} }
// Object describes a s3 object // Object describes a s3 object
@@ -692,39 +628,8 @@ func (f *Fs) Features() *fs.Features {
return f.features return f.features
} }
// retryErrorCodes is a slice of error codes that we will retry
// See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
var retryErrorCodes = []int{
409, // Conflict - various states that could be resolved on a retry
503, // Service Unavailable/Slow Down - "Reduce your request rate"
}
//S3 is pretty resilient, and the built in retry handling is probably sufficient
// as it should notice closed connections and timeouts which are the most likely
// sort of failure modes
func shouldRetry(err error) (bool, error) {
// If this is an awserr object, try and extract more useful information to determine if we should retry
if awsError, ok := err.(awserr.Error); ok {
// Simple case, check the original embedded error in case it's generically retriable
if fserrors.ShouldRetry(awsError.OrigErr()) {
return true, err
}
//Failing that, if it's a RequestFailure it's probably got an http status code we can check
if reqErr, ok := err.(awserr.RequestFailure); ok {
for _, e := range retryErrorCodes {
if reqErr.StatusCode() == e {
return true, err
}
}
}
}
//Ok, not an awserr, check for generic failure conditions
return fserrors.ShouldRetry(err), err
}
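
The shouldRetry helper above classifies AWS errors (409, 503, or a generically retriable underlying error) so that calls wrapped in the pacer can be retried. A standalone sketch of feeding such a classifier into a plain retry loop with backoff; this is not rclone's pacer, only the general shape, and the limits are made up.

package main

import (
	"errors"
	"fmt"
	"time"
)

var errSlowDown = errors.New("503 SlowDown: reduce your request rate")

// retry runs fn up to attempts times, backing off between tries, and stops
// early when the classifier reports the error is not retriable.
func retry(attempts int, delay time.Duration, fn func() error, retriable func(error) bool) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil || !retriable(err) {
			return err
		}
		time.Sleep(delay)
		delay *= 2 // simple exponential backoff
	}
	return err
}

func main() {
	calls := 0
	err := retry(5, 10*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return errSlowDown // fails twice, then succeeds
		}
		return nil
	}, func(err error) bool {
		return err == errSlowDown // stand-in for the HTTP status code check
	})
	fmt.Println("calls:", calls, "err:", err)
}
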
// Pattern to match a s3 path // Pattern to match a s3 path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`) var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parseParse parses a s3 'url' // parseParse parses a s3 'url'
func s3ParsePath(path string) (bucket, directory string, err error) { func s3ParsePath(path string) (bucket, directory string, err error) {
@@ -802,11 +707,11 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
WithCredentials(cred). WithCredentials(cred).
WithEndpoint(opt.Endpoint). WithEndpoint(opt.Endpoint).
WithHTTPClient(fshttp.NewClient(fs.Config)). WithHTTPClient(fshttp.NewClient(fs.Config)).
WithS3ForcePathStyle(opt.ForcePathStyle) WithS3ForcePathStyle(true)
// awsConfig.WithLogLevel(aws.LogDebugWithSigning) // awsConfig.WithLogLevel(aws.LogDebugWithSigning)
ses := session.New() ses := session.New()
c := s3.New(ses, awsConfig) c := s3.New(ses, awsConfig)
if opt.V2Auth || opt.Region == "other-v2-signature" { if opt.Region == "other-v2-signature" {
fs.Debugf(nil, "Using v2 auth") fs.Debugf(nil, "Using v2 auth")
signer := func(req *request.Request) { signer := func(req *request.Request) {
// Ignore AnonymousCredentials object // Ignore AnonymousCredentials object
@@ -822,21 +727,6 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
return c, ses, nil return c, ses, nil
} }
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
// NewFs constructs an Fs from the path, bucket:path // NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct // Parse config into Options struct
@@ -845,9 +735,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
err = checkUploadChunkSize(opt.ChunkSize) if opt.ChunkSize < fs.SizeSuffix(s3manager.MinUploadPartSize) {
if err != nil { return nil, errors.Errorf("s3 chunk size (%v) must be >= %v", opt.ChunkSize, fs.SizeSuffix(s3manager.MinUploadPartSize))
return nil, errors.Wrap(err, "s3: chunk size")
} }
bucket, directory, err := s3ParsePath(root) bucket, directory, err := s3ParsePath(root)
if err != nil { if err != nil {
@@ -864,7 +753,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
c: c, c: c,
bucket: bucket, bucket: bucket,
ses: ses, ses: ses,
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
} }
f.features = (&fs.Features{ f.features = (&fs.Features{
ReadMimeType: true, ReadMimeType: true,
@@ -878,10 +766,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
Bucket: &f.bucket, Bucket: &f.bucket,
Key: &directory, Key: &directory,
} }
err = f.pacer.Call(func() (bool, error) {
_, err = f.c.HeadObject(&req) _, err = f.c.HeadObject(&req)
return shouldRetry(err)
})
if err == nil { if err == nil {
f.root = path.Dir(directory) f.root = path.Dir(directory)
if f.root == "." { if f.root == "." {
@@ -958,12 +843,7 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) error {
MaxKeys: &maxKeys, MaxKeys: &maxKeys,
Marker: marker, Marker: marker,
} }
var resp *s3.ListObjectsOutput resp, err := f.c.ListObjects(&req)
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.ListObjects(&req)
return shouldRetry(err)
})
if err != nil { if err != nil {
if awsErr, ok := err.(awserr.RequestFailure); ok { if awsErr, ok := err.(awserr.RequestFailure); ok {
if awsErr.StatusCode() == http.StatusNotFound { if awsErr.StatusCode() == http.StatusNotFound {
@@ -1088,11 +968,7 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
return nil, fs.ErrorListBucketRequired return nil, fs.ErrorListBucketRequired
} }
req := s3.ListBucketsInput{} req := s3.ListBucketsInput{}
var resp *s3.ListBucketsOutput resp, err := f.c.ListBuckets(&req)
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.ListBuckets(&req)
return shouldRetry(err)
})
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -1177,10 +1053,7 @@ func (f *Fs) dirExists() (bool, error) {
req := s3.HeadBucketInput{ req := s3.HeadBucketInput{
Bucket: &f.bucket, Bucket: &f.bucket,
} }
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.HeadBucket(&req) _, err := f.c.HeadBucket(&req)
return shouldRetry(err)
})
if err == nil { if err == nil {
return true, nil return true, nil
} }
@@ -1217,10 +1090,7 @@ func (f *Fs) Mkdir(dir string) error {
LocationConstraint: &f.opt.LocationConstraint, LocationConstraint: &f.opt.LocationConstraint,
} }
} }
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.CreateBucket(&req) _, err := f.c.CreateBucket(&req)
return shouldRetry(err)
})
if err, ok := err.(awserr.Error); ok { if err, ok := err.(awserr.Error); ok {
if err.Code() == "BucketAlreadyOwnedByYou" { if err.Code() == "BucketAlreadyOwnedByYou" {
err = nil err = nil
@@ -1245,10 +1115,7 @@ func (f *Fs) Rmdir(dir string) error {
req := s3.DeleteBucketInput{ req := s3.DeleteBucketInput{
Bucket: &f.bucket, Bucket: &f.bucket,
} }
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.DeleteBucket(&req) _, err := f.c.DeleteBucket(&req)
return shouldRetry(err)
})
if err == nil { if err == nil {
f.bucketOK = false f.bucketOK = false
f.bucketDeleted = true f.bucketDeleted = true
@@ -1291,24 +1158,11 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
source := pathEscape(srcFs.bucket + "/" + srcFs.root + srcObj.remote) source := pathEscape(srcFs.bucket + "/" + srcFs.root + srcObj.remote)
req := s3.CopyObjectInput{ req := s3.CopyObjectInput{
Bucket: &f.bucket, Bucket: &f.bucket,
ACL: &f.opt.ACL,
Key: &key, Key: &key,
CopySource: &source, CopySource: &source,
MetadataDirective: aws.String(s3.MetadataDirectiveCopy), MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
} }
if f.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &f.opt.ServerSideEncryption
}
if f.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &f.opt.SSEKMSKeyID
}
if f.opt.StorageClass != "" {
req.StorageClass = &f.opt.StorageClass
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.c.CopyObject(&req) _, err = f.c.CopyObject(&req)
return shouldRetry(err)
})
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -1385,12 +1239,7 @@ func (o *Object) readMetaData() (err error) {
Bucket: &o.fs.bucket, Bucket: &o.fs.bucket,
Key: &key, Key: &key,
} }
var resp *s3.HeadObjectOutput resp, err := o.fs.c.HeadObject(&req)
err = o.fs.pacer.Call(func() (bool, error) {
var err error
resp, err = o.fs.c.HeadObject(&req)
return shouldRetry(err)
})
if err != nil { if err != nil {
if awsErr, ok := err.(awserr.RequestFailure); ok { if awsErr, ok := err.(awserr.RequestFailure); ok {
if awsErr.StatusCode() == http.StatusNotFound { if awsErr.StatusCode() == http.StatusNotFound {
@@ -1474,19 +1323,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
Metadata: o.meta, Metadata: o.meta,
MetadataDirective: &directive, MetadataDirective: &directive,
} }
if o.fs.opt.ServerSideEncryption != "" { _, err = o.fs.c.CopyObject(&req)
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
}
if o.fs.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
}
if o.fs.opt.StorageClass != "" {
req.StorageClass = &o.fs.opt.StorageClass
}
err = o.fs.pacer.Call(func() (bool, error) {
_, err := o.fs.c.CopyObject(&req)
return shouldRetry(err)
})
return err return err
} }
@@ -1513,12 +1350,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
} }
} }
} }
var resp *s3.GetObjectOutput resp, err := o.fs.c.GetObject(&req)
err = o.fs.pacer.Call(func() (bool, error) {
var err error
resp, err = o.fs.c.GetObject(&req)
return shouldRetry(err)
})
if err, ok := err.(awserr.RequestFailure); ok { if err, ok := err.(awserr.RequestFailure); ok {
if err.Code() == "InvalidObjectState" { if err.Code() == "InvalidObjectState" {
return nil, errors.Errorf("Object in GLACIER, restore first: %v", key) return nil, errors.Errorf("Object in GLACIER, restore first: %v", key)
@@ -1591,16 +1423,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
if o.fs.opt.ServerSideEncryption != "" { if o.fs.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
} }
if o.fs.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
}
if o.fs.opt.StorageClass != "" { if o.fs.opt.StorageClass != "" {
req.StorageClass = &o.fs.opt.StorageClass req.StorageClass = &o.fs.opt.StorageClass
} }
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
_, err = uploader.Upload(&req) _, err = uploader.Upload(&req)
return shouldRetry(err)
})
if err != nil { if err != nil {
return err return err
} }
@@ -1618,10 +1444,7 @@ func (o *Object) Remove() error {
Bucket: &o.fs.bucket, Bucket: &o.fs.bucket,
Key: &key, Key: &key,
} }
err := o.fs.pacer.Call(func() (bool, error) {
_, err := o.fs.c.DeleteObject(&req) _, err := o.fs.c.DeleteObject(&req)
return shouldRetry(err)
})
return err return err
} }


@@ -1,10 +1,10 @@
// Test S3 filesystem interface // Test S3 filesystem interface
package s3 package s3_test
import ( import (
"testing" "testing"
"github.com/ncw/rclone/fs" "github.com/ncw/rclone/backend/s3"
"github.com/ncw/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
@@ -12,15 +12,6 @@ import (
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: "TestS3:", RemoteName: "TestS3:",
NilObject: (*Object)(nil), NilObject: (*s3.Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},
}) })
} }
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)


@@ -44,7 +44,16 @@ func sign(AccessKey, SecretKey string, req *http.Request) {
req.Header.Set("Date", date) req.Header.Set("Date", date)
// Sort out URI // Sort out URI
uri := req.URL.EscapedPath() uri := req.URL.Opaque
if uri != "" {
if strings.HasPrefix(uri, "//") {
// Strip off //host/uri
uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
req.URL.Opaque = uri // reset to plain URI otherwise Ceph gets confused
}
} else {
uri = req.URL.Path
}
if uri == "" { if uri == "" {
uri = "/" uri = "/"
} }
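
This hunk only changes how the request URI is derived before signing; the rest of the AWS v2 scheme concatenates the method, content headers, date and canonicalized resource, then HMAC-SHA1s the result with the secret key. A simplified standalone sketch of that signature, with canonicalized amz headers and sub-resources omitted and placeholder credentials.

package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
	"time"
)

// v2Signature builds a simplified AWS v2 signature; canonicalized amz
// headers and sub-resources are left out for brevity.
func v2Signature(secretKey, method, contentMD5, contentType, date, canonicalResource string) string {
	stringToSign := method + "\n" + contentMD5 + "\n" + contentType + "\n" + date + "\n" + canonicalResource
	mac := hmac.New(sha1.New, []byte(secretKey))
	mac.Write([]byte(stringToSign))
	return base64.StdEncoding.EncodeToString(mac.Sum(nil))
}

func main() {
	date := time.Now().UTC().Format(time.RFC1123)
	sig := v2Signature("SECRETKEY", "GET", "", "", date, "/bucket/key")
	fmt.Printf("Authorization: AWS ACCESSKEY:%s\n", sig)
}
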


@@ -92,18 +92,7 @@ func init() {
}, {
Name: "path_override",
Default: "",
Help: `Override path used by SSH connection.
Help: "Override path used by SSH connection.",
This allows checksum calculation when SFTP and SSH paths are
different. This issue affects among others Synology NAS boxes.
Shared folders can be found in directories representing volumes
rclone sync /home/local/directory remote:/directory --ssh-path-override /volume2/directory
Home directory can be found in a shared folder called "home"
rclone sync /home/local/directory remote:/home/directory --ssh-path-override /volume1/homes/USER/directory`,
Advanced: true,
}, {
Name: "set_modtime",
@@ -769,10 +758,6 @@ func (o *Object) Hash(r hash.Type) (string, error) {
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
if o.fs.opt.DisableHashCheck {
return "", nil
}
c, err := o.fs.getSftpConnection() c, err := o.fs.getSftpConnection()
if err != nil { if err != nil {
return "", errors.Wrap(err, "Hash get SFTP connection") return "", errors.Wrap(err, "Hash get SFTP connection")


@@ -29,27 +29,15 @@ import (
const (
directoryMarkerContentType = "application/directory" // content type of directory marker objects
listChunks = 1000 // chunk size to read directory listings
defaultChunkSize = 5 * fs.GibiByte
)
// SharedOptions are shared between swift and hubic
var SharedOptions = []fs.Option{{
Name: "chunk_size",
Help: `Above this size files will be chunked into a _segments container.
Above this size files will be chunked into a _segments container. The
default for this is 5GB which is its maximum value.`,
Default: defaultChunkSize,
Advanced: true,
}}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "swift",
Description: "Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)",
NewFs: NewFs,
Options: append([]fs.Option{{
Options: []fs.Option{{
Name: "env_auth",
Help: "Get swift credentials from environment variables in standard OpenStack form.",
Default: false,
@@ -133,25 +121,11 @@ func init() {
Value: "admin", Value: "admin",
}}, }},
}, { }, {
Name: "storage_policy", Name: "chunk_size",
Help: `The storage policy to use when creating a new container Help: "Above this size files will be chunked into a _segments container.",
Default: fs.SizeSuffix(5 * 1024 * 1024 * 1024),
This applies the specified storage policy when creating a new Advanced: true,
container. The policy cannot be changed afterwards. The allowed
configuration values and their meaning depend on your Swift storage
provider.`,
Default: "",
Examples: []fs.OptionExample{{
Help: "Default",
Value: "",
}, {
Help: "OVH Public Cloud Storage",
Value: "pcs",
}, {
Help: "OVH Public Cloud Archive",
Value: "pca",
}}, }},
}}, SharedOptions...),
}) })
} }
@@ -170,7 +144,6 @@ type Options struct {
StorageURL string `config:"storage_url"`
AuthToken string `config:"auth_token"`
AuthVersion int `config:"auth_version"`
StoragePolicy string `config:"storage_policy"`
EndpointType string `config:"endpoint_type"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
}
@@ -228,7 +201,7 @@ func (f *Fs) Features() *fs.Features {
}
// Pattern to match a swift path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parseParse parses a swift 'url'
func parsePath(path string) (container, directory string, err error) {
@@ -289,37 +262,12 @@ func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
// provided by wrapping the existing auth, so we can just
// override one or the other or both.
if StorageUrl != "" || AuthToken != "" {
// Re-write StorageURL and AuthToken if they are being
// overridden as c.Authenticate above will have
// overwritten them.
if StorageUrl != "" {
c.StorageUrl = StorageUrl
}
if AuthToken != "" {
c.AuthToken = AuthToken
}
c.Auth = newAuth(c.Auth, StorageUrl, AuthToken)
}
return c, nil
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
// NewFsWithConnection contstructs an Fs from the path, container:path
const minChunkSize = fs.Byte
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
// NewFsWithConnection constructs an Fs from the path, container:path
// and authenticated connection.
//
// if noCheckContainer is set then the Fs won't check the container
@@ -369,10 +317,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, err
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "swift: chunk size")
}
c, err := swiftConnection(opt, name)
if err != nil {
@@ -639,11 +583,7 @@ func (f *Fs) Mkdir(dir string) error {
_, _, err = f.c.Container(f.container)
}
if err == swift.ContainerNotFound {
headers := swift.Headers{}
err = f.c.ContainerCreate(f.container, nil)
if f.opt.StoragePolicy != "" {
headers["X-Storage-Policy"] = f.opt.StoragePolicy
}
err = f.c.ContainerCreate(f.container, headers)
}
if err == nil {
f.containerOK = true
@@ -940,11 +880,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
var err error
_, _, err = o.fs.c.Container(o.fs.segmentsContainer)
if err == swift.ContainerNotFound {
headers := swift.Headers{}
err = o.fs.c.ContainerCreate(o.fs.segmentsContainer, nil)
if o.fs.opt.StoragePolicy != "" {
headers["X-Storage-Policy"] = o.fs.opt.StoragePolicy
}
err = o.fs.c.ContainerCreate(o.fs.segmentsContainer, headers)
}
if err != nil {
return "", err


@@ -1,10 +1,10 @@
// Test Swift filesystem interface
package swift
package swift_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/backend/swift"
"github.com/ncw/rclone/fstest/fstests"
)
@@ -12,12 +12,6 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestSwift:",
NilObject: (*Object)(nil),
NilObject: (*swift.Object)(nil),
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)


@@ -1,423 +0,0 @@
package union
import (
"fmt"
"io"
"path"
"path/filepath"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors"
)
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "union",
Description: "A stackable unification remote, which can appear to merge the contents of several remotes",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remotes",
Help: "List of space separated remotes.\nCan be 'remotea:test/dir remoteb:', '\"remotea:test/space dir\" remoteb:', etc.\nThe last remote is used to write to.",
Required: true,
}},
}
fs.Register(fsi)
}
// Options defines the configuration for this backend
type Options struct {
Remotes fs.SpaceSepList `config:"remotes"`
}
// Fs represents a union of remotes
type Fs struct {
name string // name of this remote
features *fs.Features // optional features
opt Options // options for this Fs
root string // the path we are working on
remotes []fs.Fs // slice of remotes
wr fs.Fs // writable remote
hashSet hash.Set // intersection of hash types
}
// Object describes a union Object
//
// This is a wrapped object which returns the Union Fs as its parent
type Object struct {
fs.Object
fs *Fs // what this object is part of
}
// Wrap an existing object in the union Object
func (f *Fs) wrapObject(o fs.Object) *Object {
return &Object{
Object: o,
fs: f,
}
}
// Fs returns the union Fs as the parent
func (o *Object) Fs() fs.Info {
return o.fs
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("union root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(dir string) error {
return f.wr.Rmdir(dir)
}
// Hashes returns hash.HashNone to indicate remote hashing is unavailable
func (f *Fs) Hashes() hash.Set {
return f.hashSet
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(dir string) error {
return f.wr.Mkdir(dir)
}
// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge() error {
return f.wr.Features().Purge()
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
if src.Fs() != f.wr {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
o, err := f.wr.Features().Copy(src, remote)
if err != nil {
return nil, err
}
return f.wrapObject(o), nil
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
if src.Fs() != f.wr {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
o, err := f.wr.Features().Move(src, remote)
if err != nil {
return nil, err
}
return f.wrapObject(o), err
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
return f.wr.Features().DirMove(srcFs.wr, srcRemote, dstRemote)
}
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implemantion must empty the channel
// regulary. When the channel gets closed, the implemantion
// should stop polling and release resources.
func (f *Fs) ChangeNotify(fn func(string, fs.EntryType), ch <-chan time.Duration) {
var remoteChans []chan time.Duration
for _, remote := range f.remotes {
if ChangeNotify := remote.Features().ChangeNotify; ChangeNotify != nil {
ch := make(chan time.Duration)
remoteChans = append(remoteChans, ch)
ChangeNotify(fn, ch)
}
}
go func() {
for i := range ch {
for _, c := range remoteChans {
c <- i
}
}
for _, c := range remoteChans {
close(c)
}
}()
}
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
for _, remote := range f.remotes {
if DirCacheFlush := remote.Features().DirCacheFlush; DirCacheFlush != nil {
DirCacheFlush()
}
}
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.wr.Features().PutStream(in, src, options...)
if err != nil {
return nil, err
}
return f.wrapObject(o), err
}
// About gets quota information from the Fs
func (f *Fs) About() (*fs.Usage, error) {
return f.wr.Features().About()
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.wr.Put(in, src, options...)
if err != nil {
return nil, err
}
return f.wrapObject(o), err
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
set := make(map[string]fs.DirEntry)
found := false
for _, remote := range f.remotes {
var remoteEntries, err = remote.List(dir)
if err == fs.ErrorDirNotFound {
continue
}
if err != nil {
return nil, errors.Wrapf(err, "List failed on %v", remote)
}
found = true
for _, remoteEntry := range remoteEntries {
set[remoteEntry.Remote()] = remoteEntry
}
}
if !found {
return nil, fs.ErrorDirNotFound
}
for _, entry := range set {
if o, ok := entry.(fs.Object); ok {
entry = f.wrapObject(o)
}
entries = append(entries, entry)
}
return entries, nil
}
// NewObject creates a new remote union file object based on the first Object it finds (reverse remote order)
func (f *Fs) NewObject(path string) (fs.Object, error) {
for i := range f.remotes {
var remote = f.remotes[len(f.remotes)-i-1]
var obj, err = remote.NewObject(path)
if err == fs.ErrorObjectNotFound {
continue
}
if err != nil {
return nil, errors.Wrapf(err, "NewObject failed on %v", remote)
}
return f.wrapObject(obj), nil
}
return nil, fs.ErrorObjectNotFound
}
// Precision is the greatest Precision of all remotes
func (f *Fs) Precision() time.Duration {
var greatestPrecision time.Duration
for _, remote := range f.remotes {
if remote.Precision() > greatestPrecision {
greatestPrecision = remote.Precision()
}
}
return greatestPrecision
}
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if len(opt.Remotes) == 0 {
return nil, errors.New("union can't point to an empty remote - check the value of the remotes setting")
}
if len(opt.Remotes) == 1 {
return nil, errors.New("union can't point to a single remote - check the value of the remotes setting")
}
for _, remote := range opt.Remotes {
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point union remote at itself - check the value of the remote setting")
}
}
var remotes []fs.Fs
for i := range opt.Remotes {
// Last remote first so we return the correct (last) matching fs in case of fs.ErrorIsFile
var remote = opt.Remotes[len(opt.Remotes)-i-1]
_, configName, fsPath, err := fs.ParseRemote(remote)
if err != nil {
return nil, err
}
var rootString = path.Join(fsPath, filepath.ToSlash(root))
if configName != "local" {
rootString = configName + ":" + rootString
}
myFs, err := fs.NewFs(rootString)
if err != nil {
if err == fs.ErrorIsFile {
return myFs, err
}
return nil, err
}
remotes = append(remotes, myFs)
}
// Reverse the remotes again so they are in the order as before
for i, j := 0, len(remotes)-1; i < j; i, j = i+1, j-1 {
remotes[i], remotes[j] = remotes[j], remotes[i]
}
f := &Fs{
name: name,
root: root,
opt: *opt,
remotes: remotes,
wr: remotes[len(remotes)-1],
}
var features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
ReadMimeType: true,
WriteMimeType: true,
CanHaveEmptyDirectories: true,
BucketBased: true,
SetTier: true,
GetTier: true,
}).Fill(f)
features = features.Mask(f.wr) // mask the features just on the writable fs
// FIXME maybe should be masking the bools here?
// Clear ChangeNotify and DirCacheFlush if all are nil
clearChangeNotify := true
clearDirCacheFlush := true
for _, remote := range f.remotes {
remoteFeatures := remote.Features()
if remoteFeatures.ChangeNotify != nil {
clearChangeNotify = false
}
if remoteFeatures.DirCacheFlush != nil {
clearDirCacheFlush = false
}
}
if clearChangeNotify {
features.ChangeNotify = nil
}
if clearDirCacheFlush {
features.DirCacheFlush = nil
}
f.features = features
// Get common intersection of hashes
hashSet := f.remotes[0].Hashes()
for _, remote := range f.remotes[1:] {
hashSet = hashSet.Overlap(remote.Hashes())
}
f.hashSet = hashSet
return f, nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
)
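NewObject above walks f.remotes from the last entry to the first, so when the same path exists in several remotes the one listed last (which is also the write remote) wins. A small standalone sketch of that precedence rule, with plain maps standing in for real remotes:

package main

import "fmt"

// lookup mimics the union NewObject search: the last remote listed wins.
func lookup(remotes []map[string]string, path string) (string, bool) {
	for i := len(remotes) - 1; i >= 0; i-- {
		if v, ok := remotes[i][path]; ok {
			return v, true
		}
	}
	return "", false
}

func main() {
	remoteA := map[string]string{"file.txt": "contents from A"}
	remoteB := map[string]string{"file.txt": "contents from B"} // last remote, takes precedence
	v, ok := lookup([]map[string]string{remoteA, remoteB}, "file.txt")
	fmt.Println(v, ok) // contents from B true
}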


@@ -1,18 +0,0 @@
// Test Union filesystem interface
package union_test
import (
"testing"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestUnion:",
NilObject: nil,
SkipFsMatch: true,
})
}


@@ -12,9 +12,6 @@ import (
const (
// Wed, 27 Sep 2017 14:28:34 GMT
timeFormat = time.RFC1123
// The same as time.RFC1123 with optional leading zeros on the date
// see https://github.com/ncw/rclone/issues/2574
noZerosRFC1123 = "Mon, _2 Jan 2006 15:04:05 MST"
)
// Multistatus contains responses returned from an HTTP 207 return code
@@ -144,8 +141,6 @@ var timeFormats = []string{
timeFormat, // Wed, 27 Sep 2017 14:28:34 GMT (as per RFC)
time.RFC1123Z, // Fri, 05 Jan 2018 14:14:38 +0000 (as used by mydrive.ch)
time.UnixDate, // Wed May 17 15:31:58 UTC 2017 (as used in an internal server)
noZerosRFC1123, // Fri, 7 Sep 2018 08:49:58 GMT (as used by server in #2574)
time.RFC3339, // Wed, 31 Oct 2018 13:57:11 CET (as used by komfortcloud.de)
}
// UnmarshalXML turns XML into a Time
@@ -156,12 +151,6 @@ func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
return err
}
// If time is missing then return the epoch
if v == "" {
*t = Time(time.Unix(0, 0))
return nil
}
// Parse the time format in multiple possible ways
var newT time.Time
for _, timeFormat := range timeFormats {
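UnmarshalXML above tries each entry of timeFormats in turn until one layout parses the server's date string; the _2 in noZerosRFC1123 is how a Go layout accepts a day without a leading zero. A minimal sketch of that fallback loop using only the standard library:

package main

import (
	"fmt"
	"time"
)

var layouts = []string{
	time.RFC1123,                    // Wed, 27 Sep 2017 14:28:34 GMT
	time.RFC1123Z,                   // Fri, 05 Jan 2018 14:14:38 +0000
	"Mon, _2 Jan 2006 15:04:05 MST", // Fri, 7 Sep 2018 08:49:58 GMT (no leading zero on the day)
}

// parseTime returns the first successful parse, or the last error.
func parseTime(s string) (time.Time, error) {
	var lastErr error
	for _, layout := range layouts {
		t, err := time.Parse(layout, s)
		if err == nil {
			return t, nil
		}
		lastErr = err
	}
	return time.Time{}, lastErr
}

func main() {
	t, err := parseTime("Fri, 7 Sep 2018 08:49:58 GMT")
	fmt.Println(t, err) // parsed by the no-leading-zero layout
}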


@@ -1,32 +0,0 @@
package odrvcookie
import (
"time"
"github.com/ncw/rclone/lib/rest"
)
// CookieRenew holds information for the renew
type CookieRenew struct {
srv *rest.Client
timer *time.Ticker
renewFn func()
}
// NewRenew returns and starts a CookieRenew
func NewRenew(interval time.Duration, renewFn func()) *CookieRenew {
renew := CookieRenew{
timer: time.NewTicker(interval),
renewFn: renewFn,
}
go renew.Renew()
return &renew
}
// Renew calls the renewFn for every tick
func (c *CookieRenew) Renew() {
for {
<-c.timer.C // wait for tick
c.renewFn()
}
}


@@ -47,7 +47,6 @@ const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDepth = "1" // depth for PROPFIND
)
// Register with Fs
@@ -115,7 +114,6 @@ type Fs struct {
precision time.Duration // mod time precision
canStream bool // set if can stream
useOCMtime bool // set if can use X-OC-Mtime
retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default)
}
// Object describes a webdav object
@@ -184,13 +182,13 @@ func itemIsDir(item *api.Response) bool {
}
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(path string, depth string) (info *api.Prop, err error) {
func (f *Fs) readMetaDataForPath(path string) (info *api.Prop, err error) {
// FIXME how do we read back additional properties?
opts := rest.Opts{
Method: "PROPFIND",
Path: f.filePath(path),
ExtraHeaders: map[string]string{
"Depth": depth,
"Depth": "1",
},
NoRedirect: true,
}
@@ -204,9 +202,6 @@ func (f *Fs) readMetaDataForPath(path string, depth string) (info *api.Prop, err
// does not exist
switch apiErr.StatusCode {
case http.StatusNotFound:
if f.retryWithZeroDepth && depth != "0" {
return f.readMetaDataForPath(path, "0")
}
return nil, fs.ErrorObjectNotFound
case http.StatusMovedPermanently, http.StatusFound, http.StatusSeeOther:
// Some sort of redirect - go doesn't deal with these properly (it resets
@@ -372,24 +367,7 @@ func (f *Fs) setQuirks(vendor string) error {
if err != nil {
return err
}
odrvcookie.NewRenew(12*time.Hour, func() {
spCookies, err := spCk.Cookies()
if err != nil {
fs.Errorf("could not renew cookies: %s", err.Error())
return
}
f.srv.SetCookie(&spCookies.FedAuth, &spCookies.RtFa)
fs.Debugf(spCookies, "successfully renewed sharepoint cookies")
})
f.srv.SetCookie(&spCookies.FedAuth, &spCookies.RtFa)
// sharepoint, unlike the other vendors, only lists files if the depth header is set to 0
// however, rclone defaults to 1 since it provides recursive directory listing
// to determine if we may have found a file, the request has to be resent
// with the depth set to 0
f.retryWithZeroDepth = true
case "other":
default:
fs.Debugf(f, "Unknown vendor %q", vendor)
@@ -440,12 +418,12 @@ type listAllFn func(string, bool, *api.Prop) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(dir string, directoriesOnly bool, filesOnly bool, depth string, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(dir string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
opts := rest.Opts{
Method: "PROPFIND",
Path: f.dirPath(dir), // FIXME Should not start with /
ExtraHeaders: map[string]string{
"Depth": depth,
"Depth": "1",
},
}
var result api.Multistatus
@@ -458,9 +436,6 @@ func (f *Fs) listAll(dir string, directoriesOnly bool, filesOnly bool, depth str
if apiErr, ok := err.(*api.Error); ok {
// does not exist
if apiErr.StatusCode == http.StatusNotFound {
if f.retryWithZeroDepth && depth != "0" {
return f.listAll(dir, directoriesOnly, filesOnly, "0", fn)
}
return found, fs.ErrorDirNotFound
}
}
@@ -534,7 +509,7 @@ func (f *Fs) listAll(dir string, directoriesOnly bool, filesOnly bool, depth str
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
var iErr error
_, err = f.listAll(dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {
_, err = f.listAll(dir, false, false, func(remote string, isDir bool, info *api.Prop) bool {
if isDir {
d := fs.NewDir(remote, time.Time(info.Modified))
// .SetID(info.ID)
@@ -650,7 +625,7 @@ func (f *Fs) Mkdir(dir string) error {
//
// if the directory does not exist then err will be ErrorDirNotFound
func (f *Fs) dirNotEmpty(dir string) (found bool, err error) {
return f.listAll(dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {
return f.listAll(dir, false, false, func(remote string, isDir bool, info *api.Prop) bool {
return true
})
}
@@ -901,7 +876,7 @@ func (o *Object) readMetaData() (err error) {
if o.hasMetaData {
return nil
}
info, err := o.fs.readMetaDataForPath(o.remote, defaultDepth)
info, err := o.fs.readMetaDataForPath(o.remote)
if err != nil {
return err
}
@@ -968,7 +943,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
Body: in,
NoResponse: true,
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
ContentType: fs.MimeType(src),
}
if o.fs.useOCMtime {
opts.ExtraHeaders = map[string]string{
@@ -980,14 +954,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
return shouldRetry(resp, err)
})
if err != nil {
// Give the WebDAV server a chance to get its internal state in order after the
// error. The error may have been local in which case we closed the connection.
// The server may still be dealing with it for a moment. A sleep isn't ideal but I
// haven't been able to think of a better method to find out if the server has
// finished - ncw
time.Sleep(1 * time.Second)
// Remove failed upload
_ = o.Remove()
return err
}
// read metadata from remote


@@ -67,9 +67,11 @@ var defaultErrorHandler ErrorHandler = func(resp *http.Response) error {
func (HTTPRequest *HTTPRequest) run(client *Client) ([]byte, error) {
var err error
values := make(url.Values)
if HTTPRequest.Parameters != nil {
for k, v := range HTTPRequest.Parameters {
values.Set(k, fmt.Sprintf("%v", v))
}
}
var req *http.Request
if HTTPRequest.Method == "POST" {


@@ -29,7 +29,7 @@ func (c *Client) PerformDelete(url string) error {
if err != nil {
return err
}
return errors.Errorf("delete error [%d]: %s", resp.StatusCode, string(body))
return errors.Errorf("delete error [%d]: %s", resp.StatusCode, string(body[:]))
}
return nil
}


@@ -34,7 +34,7 @@ func (c *Client) PerformDownload(url string, headers map[string]string) (out io.
if err != nil {
return nil, err
}
return nil, errors.Errorf("download error [%d]: %s", resp.StatusCode, string(body))
return nil, errors.Errorf("download error [%d]: %s", resp.StatusCode, string(body[:]))
}
return resp.Body, err
}


@@ -28,7 +28,7 @@ func (c *Client) PerformMkdir(url string) (int, string, error) {
return 0, "", err return 0, "", err
} }
//third parameter is the json error response body //third parameter is the json error response body
return resp.StatusCode, string(body), errors.Errorf("create folder error [%d]: %s", resp.StatusCode, string(body)) return resp.StatusCode, string(body[:]), errors.Errorf("create folder error [%d]: %s", resp.StatusCode, string(body[:]))
} }
return resp.StatusCode, "", nil return resp.StatusCode, "", nil
} }


@@ -32,7 +32,7 @@ func (c *Client) PerformUpload(url string, data io.Reader, contentType string) (
return err
}
return errors.Errorf("upload error [%d]: %s", resp.StatusCode, string(body))
return errors.Errorf("upload error [%d]: %s", resp.StatusCode, string(body[:]))
}
return nil
}


@@ -165,11 +165,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
//return err
} else {
if ResourceInfoResponse.ResourceType == "file" {
rootDir := path.Dir(root)
f.setRoot(path.Dir(root))
if rootDir == "." {
rootDir = ""
}
f.setRoot(rootDir)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
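The guard added above matters because path.Dir returns "." for a file in the top level of the remote, and the backend expects an empty root instead. A tiny illustration of that edge case:

package main

import (
	"fmt"
	"path"
)

func main() {
	fmt.Println(path.Dir("dir/file.txt")) // dir
	fmt.Println(path.Dir("file.txt"))     // . - top-level file

	rootDir := path.Dir("file.txt")
	if rootDir == "." {
		rootDir = "" // map "." to the empty root the backend expects
	}
	fmt.Printf("%q\n", rootDir) // ""
}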


@@ -1,23 +0,0 @@
#!/bin/bash
# An example script to run when bisecting go with git bisect -run when
# looking for an rclone regression
# Run this from the go root
set -e
# Compile the go version
cd src
./make.bash || exit 125
# Make sure we are using it
source ~/bin/use-go1.11
go version
# Compile rclone
cd ~/go/src/github.com/ncw/rclone
make
# run the failing test
go run -race race.go


@@ -1,33 +0,0 @@
#!/bin/bash
# Example script for git bisect run
#
# Copy this file into /tmp say before running as it will be
# overwritten by the bisect as it is checked in.
#
# Change the test below to find out whether rclone is working or not
#
# Run from the project root
#
# git bisect start
# git checkout master
# git bisect bad
# git checkout v1.41 (or whatever is the first good one)
# git bisect good
# git bisect run /tmp/bisect-rclone.sh
set -e
# Compile notifying git on compile failure
make || exit 125
rclone version
# Test whatever it is that is going wrong - exit with non zero exit code on failure
# commented out examples follow
# truncate -s 10M /tmp/10M
# rclone delete azure:rclone-test1/10M || true
# rclone --retries 1 copyto -vv /tmp/10M azure:rclone-test1/10M --azureblob-upload-cutoff 1M
# rm -f "/tmp/tests's.docx" || true
# rclone -vv --retries 1 copy "drive:test/tests's.docx" /tmp

View File

@@ -1,60 +0,0 @@
#!/usr/bin/env python2
"""
Make backend documentation
"""
import os
import subprocess
marker = "<!--- autogenerated options"
start = marker + " start"
stop = marker + " stop"
def find_backends():
"""Return a list of all backends"""
return [ x for x in os.listdir("backend") if x not in ("all",) ]
def output_docs(backend, out):
"""Output documentation for backend options to out"""
out.flush()
subprocess.check_call(["rclone", "help", "backend", backend], stdout=out)
def alter_doc(backend):
"""Alter the documentation for backend"""
doc_file = "docs/content/"+backend+".md"
if not os.path.exists(doc_file):
raise ValueError("Didn't find doc file %s" % (doc_file,))
new_file = doc_file+"~new~"
altered = False
with open(doc_file, "r") as in_file, open(new_file, "w") as out_file:
in_docs = False
for line in in_file:
if not in_docs:
if start in line:
in_docs = True
start_full = start + " - DO NOT EDIT, instead edit fs.RegInfo in backend/%s/%s.go then run make backenddocs -->\n" % (backend, backend)
out_file.write(start_full)
output_docs(backend, out_file)
out_file.write(stop+" -->\n")
altered = True
if not in_docs:
out_file.write(line)
if in_docs:
if stop in line:
in_docs = False
os.rename(doc_file, doc_file+"~")
os.rename(new_file, doc_file)
if not altered:
raise ValueError("Didn't find '%s' markers for in %s" % (start, doc_file))
if __name__ == "__main__":
failed, success = 0, 0
for backend in find_backends():
try:
alter_doc(backend)
except Exception, e:
print "Failed adding docs for %s backend: %s" % (backend, e)
failed += 1
else:
success += 1
print "Added docs for %d backends with %d failures" % (success, failed)


@@ -1,173 +0,0 @@
#!/usr/bin/python
"""
Generate a markdown changelog for the rclone project
"""
import os
import sys
import re
import datetime
import subprocess
from collections import defaultdict
IGNORE_RES = [
r"^Add .* to contributors$",
r"^Start v\d+.\d+-DEV development$",
r"^Version v\d.\d+$",
]
IGNORE_RE = re.compile("(?:" + "|".join(IGNORE_RES) + ")")
CATEGORY = re.compile(r"(^[\w/ ]+(?:, *[\w/ ]+)*):\s*(.*)$")
backends = [ x for x in os.listdir("backend") if x != "all"]
backend_aliases = {
"amazon cloud drive" : "amazonclouddrive",
"acd" : "amazonclouddrive",
"google cloud storage" : "googlecloudstorage",
"gcs" : "googlecloudstorage",
"azblob" : "azureblob",
"mountlib": "mount",
"cmount": "mount",
"mount/cmount": "mount",
}
backend_titles = {
"amazonclouddrive": "Amazon Cloud Drive",
"googlecloudstorage": "Google Cloud Storage",
"azureblob": "Azure Blob",
"ftp": "FTP",
"sftp": "SFTP",
"http": "HTTP",
"webdav": "WebDAV",
}
STRIP_FIX_RE = re.compile(r"(\s+-)?\s+((fixes|addresses)\s+)?#\d+", flags=re.I)
STRIP_PATH_RE = re.compile(r"^(backend|fs)/")
IS_FIX_RE = re.compile(r"\b(fix|fixes)\b", flags=re.I)
def make_out(data, indent=""):
"""Return a out, lines the first being a function for output into the second"""
out_lines = []
def out(category, title=None):
if title == None:
title = category
lines = data.get(category)
if not lines:
return
del(data[category])
if indent != "" and len(lines) == 1:
out_lines.append(indent+"* " + title+": " + lines[0])
return
out_lines.append(indent+"* " + title)
for line in lines:
out_lines.append(indent+" * " + line)
return out, out_lines
def process_log(log):
"""Process the incoming log into a category dict of lists"""
by_category = defaultdict(list)
for log_line in reversed(log.split("\n")):
log_line = log_line.strip()
hash, author, timestamp, message = log_line.split("|", 3)
message = message.strip()
if IGNORE_RE.search(message):
continue
match = CATEGORY.search(message)
categories = "UNKNOWN"
if match:
categories = match.group(1).lower()
message = match.group(2)
message = STRIP_FIX_RE.sub("", message)
message = message +" ("+author+")"
message = message[0].upper()+message[1:]
seen = set()
for category in categories.split(","):
category = category.strip()
category = STRIP_PATH_RE.sub("", category)
category = backend_aliases.get(category, category)
if category in seen:
continue
by_category[category].append(message)
seen.add(category)
#print category, hash, author, timestamp, message
return by_category
def main():
if len(sys.argv) != 3:
print >>sys.stderr, "Syntax: %s vX.XX vX.XY" % sys.argv[0]
sys.exit(1)
version, next_version = sys.argv[1], sys.argv[2]
log = subprocess.check_output(["git", "log", '''--pretty=format:%H|%an|%aI|%s'''] + [version+".."+next_version])
by_category = process_log(log)
# Output backends first so remaining in by_category are core items
out, backend_lines = make_out(by_category)
out("mount", title="Mount")
out("vfs", title="VFS")
out("local", title="Local")
out("cache", title="Cache")
out("crypt", title="Crypt")
backend_names = sorted(x for x in by_category.keys() if x in backends)
for backend_name in backend_names:
if backend_name in backend_titles:
backend_title = backend_titles[backend_name]
else:
backend_title = backend_name.title()
out(backend_name, title=backend_title)
# Split remaining in by_category into new features and fixes
new_features = defaultdict(list)
bugfixes = defaultdict(list)
for name, messages in by_category.iteritems():
for message in messages:
if IS_FIX_RE.search(message):
bugfixes[name].append(message)
else:
new_features[name].append(message)
# Output new features
out, new_features_lines = make_out(new_features, indent=" ")
for name in sorted(new_features.keys()):
out(name)
# Output bugfixes
out, bugfix_lines = make_out(bugfixes, indent=" ")
for name in sorted(bugfixes.keys()):
out(name)
# Read old changlog and split
with open("docs/content/changelog.md") as fd:
old_changelog = fd.read()
heading = "# Changelog"
i = old_changelog.find(heading)
if i < 0:
raise AssertionError("Couldn't find heading in old changelog")
i += len(heading)
old_head, old_tail = old_changelog[:i], old_changelog[i:]
# Update the build date
old_head = re.sub(r"\d\d\d\d-\d\d-\d\d", str(datetime.date.today()), old_head)
# Output combined changelog with new part
sys.stdout.write(old_head)
sys.stdout.write("""
## %s - %s
* New backends
* New commands
* New Features
%s
* Bug Fixes
%s
%s""" % (next_version, datetime.date.today(), "\n".join(new_features_lines), "\n".join(bugfix_lines), "\n".join(backend_lines)))
sys.stdout.write(old_tail)
if __name__ == "__main__":
main()


@@ -1,4 +1,4 @@
#!/usr/bin/env python2
#!/usr/bin/env python
"""
Make single page versions of the documentation for release and
conversion into man pages etc.
@@ -35,7 +35,6 @@ docs = [
"drive.md", "drive.md",
"http.md", "http.md",
"hubic.md", "hubic.md",
"jottacloud.md",
"mega.md", "mega.md",
"azureblob.md", "azureblob.md",
"onedrive.md", "onedrive.md",
@@ -44,7 +43,6 @@ docs = [
"swift.md", "swift.md",
"pcloud.md", "pcloud.md",
"sftp.md", "sftp.md",
"union.md",
"webdav.md", "webdav.md",
"yandex.md", "yandex.md",


@@ -4,20 +4,18 @@
set -e
go install
mkdir -p /tmp/rclone/cache_test
mkdir -p /tmp/rclone_cache_test
mkdir -p /tmp/rclone/rc_mount
export RCLONE_CONFIG_RCDOCS_TYPE=cache
export RCLONE_CONFIG_RCDOCS_REMOTE=/tmp/rclone/cache_test
rclone -q --rc mount rcdocs: /tmp/rclone/rc_mount &
rclone -q --rc mount rcdocs: /mnt/tmp/ &
sleep 0.5
rclone rc > /tmp/rclone/z.md
rclone rc > /tmp/z.md
fusermount -u -z /tmp/rclone/rc_mount > /dev/null 2>&1 || umount /tmp/rclone/rc_mount
fusermount -z -u /mnt/tmp/
awk '
BEGIN {p=1}
/^<!--- autogenerated start/ {print;system("cat /tmp/rclone/z.md");p=0}
/^<!--- autogenerated start/ {print;system("cat /tmp/z.md");p=0}
/^<!--- autogenerated stop/ {p=1}
p' docs/content/rc.md > /tmp/rclone/rc.md
p' docs/content/rc.md > /tmp/rc.md
mv /tmp/rclone/rc.md docs/content/rc.md
mv /tmp/rc.md docs/content/rc.md
rm -rf /tmp/rclone


@@ -1,17 +0,0 @@
#!/bin/sh
# Use this script after a release to tidy the betas
version="$1"
if [ "$version" = "" ]; then
echo "Syntax: $0 <version> [delete]"
exit 1
fi
dry_run="--dry-run"
if [ "$2" = "delete" ]; then
dry_run=""
else
echo "Running in --dry-run mode"
echo "Use '$0 $version delete' to actually delete files"
fi
rclone ${dry_run} --fast-list -P --checkers 16 --transfers 16 delete --include "**/${version}**" memstore:beta-rclone-org

Some files were not shown because too many files have changed in this diff.