Mirror of https://github.com/rclone/rclone.git

Comparing commits: crypt-pass...fix-2187 (1 commit)
Commit 6f86549ccb
@@ -4,9 +4,6 @@ os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\ncw\rclone
cache:
- '%LocalAppData%\go-build'
environment:
GOPATH: C:\gopath
CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
@@ -46,4 +43,4 @@ artifacts:
- path: build/*-v*.zip

deploy_script:
- IF "%APPVEYOR_REPO_NAME%" == "ncw/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
- IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
@@ -1,4 +1,3 @@
---
version: 2

jobs:
@@ -14,10 +13,10 @@ jobs:
- run:
name: Cross-compile rclone
command: |
docker pull rclone/xgo-cgofuse
docker pull billziss/xgo-cgofuse
go get -v github.com/karalabe/xgo
xgo \
--image=rclone/xgo-cgofuse \
--image=billziss/xgo-cgofuse \
--targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-tags cmount \
.
@@ -30,21 +29,6 @@ jobs:
command: |
mkdir -p /tmp/rclone.dist
cp -R rclone-* /tmp/rclone.dist
mkdir build
cp -R rclone-* build/

- run:
name: Build rclone
command: |
go version
go build

- run:
name: Upload artifacts
command: |
if [[ $CIRCLE_PULL_REQUEST != "" ]]; then
make circleci_upload
fi

- store_artifacts:
path: /tmp/rclone.dist
.github/ISSUE_TEMPLATE.md: 31 lines changed (vendored)

@@ -1,31 +0,0 @@
<!--

Welcome :-) We understand you are having a problem with rclone; we want to help you with that!

If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:

https://forum.rclone.org/

instead of filing an issue for a quick response.

If you are reporting a bug or asking for a new feature then please use one of the templates here:

https://github.com/ncw/rclone/issues/new

otherwise fill in the form below.

Thank you

The Rclone Developers

-->

#### Output of `rclone version`

#### Describe the issue
.github/ISSUE_TEMPLATE/Bug.md: 50 lines changed (vendored)

@@ -1,50 +0,0 @@
---
name: Bug report
about: Report a problem with rclone
---

<!--

Welcome :-) We understand you are having a problem with rclone; we want to help you with that!

If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:

https://forum.rclone.org/

instead of filing an issue for a quick response.

If you think you might have found a bug, please can you try to replicate it with the latest beta?

https://beta.rclone.org/

If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)

Thank you

The Rclone Developers

-->

#### What is the problem you are having with rclone?

#### What is your rclone version (output from `rclone version`)

#### Which OS you are using and how many bits (eg Windows 7, 64 bit)

#### Which cloud storage system are you using? (eg Google Drive)

#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)

#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
.github/ISSUE_TEMPLATE/Feature.md: 36 lines changed (vendored)

@@ -1,36 +0,0 @@
---
name: Feature request
about: Suggest a new feature or enhancement for rclone
---

<!--

Welcome :-)

So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.

Here is a checklist of things to do:

1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum first: https://forum.rclone.org/
3. Make a feature request issue (this is the right place!).
4. Be prepared to get involved making the feature :-)

Looking forward to your great idea!

The Rclone Developers

-->

#### What is your current rclone version (output from `rclone version`)?

#### What problem are you trying to solve?

#### How do you think rclone should be changed to solve that?
.github/PULL_REQUEST_TEMPLATE.md: 29 lines changed (vendored)

@@ -1,29 +0,0 @@
<!--
Thank you very much for contributing code or documentation to rclone! Please
fill out the following questions to make it easier for us to review your
changes.

You do not need to check all the boxes below all at once, feel free to take
your time and add more commits. If you're done and ready for review, please
check the last box.
-->

#### What is the purpose of this change?

<!--
Describe the changes here
-->

#### Was the change discussed in an issue or in the forum before?

<!--
Link issues and relevant forum posts here.
-->

#### Checklist

- [ ] I have read the [contribution guidelines](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
- [ ] I have added tests for all changes in this PR if appropriate.
- [ ] I have added documentation for the changes if appropriate.
- [ ] All commit messages are in [house style](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#commit-messages).
- [ ] I'm done, this Pull Request is ready for review :-)
.travis.yml: 20 lines changed

@@ -4,12 +4,11 @@ dist: trusty
os:
- linux
go:
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
- 1.7.6
- 1.8.7
- 1.9.3
- "1.10.1"
- tip
go_import_path: github.com/ncw/rclone
before_install:
- if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
- if [[ $TRAVIS_OS_NAME == osx ]]; then brew update && brew tap caskroom/cask && brew cask install osxfuse ; fi
@@ -34,25 +33,18 @@ addons:
- libfuse-dev
- rpm
- pkg-config
cache:
directories:
- $HOME/.cache/go-build
matrix:
allow_failures:
- go: tip
include:
- os: osx
go: 1.11.x
go: "1.10.1"
env: GOTAGS=""
cache:
directories:
- $HOME/Library/Caches/go-build
deploy:
provider: script
script: make travis_beta
skip_cleanup: true
on:
repo: ncw/rclone
all_branches: true
go: 1.11.x
go: "1.10.1"
condition: $TRAVIS_PULL_REQUEST == false
CONTRIBUTING.md: 119 lines changed

@@ -21,19 +21,19 @@ with the [latest beta of rclone](https://beta.rclone.org/):
## Submitting a pull request ##

If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub.
like to implement then please submit a pull request via Github.

If it is a big feature then make an issue first so it can be discussed.

You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info.

First in your web browser press the fork button on [rclone's GitHub
First in your web browser press the fork button on [rclone's Github
page](https://github.com/ncw/rclone).

Now in your terminal

go get -u github.com/ncw/rclone
go get github.com/ncw/rclone
cd $GOPATH/src/github.com/ncw/rclone
git remote rename origin upstream
git remote add origin git@github.com:YOURUSER/rclone.git
@@ -64,31 +64,22 @@ packages which you can install with

Make sure you

* Add [documentation](#writing-documentation) for a new feature.
* Follow the [commit message guidelines](#commit-messages).
* Add [unit tests](#testing) for a new feature
* Add documentation for a new feature (see below for where)
* Add unit tests for a new feature
* squash commits down to one per feature
* rebase to master with `git rebase master`
* rebase to master `git rebase master`

When you are done with that

git push origin my-new-feature
git push origin my-new-feature

Go to the GitHub website and click [Create pull
Go to the Github website and click [Create pull
request](https://help.github.com/articles/creating-a-pull-request/).

Your patch will get reviewed and you might get asked to fix some stuff.

If so, then make the changes in the same branch, squash the commits,
rebase it to master then push it to GitHub with `--force`.

## Enabling CI for your fork ##

The CI config files for rclone have taken care of forks of the project, so you can enable CI for your fork repo easily.

rclone currently uses [Travis CI](https://travis-ci.org/), [AppVeyor](https://ci.appveyor.com/), and
[Circle CI](https://circleci.com/) to build the project. To enable them for your fork, simply go into their
websites, find your fork of rclone, and enable building there.
rebase it to master then push it to Github with `--force`.

## Testing ##

@@ -123,13 +114,6 @@ but they can be run against any of the remotes.
cd fs/operations
go test -v -remote TestDrive:

If you want to use the integration test framework to run these tests
all together with an HTML report and test retries then from the
project root:

go install github.com/ncw/rclone/fstest/test_all
test_all -backend drive

If you want to run all the integration tests against all the remotes,
then change into the project root and run

@@ -182,21 +166,17 @@ with modules beneath.
* pacer - retries with backoff and paces operations
* readers - a selection of useful io.Readers
* rest - a thin abstraction over net/http for REST
* vendor - 3rd party code managed by `go mod`
* vendor - 3rd party code managed by the dep tool
* vfs - Virtual FileSystem layer for implementing rclone mount and similar

## Writing Documentation ##

If you are adding a new feature then please update the documentation.

If you add a new general flag (not for a backend), then document it in
If you add a new flag, then if it is a general flag, document it in
`docs/content/docs.md` - the flags there are supposed to be in
alphabetical order.

If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field. The first line of this is used
for the flag help, the remainder is shown to the user in `rclone
config` and is added to the docs with `make backenddocs`.
alphabetical order. If it is a remote specific flag, then document it
in `docs/content/remote.md`.

The only documentation you need to edit are the `docs/content/*.md`
files. The MANUAL.*, rclone.1, web site etc are all auto generated
@@ -215,20 +195,14 @@ file.
## Commit messages ##

Please make the first line of your commit message a summary of the
change that a user (not a developer) of rclone would like to read, and
prefix it with the directory of the change followed by a colon. The
changelog gets made by looking at just these first lines so make it
good!
change, and prefix it with the directory of the change followed by a
colon. The changelog gets made by looking at just these first lines
so make it good!

If you have more to say about the commit, then enter a blank line and
carry on the description. Remember to say why the change was needed -
the commit itself shows what was changed.

Writing more is better than less. Comparing the behaviour before the
change to that after the change is very useful. Imagine you are
writing to yourself in 12 months time when you've forgotten everything
about what you just did and you need to get up to speed quickly.

If the change fixes an issue then write `Fixes #1234` in the commit
message. This can be on the subject line if it will fit. If you
don't want to close the associated issue just put `#1234` and the
@@ -255,53 +229,37 @@ Fixes #1498

## Adding a dependency ##

rclone uses the [go
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
support in go1.11 and later to manage its dependencies.
rclone uses the [dep](https://github.com/golang/dep) tool to manage
its dependencies. All code that rclone needs for building is stored
in the `vendor` directory for perfectly reproducible builds.

**NB** you must be using go1.11 or above to add a dependency to
rclone. Rclone will still build with older versions of go, but we use
the `go mod` command for dependencies which is only in go1.11 and
above.
The `vendor` directory is entirely managed by the `dep` tool.

rclone can be built with modules outside of the GOPATH, but for
backwards compatibility with older go versions, rclone also maintains
a `vendor` directory with all the external code rclone needs for
building.
To add a new dependency, run `dep ensure` and `dep` will pull in the
new dependency to the `vendor` directory and update the `Gopkg.lock`
file.

The `vendor` directory is entirely managed by the `go mod` tool, do
not add things manually.
You can add constraints on that package in the `Gopkg.toml` file (see
the `dep` documentation), but don't unless you really need to.

To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency, add it to
`go.mod` and `go.sum` and vendor it for older go versions.

GO111MODULE=on go get github.com/ncw/new_dependency
GO111MODULE=on go mod vendor

You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.

Please check in the changes generated by `go mod` including the
`vendor` directory and `go.mod` and `go.sum` in a single commit
separate from any other code changes with the title "vendor: add
github.com/ncw/new_dependency". Remember to `git add` any new files
in `vendor`.
Please check in the changes generated by `dep` including the `vendor`
directory and `Godep.toml` and `Godep.lock` in a single commit
separate from any other code changes. Watch out for new files in
`vendor`.

## Updating a dependency ##

If you need to update a dependency then run

GO111MODULE=on go get -u github.com/pkg/errors
GO111MODULE=on go mod vendor
dep ensure -update github.com/pkg/errors

Check in the changes in a single commit as above.

## Updating all the dependencies ##

In order to update all the dependencies then run `make update`. This
just uses the go modules to update all the modules to their latest
stable release. Check in the changes in a single commit as above.
just runs `dep ensure -update`. Check in the changes in a single
commit as above.

This should be done early in the release cycle to pick up new versions
of packages in time for them to get some testing.

@@ -350,13 +308,7 @@ Unit tests

Integration tests

* Add your backend to `fstest/test_all/config.yaml`
* Once you've done that then you can use the integration test framework from the project root:
* go install ./...
* test_all -backend remote

Or if you want to run the integration tests manually:

* Add your fs to `fstest/test_all/test_all.go`
* Make sure integration tests pass with
* `cd fs/operations`
* `go test -v -remote TestRemote:`
@@ -371,10 +323,11 @@ See the [testing](#testing) section for more information on integration tests.

Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.

* `README.md` - main GitHub page
* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
* `README.md` - main Github page
* `docs/content/remote.md` - main docs page
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/about.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `bin/make_manual.py` - add the page to the `docs` constant
* `cmd/cmd.go` - the main help for rclone
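For illustration, a commit message following the guidelines in the CONTRIBUTING.md hunks above might look like the following (a hypothetical example; the directory prefix, summary, and issue number are invented):

docs: fix typo in the copy command examples

Before this change the example used a remote name that did not match
the rest of the page, which confused new users. After this change the
examples are consistent.

Fixes #1234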
Gopkg.lock: 490 lines changed (generated, new file)

@@ -0,0 +1,490 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.


[[projects]]
branch = "master"
name = "bazil.org/fuse"
packages = [
".",
"fs",
"fuseutil"
]
revision = "65cc252bf6691cb3c7014bcb2c8dc29de91e3a7e"

[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata"]
revision = "0fd7230b2a7505833d5f69b75cbd6c9582401479"
version = "v0.23.0"

[[projects]]
name = "github.com/Azure/azure-sdk-for-go"
packages = [
"storage",
"version"
]
revision = "fbe7db0e3f9793ba3e5704efbab84f51436c136e"
version = "v18.0.0"

[[projects]]
name = "github.com/Azure/go-autorest"
packages = [
"autorest",
"autorest/adal",
"autorest/azure",
"autorest/date"
]
revision = "1f7cd6cfe0adea687ad44a512dfe76140f804318"
version = "v10.12.0"

[[projects]]
branch = "master"
name = "github.com/Unknwon/goconfig"
packages = ["."]
revision = "ef1e4c783f8f0478bd8bff0edb3dd0bade552599"

[[projects]]
name = "github.com/VividCortex/ewma"
packages = ["."]
revision = "b24eb346a94c3ba12c1da1e564dbac1b498a77ce"
version = "v1.1.1"

[[projects]]
branch = "master"
name = "github.com/a8m/tree"
packages = ["."]
revision = "3cf936ce15d6100c49d9c75f79c220ae7e579599"

[[projects]]
name = "github.com/abbot/go-http-auth"
packages = ["."]
revision = "0ddd408d5d60ea76e320503cc7dd091992dee608"
version = "v0.4.0"

[[projects]]
name = "github.com/aws/aws-sdk-go"
packages = [
"aws",
"aws/awserr",
"aws/awsutil",
"aws/client",
"aws/client/metadata",
"aws/corehandlers",
"aws/credentials",
"aws/credentials/ec2rolecreds",
"aws/credentials/endpointcreds",
"aws/credentials/stscreds",
"aws/csm",
"aws/defaults",
"aws/ec2metadata",
"aws/endpoints",
"aws/request",
"aws/session",
"aws/signer/v4",
"internal/sdkio",
"internal/sdkrand",
"internal/shareddefaults",
"private/protocol",
"private/protocol/eventstream",
"private/protocol/eventstream/eventstreamapi",
"private/protocol/query",
"private/protocol/query/queryutil",
"private/protocol/rest",
"private/protocol/restxml",
"private/protocol/xml/xmlutil",
"service/s3",
"service/s3/s3iface",
"service/s3/s3manager",
"service/sts"
]
revision = "bfc1a07cf158c30c41a3eefba8aae043d0bb5bff"
version = "v1.14.8"

[[projects]]
name = "github.com/billziss-gh/cgofuse"
packages = ["fuse"]
revision = "ea66f9809c71af94522d494d3d617545662ea59d"
version = "v1.1.0"

[[projects]]
branch = "master"
name = "github.com/coreos/bbolt"
packages = ["."]
revision = "af9db2027c98c61ecd8e17caa5bd265792b9b9a2"

[[projects]]
name = "github.com/cpuguy83/go-md2man"
packages = ["md2man"]
revision = "20f5889cbdc3c73dbd2862796665e7c465ade7d1"
version = "v1.0.8"

[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"

[[projects]]
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
version = "v3.2.0"

[[projects]]
name = "github.com/djherbis/times"
packages = ["."]
revision = "95292e44976d1217cf3611dc7c8d9466877d3ed5"
version = "v1.0.1"

[[projects]]
name = "github.com/dropbox/dropbox-sdk-go-unofficial"
packages = [
"dropbox",
"dropbox/async",
"dropbox/common",
"dropbox/file_properties",
"dropbox/files",
"dropbox/seen_state",
"dropbox/sharing",
"dropbox/team_common",
"dropbox/team_policies",
"dropbox/users",
"dropbox/users_common"
]
revision = "7afa861bfde5a348d765522b303b6fbd9d250155"
version = "v4.1.0"

[[projects]]
name = "github.com/go-ini/ini"
packages = ["."]
revision = "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5"
version = "v1.37.0"

[[projects]]
name = "github.com/golang/protobuf"
packages = ["proto"]
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"

[[projects]]
branch = "master"
name = "github.com/google/go-querystring"
packages = ["query"]
revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"

[[projects]]
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"

[[projects]]
branch = "master"
name = "github.com/jlaffaye/ftp"
packages = ["."]
revision = "2403248fa8cc9f7909862627aa7337f13f8e0bf1"

[[projects]]
name = "github.com/jmespath/go-jmespath"
packages = ["."]
revision = "0b12d6b5"

[[projects]]
branch = "master"
name = "github.com/kardianos/osext"
packages = ["."]
revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"

[[projects]]
name = "github.com/kr/fs"
packages = ["."]
revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
version = "v0.1.0"

[[projects]]
name = "github.com/marstr/guid"
packages = ["."]
revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f"
version = "v1.1.0"

[[projects]]
name = "github.com/mattn/go-runewidth"
packages = ["."]
revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
version = "v0.0.2"

[[projects]]
branch = "master"
name = "github.com/ncw/go-acd"
packages = ["."]
revision = "887eb06ab6a255fbf5744b5812788e884078620a"

[[projects]]
name = "github.com/ncw/swift"
packages = ["."]
revision = "b2a7479cf26fa841ff90dd932d0221cb5c50782d"
version = "v1.0.39"

[[projects]]
branch = "master"
name = "github.com/nsf/termbox-go"
packages = ["."]
revision = "5c94acc5e6eb520f1bcd183974e01171cc4c23b3"

[[projects]]
branch = "master"
name = "github.com/okzk/sdnotify"
packages = ["."]
revision = "ed8ca104421a21947710335006107540e3ecb335"

[[projects]]
name = "github.com/patrickmn/go-cache"
packages = ["."]
revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0"
version = "v2.1.0"

[[projects]]
name = "github.com/pengsrc/go-shared"
packages = [
"buffer",
"check",
"convert",
"log",
"reopen"
]
revision = "807ee759d82c84982a89fb3dc875ef884942f1e5"
version = "v0.2.0"

[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"

[[projects]]
name = "github.com/pkg/sftp"
packages = ["."]
revision = "57673e38ea946592a59c26592b7e6fbda646975b"
version = "1.8.0"

[[projects]]
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"

[[projects]]
name = "github.com/rfjakob/eme"
packages = ["."]
revision = "01668ae55fe0b79a483095689043cce3e80260db"
version = "v1.1"

[[projects]]
name = "github.com/russross/blackfriday"
packages = ["."]
revision = "55d61fa8aa702f59229e6cff85793c22e580eaf5"
version = "v1.5.1"

[[projects]]
name = "github.com/satori/go.uuid"
packages = ["."]
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
version = "v1.2.0"

[[projects]]
branch = "master"
name = "github.com/sevlyar/go-daemon"
packages = ["."]
revision = "f9261e73885de99b1647d68bedadf2b9a99ad11f"

[[projects]]
branch = "master"
name = "github.com/skratchdot/open-golang"
packages = ["open"]
revision = "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"

[[projects]]
name = "github.com/spf13/cobra"
packages = [
".",
"doc"
]
revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
version = "v0.0.3"

[[projects]]
name = "github.com/spf13/pflag"
packages = ["."]
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
version = "v1.0.1"

[[projects]]
name = "github.com/stretchr/testify"
packages = [
"assert",
"require"
]
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
version = "v1.2.2"

[[projects]]
branch = "master"
name = "github.com/t3rm1n4l/go-mega"
packages = ["."]
revision = "57978a63bd3f91fa7e188b751a7e7e6dd4e33813"

[[projects]]
branch = "master"
name = "github.com/xanzy/ssh-agent"
packages = ["."]
revision = "ba9c9e33906f58169366275e3450db66139a31a9"

[[projects]]
name = "github.com/yunify/qingstor-sdk-go"
packages = [
".",
"config",
"logger",
"request",
"request/builder",
"request/data",
"request/errors",
"request/signer",
"request/unpacker",
"service",
"utils"
]
revision = "4f9ac88c5fec7350e960aabd0de1f1ede0ad2895"
version = "v2.2.14"

[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = [
"bcrypt",
"blowfish",
"curve25519",
"ed25519",
"ed25519/internal/edwards25519",
"internal/chacha20",
"internal/subtle",
"nacl/secretbox",
"pbkdf2",
"poly1305",
"salsa20/salsa",
"scrypt",
"ssh",
"ssh/agent",
"ssh/terminal"
]
revision = "027cca12c2d63e3d62b670d901e8a2c95854feec"

[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = [
"context",
"context/ctxhttp",
"html",
"html/atom",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"publicsuffix",
"webdav",
"webdav/internal/xml",
"websocket"
]
revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196"

[[projects]]
branch = "master"
name = "golang.org/x/oauth2"
packages = [
".",
"google",
"internal",
"jws",
"jwt"
]
revision = "1e0a3fa8ba9a5c9eb35c271780101fdaf1b205d7"

[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = [
"unix",
"windows"
]
revision = "6c888cc515d3ed83fc103cf1d84468aad274b0a7"

[[projects]]
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable"
]
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"

[[projects]]
branch = "master"
name = "golang.org/x/time"
packages = ["rate"]
revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"

[[projects]]
branch = "master"
name = "google.golang.org/api"
packages = [
"drive/v3",
"gensupport",
"googleapi",
"googleapi/internal/uritemplates",
"storage/v1"
]
revision = "2eea9ba0a3d94f6ab46508083e299a00bbbc65f6"

[[projects]]
name = "google.golang.org/appengine"
packages = [
".",
"internal",
"internal/app_identity",
"internal/base",
"internal/datastore",
"internal/log",
"internal/modules",
"internal/remote_api",
"internal/urlfetch",
"log",
"urlfetch"
]
revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
version = "v1.1.0"

[[projects]]
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"

[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "c1378c5fc821e27711155958ff64b3c74b56818ba4733dbfe0c86d518c32880e"
solver-name = "gps-cdcl"
solver-version = 1
Gopkg.toml: 11 lines changed (new file)

@@ -0,0 +1,11 @@
# pin this to master to pull in the macOS changes
# can likely remove for 1.43
[[override]]
branch = "master"
name = "github.com/sevlyar/go-daemon"

# pin this to master to pull in the fix for linux/mips
# can likely remove for 1.43
[[override]]
branch = "master"
name = "github.com/coreos/bbolt"
ISSUE_TEMPLATE.md: 43 lines changed (new file)

@@ -0,0 +1,43 @@
<!--

Hi!

We understand you are having a problem with rclone or have an idea for an improvement - we want to help you with that!

If you've just got a question or aren't sure if you've found a bug then please use the rclone forum

https://forum.rclone.org/

instead of filing an issue. We'll reply quickly and it won't increase our massive issue backlog.

If you think you might have found a bug, please can you try to replicate it with the latest beta?

https://beta.rclone.org/

If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)

If you have an idea for an improvement, then please search the old issues first and if you don't find your idea, make a new issue.

Thanks

The Rclone Developers

-->

#### What is the problem you are having with rclone?

#### What is your rclone version (eg output from `rclone -V`)

#### Which OS you are using and how many bits (eg Windows 7, 64 bit)

#### Which cloud storage system are you using? (eg Google Drive)

#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)

#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
@@ -1,17 +1,12 @@
# Maintainers guide for rclone #

Current active maintainers of rclone are:
Current active maintainers of rclone are

| Name             | GitHub ID   | Specific Responsibilities    |
| :--------------- | :---------- | :--------------------------- |
| Nick Craig-Wood  | @ncw        | overall project health       |
| Stefan Breunig   | @breunigs   |                              |
| Ishuah Kariuki   | @ishuah     |                              |
| Remus Bunduc     | @remusb     | cache backend                |
| Fabian Möller    | @B4dM4n     |                              |
| Alex Chen        | @Cnly       | onedrive backend             |
| Sandeep Ummadi   | @sandeepkru | azureblob backend            |
| Sebastian Bünger | @buengese   | jottacloud & yandex backends |
* Nick Craig-Wood @ncw
* Stefan Breunig @breunigs
* Ishuah Kariuki @ishuah
* Remus Bunduc @remusb - cache subsystem maintainer
* Fabian Möller @B4dM4n

**This is a work in progress Draft**

@@ -61,7 +56,7 @@ Close tickets as soon as you can - make sure they are tagged with a release. Po

Try to process pull requests promptly!

Merging pull requests on GitHub itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
Merging pull requests on Github itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.

After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.
MANUAL.html: 6273 lines changed (diff suppressed because it is too large)
MANUAL.txt: 6364 lines changed (diff suppressed because it is too large)
Makefile: 75 lines changed

@@ -1,9 +1,5 @@
SHELL = bash
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
LAST_TAG := $(shell git describe --tags --abbrev=0)
ifeq ($(BRANCH),$(LAST_TAG))
BRANCH := master
endif
TAG_BRANCH := -$(BRANCH)
BRANCH_PATH := branch/
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
@@ -11,14 +7,12 @@ ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
BRANCH_PATH :=
endif
TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
LAST_TAG := $(shell git describe --tags --abbrev=0)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
ifneq ($(TAG),$(LAST_TAG))
TAG := $(TAG)-beta
endif
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
# Run full tests if go >= go1.11
FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 11)')
# Run full tests if go >= go1.9
FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 9)')
BETA_PATH := $(BRANCH_PATH)$(TAG)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
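As a worked example of the NEW_TAG computation above (the same perl one-liner, with the Makefile's $$ unescaped to $ for direct shell use; the tag value v1.42 is hypothetical): the expression strips the leading v, adds 0.01, and reformats.

$ echo v1.42 | perl -lpe 's/v//; $_ += 0.01; $_ = sprintf("v%.2f", $_)'
v1.43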
@@ -50,9 +44,10 @@ version:

# Full suite of integration tests
test: rclone
go install --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/ncw/rclone/fstest/test_all
-test_all 2>&1 | tee test_all.log
@echo "Written logs in test_all.log"
go install github.com/ncw/rclone/fstest/test_all
-go test -v -count 1 $(BUILDTAGS) $(GO_FILES) 2>&1 | tee test.log
-test_all github.com/ncw/rclone/fs/operations github.com/ncw/rclone/fs/sync 2>&1 | tee fs/test_all.log
@echo "Written logs in test.log and fs/test_all.log"

# Quick test
quicktest:
@@ -67,7 +62,7 @@ ifdef FULL_TESTS
go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
errcheck $(BUILDTAGS) ./...
find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
go list ./... | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl|ApplicationCredentialId)' ; test $$? -eq 1
go list ./... | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
else
@echo Skipping source quality tests as version of go too old
endif
@@ -87,7 +82,8 @@ build_dep:
ifdef FULL_TESTS
go get -u github.com/kisielk/errcheck
go get -u golang.org/x/tools/cmd/goimports
go get -u golang.org/x/lint/golint
go get -u github.com/golang/lint/golint
go get -u github.com/tools/godep
endif

# Get the release dependencies
@@ -97,16 +93,15 @@ release_dep:

# Update dependencies
update:
GO111MODULE=on go get -u ./...
GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor
go get -u github.com/golang/dep/cmd/dep
dep ensure -update -v

doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
doc: rclone.1 MANUAL.html MANUAL.txt

rclone.1: MANUAL.md
pandoc -s --from markdown --to man MANUAL.md -o rclone.1

MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs
./bin/make_manual.py

MANUAL.html: MANUAL.md
@@ -116,10 +111,7 @@ MANUAL.txt: MANUAL.md
pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt

commanddocs: rclone
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/commands/

backenddocs: rclone bin/make_backend_docs.py
./bin/make_backend_docs.py
rclone gendocs docs/content/commands/

rcdocs: rclone
bin/make_rc_docs.sh
@@ -154,8 +146,8 @@ check_sign:
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c

upload:
rclone -P copy build/ memstore:downloads-rclone-org/$(TAG)
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"'
rclone -v copy --exclude '*current*' build/ memstore:downloads-rclone-org/$(TAG)
rclone -v copy --include '*current*' --include version.txt build/ memstore:downloads-rclone-org

upload_github:
./bin/upload-github $(TAG)
@@ -164,16 +156,16 @@ cross: doc
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)

beta:
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)β
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)β
@echo Beta release ready at https://pub.rclone.org/$(TAG)%CE%B2/

log_since_last_release:
git log $(LAST_TAG)..

compile_all:
ifdef FULL_TESTS
go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)
go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)β
else
@echo Skipping compile all as version of go too old
endif
@@ -185,13 +177,6 @@ ifndef BRANCH_PATH
endif
@echo Beta release ready at $(BETA_URL)

circleci_upload:
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifndef BRANCH_PATH
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
@echo Beta release ready at $(BETA_URL)/testbuilds

BUILD_FLAGS := -exclude "^(windows|darwin)/"
ifeq ($(TRAVIS_OS_NAME),osx)
BUILD_FLAGS := -include "^darwin/" -cgo
@@ -202,16 +187,19 @@ ifeq ($(TRAVIS_OS_NAME),linux)
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
endif
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)β
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)

# Fetch the binary builds from travis and appveyor
fetch_binaries:
rclone -P sync $(BETA_UPLOAD) build/
# Fetch the windows builds from appveyor
fetch_windows:
rclone -v copy --include 'rclone-v*-windows-*.zip' $(BETA_UPLOAD) build/
-#cp -av build/rclone-v*-windows-386.zip build/rclone-current-windows-386.zip
-#cp -av build/rclone-v*-windows-amd64.zip build/rclone-current-windows-amd64.zip
md5sum build/rclone-*-windows-*.zip | sort

serve: website
cd docs && hugo server -v -w
@@ -222,10 +210,10 @@ tag: doc
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html
git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG)
bin/make_changelog.py $(LAST_TAG) $(NEW_TAG) > docs/content/changelog.md.new
mv docs/content/changelog.md.new docs/content/changelog.md
@echo "Edit the new changelog in docs/content/changelog.md"
@echo "Then commit all the changes"
@echo " * $(NEW_TAG) -" `date -I` >> docs/content/changelog.md
@git log $(LAST_TAG)..$(NEW_TAG) --oneline >> docs/content/changelog.md
@echo "Then commit the changes"
@echo git commit -m \"Version $(NEW_TAG)\" -a -v
@echo "And finally run make retag before make cross etc"

@@ -238,3 +226,4 @@ startdev:

winzip:
zip -9 rclone-$(TAG).zip rclone.exe
README.md: 101 lines changed

@@ -2,11 +2,10 @@

[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
[Download](https://rclone.org/downloads/) |
[Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/) |
[Forum](https://forum.rclone.org/)
[G+](https://google.com/+RcloneOrg)

[](https://travis-ci.org/ncw/rclone)
@@ -14,83 +13,49 @@
[](https://circleci.com/gh/ncw/rclone/tree/master)
[](https://godoc.org/github.com/ncw/rclone)

# Rclone
Rclone is a command line program to sync files and directories to and from

Rclone *("rsync for cloud storage")* is a command line program to sync files and directories to and from different cloud storage providers.
* Amazon Drive
* Amazon S3 / Dreamhost / Ceph / Minio / Wasabi
* Backblaze B2
* Box
* Dropbox
* FTP
* Google Cloud Storage
* Google Drive
* HTTP
* Hubic
* Mega
* Microsoft Azure Blob Storage
* Microsoft OneDrive
* OpenDrive
* Openstack Swift / Rackspace cloud files / Memset Memstore / OVH / Oracle Cloud Storage
* pCloud
* QingStor
* SFTP
* Webdav / Owncloud / Nextcloud
* Yandex Disk
* The local filesystem

## Storage providers

* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* Openstack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* put.io [:page_facing_up:](https://rclone.org/webdav/#put-io)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)

Please see [the full list of all storage providers and their features](https://rclone.org/overview/)

## Features
Features

* MD5/SHA1 hashes checked at all times for file integrity
* Timestamps preserved on files
* Partial syncs supported on a whole file basis
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Copy mode to just copy new/changed files
* Sync (one way) mode to make a directory identical
* Check mode to check for file hash equality
* Can sync to and from network, eg two different cloud accounts
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Optional encryption (Crypt)
* Optional FUSE mount

## Installation & documentation
See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.

Please see the [rclone website](https://rclone.org/) for:

* [Installation](https://rclone.org/install/)
* [Documentation & configuration](https://rclone.org/docs/)
* [Changelog](https://rclone.org/changelog/)
* [FAQ](https://rclone.org/faq/)
* [Storage providers](https://rclone.org/overview/)
* [Forum](https://forum.rclone.org/)
* ...and more

## Downloads

* https://rclone.org/downloads/
* https://rclone.org/

License
-------

This is free software under the terms of the MIT license (check the
[COPYING file](/COPYING) included in this package).
COPYING file included in this package).
RELEASE.md: 49 lines changed

@@ -13,9 +13,14 @@ Making a release
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX"
* make retag
* make release_dep
* # Set the GOPATH for a current stable go compiler
* make cross
* git checkout docs/content/commands # to undo date changes in commands
* git push --tags origin master
* # Wait for the appveyor and travis builds to complete then...
* make fetch_binaries
* git push --tags origin master:stable # update the stable branch for packager.io
* # Wait for the appveyor and travis builds to complete then fetch the windows binaries from appveyor
* make fetch_windows
* make tarball
* make sign_upload
* make check_sign
@@ -26,45 +31,11 @@ Making a release
* # announce with forum post, twitter post, G+ post

Early in the next release cycle update the vendored dependencies
* Review any pinned packages in go.mod and remove if possible
* Review any pinned packages in Gopkg.toml and remove if possible
* make update
* git status
* git add new files
* carry forward any patches to vendor stuff
* git commit -a -v

If `make update` fails with errors like this:

```
# github.com/cpuguy83/go-md2man/md2man
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
```

Can be fixed with

* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
* GO111MODULE=on go mod tidy
* GO111MODULE=on go mod vendor

Making a point release. If rclone needs a point release due to some
horrendous bug, then
* git branch v1.XX v1.XX-fixes
* git cherry-pick any fixes
* Test (see above)
* make NEW_TAG=v1.XX.1 tag
* edit docs/content/changelog.md
* make TAG=v1.43.1 doc
* git commit -a -v -m "Version v1.XX.1"
* git tag -d v1.XX.1
* git tag -s -m "Version v1.XX.1" v1.XX.1
* git push --tags -u origin v1.XX-fixes
* make BRANCH_PATH= TAG=v1.43.1 fetch_binaries
* make TAG=v1.43.1 tarball
* make TAG=v1.43.1 sign_upload
* make TAG=v1.43.1 check_sign
* make TAG=v1.43.1 upload
* make TAG=v1.43.1 upload_website
* make TAG=v1.43.1 upload_github
* NB this overwrites the current beta so after the release, rebuild the last travis build
* Announce!
Make the version number be just in a file?
@@ -2,12 +2,12 @@ package alias

import (
"errors"
"path"
"path/filepath"
"strings"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fspath"
"github.com/ncw/rclone/fs/config"
)

// Register with Fs
@@ -17,38 +17,29 @@ func init() {
Description: "Alias for an existing remote",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
Required: true,
Name: "remote",
Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
}},
}
fs.Register(fsi)
}

// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
}

// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.Remote == "" {
func NewFs(name, root string) (fs.Fs, error) {
remote := config.FileGet(name, "remote")
if remote == "" {
return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting")
}
if strings.HasPrefix(opt.Remote, name+":") {
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
}
fsInfo, configName, fsPath, config, err := fs.ConfigFs(opt.Remote)
fsInfo, configName, fsPath, err := fs.ParseRemote(remote)
if err != nil {
return nil, err
}
return fsInfo.NewFs(configName, fspath.JoinRootPath(fsPath, root), config)

root = filepath.ToSlash(root)
return fsInfo.NewFs(configName, path.Join(fsPath, root))
}
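The new-style NewFs above no longer reads the config file directly; it declares an Options struct and lets configstruct fill it from the supplied configmap.Mapper using the `config:"remote"` struct tag. A self-contained toy sketch of how such a tag-driven mapper can work (an illustration of the mechanism only, not rclone's actual implementation):

```go
package main

import (
	"fmt"
	"reflect"
)

// set fills the string fields of the struct pointed to by opt from m,
// matching each field's `config:"..."` tag against keys in m. It plays
// the role configstruct.Set plays in the diff above, for strings only.
func set(m map[string]string, opt interface{}) {
	v := reflect.ValueOf(opt).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		tag := t.Field(i).Tag.Get("config")
		if value, ok := m[tag]; ok && v.Field(i).Kind() == reflect.String {
			v.Field(i).SetString(value)
		}
	}
}

// Options mirrors the shape of the backend Options struct above.
type Options struct {
	Remote string `config:"remote"`
}

func main() {
	opt := new(Options)
	set(map[string]string{"remote": "myremote:path/to/dir"}, opt)
	fmt.Println(opt.Remote) // prints: myremote:path/to/dir
}
```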
@@ -15,7 +15,6 @@ import (
_ "github.com/ncw/rclone/backend/googlecloudstorage"
_ "github.com/ncw/rclone/backend/http"
_ "github.com/ncw/rclone/backend/hubic"
_ "github.com/ncw/rclone/backend/jottacloud"
_ "github.com/ncw/rclone/backend/local"
_ "github.com/ncw/rclone/backend/mega"
_ "github.com/ncw/rclone/backend/onedrive"
@@ -25,7 +24,6 @@ import (
_ "github.com/ncw/rclone/backend/s3"
_ "github.com/ncw/rclone/backend/sftp"
_ "github.com/ncw/rclone/backend/swift"
_ "github.com/ncw/rclone/backend/union"
_ "github.com/ncw/rclone/backend/webdav"
_ "github.com/ncw/rclone/backend/yandex"
)
@@ -21,11 +21,10 @@ import (
    "strings"
    "time"

    acd "github.com/ncw/go-acd"
    "github.com/ncw/go-acd"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config"
    "github.com/ncw/rclone/fs/config/configmap"
    "github.com/ncw/rclone/fs/config/configstruct"
    "github.com/ncw/rclone/fs/config/flags"
    "github.com/ncw/rclone/fs/fserrors"
    "github.com/ncw/rclone/fs/fshttp"
    "github.com/ncw/rclone/fs/hash"
@@ -38,17 +37,19 @@ import (
)

const (
    folderKind = "FOLDER"
    fileKind = "FILE"
    statusAvailable = "AVAILABLE"
    timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
    minSleep = 20 * time.Millisecond
    warnFileSize = 50000 << 20 // Display warning for files larger than this size
    defaultTempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
    folderKind = "FOLDER"
    fileKind = "FILE"
    statusAvailable = "AVAILABLE"
    timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
    minSleep = 20 * time.Millisecond
    warnFileSize = 50000 << 20 // Display warning for files larger than this size
)

// Globals
var (
    // Flags
    tempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
    uploadWaitPerGB = flags.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
    // Description of how to auth for this app
    acdConfig = &oauth2.Config{
        Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
@@ -66,91 +67,35 @@ var (
func init() {
    fs.Register(&fs.RegInfo{
        Name: "amazon cloud drive",
        Prefix: "acd",
        Description: "Amazon Drive",
        NewFs: NewFs,
        Config: func(name string, m configmap.Mapper) {
            err := oauthutil.Config("amazon cloud drive", name, m, acdConfig)
        Config: func(name string) {
            err := oauthutil.Config("amazon cloud drive", name, acdConfig)
            if err != nil {
                log.Fatalf("Failed to configure token: %v", err)
            }
        },
        Options: []fs.Option{{
            Name: config.ConfigClientID,
            Help: "Amazon Application Client ID.",
            Required: true,
            Name: config.ConfigClientID,
            Help: "Amazon Application Client Id - required.",
        }, {
            Name: config.ConfigClientSecret,
            Help: "Amazon Application Client Secret.",
            Required: true,
            Name: config.ConfigClientSecret,
            Help: "Amazon Application Client Secret - required.",
        }, {
            Name: config.ConfigAuthURL,
            Help: "Auth server URL.\nLeave blank to use Amazon's.",
            Advanced: true,
            Name: config.ConfigAuthURL,
            Help: "Auth server URL - leave blank to use Amazon's.",
        }, {
            Name: config.ConfigTokenURL,
            Help: "Token server url.\nleave blank to use Amazon's.",
            Advanced: true,
        }, {
            Name: "checkpoint",
            Help: "Checkpoint for internal polling (debug).",
            Hide: fs.OptionHideBoth,
            Advanced: true,
        }, {
            Name: "upload_wait_per_gb",
            Help: `Additional time per GB to wait after a failed complete upload to see if it appears.

Sometimes Amazon Drive gives an error when a file has been fully
uploaded but the file appears anyway after a little while. This
happens sometimes for files over 1GB in size and nearly every time for
files bigger than 10GB. This parameter controls the time rclone waits
for the file to appear.

The default value for this parameter is 3 minutes per GB, so by
default it will wait 3 minutes for every GB uploaded to see if the
file appears.

You can disable this feature by setting it to 0. This may cause
conflict errors as rclone retries the failed upload but the file will
most likely appear correctly eventually.

These values were determined empirically by observing lots of uploads
of big files for a range of file sizes.

Upload with the "-v" flag to see more info about what rclone is doing
in this situation.`,
            Default: fs.Duration(180 * time.Second),
            Advanced: true,
        }, {
            Name: "templink_threshold",
            Help: `Files >= this size will be downloaded via their tempLink.

Files this size or more will be downloaded via their "tempLink". This
is to work around a problem with Amazon Drive which blocks downloads
of files bigger than about 10GB. The default for this is 9GB which
shouldn't need to be changed.

To download files above this threshold, rclone requests a "tempLink"
which downloads the file through a temporary URL directly from the
underlying S3 storage.`,
            Default: defaultTempLinkThreshold,
            Advanced: true,
            Name: config.ConfigTokenURL,
            Help: "Token server url - leave blank to use Amazon's.",
        }},
    })
}

// Options defines the configuration for this backend
type Options struct {
    Checkpoint string `config:"checkpoint"`
    UploadWaitPerGB fs.Duration `config:"upload_wait_per_gb"`
    TempLinkThreshold fs.SizeSuffix `config:"templink_threshold"`
    flags.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
}

// Fs represents a remote acd server
type Fs struct {
    name string // name of this remote
    features *fs.Features // optional features
    opt Options // options for this Fs
    c *acd.Client // the connection to the acd server
    noAuthClient *http.Client // unauthenticated http client
    root string // the path we are working on
@@ -246,13 +191,7 @@ func filterRequest(req *http.Request) {
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    }
func NewFs(name, root string) (fs.Fs, error) {
    root = parsePath(root)
    baseClient := fshttp.NewClient(fs.Config)
    if do, ok := baseClient.Transport.(interface {
@@ -262,16 +201,15 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    } else {
        fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
    }
    oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
    oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, acdConfig, baseClient)
    if err != nil {
        return nil, errors.Wrap(err, "failed to configure Amazon Drive")
        log.Fatalf("Failed to configure Amazon Drive: %v", err)
    }

    c := acd.NewClient(oAuthClient)
    f := &Fs{
        name: name,
        root: root,
        opt: *opt,
        c: c,
        pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
        noAuthClient: fshttp.NewClient(fs.Config),
@@ -312,16 +250,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    if err != nil {
        // Assume it is a file
        newRoot, remote := dircache.SplitPath(root)
        tempF := *f
        tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
        tempF.root = newRoot
        newF := *f
        newF.dirCache = dircache.New(newRoot, f.trueRootID, &newF)
        newF.root = newRoot
        // Make new Fs which is the parent
        err = tempF.dirCache.FindRoot(false)
        err = newF.dirCache.FindRoot(false)
        if err != nil {
            // No root so return old f
            return f, nil
        }
        _, err := tempF.newObjectWithInfo(remote, nil)
        _, err := newF.newObjectWithInfo(remote, nil)
        if err != nil {
            if err == fs.ErrorObjectNotFound {
                // File doesn't exist so return old f
@@ -329,13 +267,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
            }
            return nil, err
        }
        // XXX: update the old f here instead of returning tempF, since
        // `features` were already filled with functions having *f as a receiver.
        // See https://github.com/ncw/rclone/issues/2182
        f.dirCache = tempF.dirCache
        f.root = tempF.root
        // return an error with an fs which points to the parent
        return f, fs.ErrorIsFile
        return &newF, fs.ErrorIsFile
    }
    return f, nil
}
@@ -594,13 +527,13 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
    }

    // Don't wait for uploads - assume they will appear later
    if f.opt.UploadWaitPerGB <= 0 {
    if *uploadWaitPerGB <= 0 {
        fs.Debugf(src, "Upload error detected but waiting disabled: %v (%q)", inErr, httpStatus)
        return false, inInfo, inErr
    }

    // Time we should wait for the upload
    uploadWaitPerByte := float64(f.opt.UploadWaitPerGB) / 1024 / 1024 / 1024
    uploadWaitPerByte := float64(*uploadWaitPerGB) / 1024 / 1024 / 1024
    timeToWait := time.Duration(uploadWaitPerByte * float64(src.Size()))

    const sleepTime = 5 * time.Second // sleep between tries
@@ -1082,7 +1015,7 @@ func (o *Object) Storable() bool {

// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
    bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
    bigObject := o.Size() >= int64(tempLinkThreshold)
    if bigObject {
        fs.Debugf(o, "Downloading large object via tempLink")
    }
@@ -1274,38 +1207,24 @@ func (o *Object) MimeType() string {
// Automatically restarts itself in case of unexpected behaviour of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
    checkpoint := f.opt.Checkpoint
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
    checkpoint := config.FileGet(f.name, "checkpoint")

    quit := make(chan bool)
    go func() {
        var ticker *time.Ticker
        var tickerC <-chan time.Time
        for {
            checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
            if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
                fs.Debugf(f, "Unable to save checkpoint: %v", err)
            }
            select {
            case pollInterval, ok := <-pollIntervalChan:
                if !ok {
                    if ticker != nil {
                        ticker.Stop()
                    }
                    return
                }
                if pollInterval == 0 {
                    if ticker != nil {
                        ticker.Stop()
                        ticker, tickerC = nil, nil
                    }
                } else {
                    ticker = time.NewTicker(pollInterval)
                    tickerC = ticker.C
                }
            case <-tickerC:
                checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
                if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
                    fs.Debugf(f, "Unable to save checkpoint: %v", err)
                }
            case <-quit:
                return
            case <-time.After(pollInterval):
            }
        }
    }()
    return quit
}

func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string {
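The new ChangeNotify above replaces a fixed time.After poll with a ticker whose interval is pushed over a channel: an interval of 0 pauses polling, and closing the channel stops the loop. Here is a self-contained sketch of that resettable polling loop; the function and variable names are illustrative stand-ins, not rclone's code.

package main

import (
    "fmt"
    "time"
)

// poll runs work on a ticker whose period arrives over intervals.
// Sending 0 pauses the ticker; closing the channel stops the loop.
func poll(work func(), intervals <-chan time.Duration) {
    var ticker *time.Ticker
    var tickerC <-chan time.Time
    for {
        select {
        case d, ok := <-intervals:
            if !ok { // channel closed: stop polling
                if ticker != nil {
                    ticker.Stop()
                }
                return
            }
            if ticker != nil {
                ticker.Stop()
                ticker, tickerC = nil, nil
            }
            if d > 0 {
                ticker = time.NewTicker(d)
                tickerC = ticker.C
            }
        case <-tickerC: // a nil channel blocks forever while paused
            work()
        }
    }
}

func main() {
    intervals := make(chan time.Duration)
    go poll(func() { fmt.Println("tick") }, intervals)
    intervals <- 50 * time.Millisecond
    time.Sleep(120 * time.Millisecond)
    close(intervals)
}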
File diff suppressed because it is too large
@@ -1,18 +0,0 @@

// +build !plan9,!solaris,go1.8

package azureblob

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func (f *Fs) InternalTest(t *testing.T) {
    // Check first feature flags are set on this
    // remote
    enabled := f.Features().SetTier
    assert.True(t, enabled)
    enabled = f.Features().GetTier
    assert.True(t, enabled)
}

@@ -1,37 +1,17 @@

// Test AzureBlob filesystem interface

// +build !plan9,!solaris,go1.8

package azureblob
package azureblob_test

import (
    "testing"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/backend/azureblob"
    "github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestAzureBlob:",
        NilObject: (*Object)(nil),
        TiersToTest: []string{"Hot", "Cool"},
        ChunkedUpload: fstests.ChunkedUploadConfig{
            MaxChunkSize: maxChunkSize,
        },
        RemoteName: "TestAzureBlob:",
        NilObject: (*azureblob.Object)(nil),
    })
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    return f.setUploadChunkSize(cs)
}

func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    return f.setUploadCutoff(cs)
}

var (
    _ fstests.SetUploadChunkSizer = (*Fs)(nil)
    _ fstests.SetUploadCutoffer = (*Fs)(nil)
)

@@ -1,6 +0,0 @@

// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build plan9 solaris !go1.8

package azureblob
@@ -31,6 +31,11 @@ func (e *Error) Fatal() bool {

var _ fserrors.Fataler = (*Error)(nil)

// Account describes a B2 account
type Account struct {
    ID string `json:"accountId"` // The identifier for the account.
}

// Bucket describes a B2 bucket
type Bucket struct {
    ID string `json:"bucketId"`
@@ -69,7 +74,7 @@ const versionFormat = "-v2006-01-02-150405.000"
func (t Timestamp) AddVersion(remote string) string {
    ext := path.Ext(remote)
    base := remote[:len(remote)-len(ext)]
    s := time.Time(t).Format(versionFormat)
    s := (time.Time)(t).Format(versionFormat)
    // Replace the '.' with a '-'
    s = strings.Replace(s, ".", "-", -1)
    return base + s + ext
@@ -102,20 +107,20 @@ func RemoveVersion(remote string) (t Timestamp, newRemote string) {

// IsZero returns true if the timestamp is uninitialised
func (t Timestamp) IsZero() bool {
    return time.Time(t).IsZero()
    return (time.Time)(t).IsZero()
}

// Equal compares two timestamps
//
// If either are !IsZero then it returns false
func (t Timestamp) Equal(s Timestamp) bool {
    if time.Time(t).IsZero() {
    if (time.Time)(t).IsZero() {
        return false
    }
    if time.Time(s).IsZero() {
    if (time.Time)(s).IsZero() {
        return false
    }
    return time.Time(t).Equal(time.Time(s))
    return (time.Time)(t).Equal((time.Time)(s))
}

// File is info about a file
@@ -132,27 +137,10 @@ type File struct {

// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
    AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
    AccountID string `json:"accountId"` // The identifier for the account.
    Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
        BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
        BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
        Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
        NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
    } `json:"allowed"`
    APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
    AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
    DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
    MinimumPartSize int `json:"minimumPartSize"` // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead.
    RecommendedPartSize int `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance.
}

// ListBucketsRequest is parameters for b2_list_buckets call
type ListBucketsRequest struct {
    AccountID string `json:"accountId"` // The identifier for the account.
    BucketID string `json:"bucketId,omitempty"` // When specified, the result will be a list containing just this bucket.
    BucketName string `json:"bucketName,omitempty"` // When specified, the result will be a list containing just this bucket.
    BucketTypes []string `json:"bucketTypes,omitempty"` // If present, B2 will use it as a filter for bucket types returned in the list buckets response.
    AccountID string `json:"accountId"` // The identifier for the account.
    AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
    APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
    DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
}

// ListBucketsResponse is as returned from the b2_list_buckets call
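AddVersion/RemoveVersion above implement B2's --b2-versions naming by splicing a formatted upload time in before the file extension. The following runnable sketch reproduces the AddVersion side using the versionFormat constant shown in the hunk; the surrounding Timestamp type is simplified away, so it is illustrative rather than rclone's exact code.

package main

import (
    "fmt"
    "path"
    "strings"
    "time"
)

// versionFormat is the Go reference-time layout from the hunk above.
const versionFormat = "-v2006-01-02-150405.000"

// addVersion splices the formatted time in before the extension,
// swapping '.' for '-' so the millisecond part doesn't look like
// a second file extension.
func addVersion(remote string, t time.Time) string {
    ext := path.Ext(remote)
    base := remote[:len(remote)-len(ext)]
    s := t.Format(versionFormat)
    s = strings.Replace(s, ".", "-", -1)
    return base + s + ext
}

func main() {
    t := time.Date(2014, 3, 7, 22, 31, 12, 173000000, time.UTC)
    fmt.Println(addVersion("dir/file.txt", t))
    // prints: dir/file-v2014-03-07-223112-173.txt
}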
backend/b2/b2.go
@@ -22,8 +22,8 @@ import (
    "github.com/ncw/rclone/backend/b2/api"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/accounting"
    "github.com/ncw/rclone/fs/config/configmap"
    "github.com/ncw/rclone/fs/config/configstruct"
    "github.com/ncw/rclone/fs/config"
    "github.com/ncw/rclone/fs/config/flags"
    "github.com/ncw/rclone/fs/fserrors"
    "github.com/ncw/rclone/fs/fshttp"
    "github.com/ncw/rclone/fs/hash"
@@ -34,27 +34,30 @@ import (
)

const (
    defaultEndpoint = "https://api.backblazeb2.com"
    headerPrefix = "x-bz-info-" // lower case as that is what the server returns
    timeKey = "src_last_modified_millis"
    timeHeader = headerPrefix + timeKey
    sha1Key = "large_file_sha1"
    sha1Header = "X-Bz-Content-Sha1"
    sha1InfoHeader = headerPrefix + sha1Key
    testModeHeader = "X-Bz-Test-Mode"
    retryAfterHeader = "Retry-After"
    minSleep = 10 * time.Millisecond
    maxSleep = 5 * time.Minute
    decayConstant = 1 // bigger for slower decay, exponential
    maxParts = 10000
    maxVersions = 100 // maximum number of versions we search in --b2-versions mode
    minChunkSize = 5 * fs.MebiByte
    defaultChunkSize = 96 * fs.MebiByte
    defaultUploadCutoff = 200 * fs.MebiByte
    defaultEndpoint = "https://api.backblazeb2.com"
    headerPrefix = "x-bz-info-" // lower case as that is what the server returns
    timeKey = "src_last_modified_millis"
    timeHeader = headerPrefix + timeKey
    sha1Key = "large_file_sha1"
    sha1Header = "X-Bz-Content-Sha1"
    sha1InfoHeader = headerPrefix + sha1Key
    testModeHeader = "X-Bz-Test-Mode"
    retryAfterHeader = "Retry-After"
    minSleep = 10 * time.Millisecond
    maxSleep = 5 * time.Minute
    decayConstant = 1 // bigger for slower decay, exponential
    maxParts = 10000
    maxVersions = 100 // maximum number of versions we search in --b2-versions mode
)

// Globals
var (
    minChunkSize = fs.SizeSuffix(5E6)
    chunkSize = fs.SizeSuffix(96 * 1024 * 1024)
    uploadCutoff = fs.SizeSuffix(200E6)
    b2TestMode = flags.StringP("b2-test-mode", "", "", "A flag string for X-Bz-Test-Mode header.")
    b2Versions = flags.BoolP("b2-versions", "", false, "Include old versions in directory listings.")
    b2HardDelete = flags.BoolP("b2-hard-delete", "", false, "Permanently delete files on remote removal, otherwise hide files.")
    errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
)

@@ -65,89 +68,29 @@ func init() {
        Description: "Backblaze B2",
        NewFs: NewFs,
        Options: []fs.Option{{
            Name: "account",
            Help: "Account ID or Application Key ID",
            Required: true,
            Name: "account",
            Help: "Account ID",
        }, {
            Name: "key",
            Help: "Application Key",
            Required: true,
            Name: "key",
            Help: "Application Key",
        }, {
            Name: "endpoint",
            Help: "Endpoint for the service.\nLeave blank normally.",
            Advanced: true,
        }, {
            Name: "test_mode",
            Help: `A flag string for X-Bz-Test-Mode header for debugging.

This is for debugging purposes only. Setting it to one of the strings
below will cause b2 to return specific errors:

* "fail_some_uploads"
* "expire_some_account_authorization_tokens"
* "force_cap_exceeded"

These will be set in the "X-Bz-Test-Mode" header which is documented
in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`,
            Default: "",
            Hide: fs.OptionHideConfigurator,
            Advanced: true,
        }, {
            Name: "versions",
            Help: "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
            Default: false,
            Advanced: true,
        }, {
            Name: "hard_delete",
            Help: "Permanently delete files on remote removal, otherwise hide files.",
            Default: false,
        }, {
            Name: "upload_cutoff",
            Help: `Cutoff for switching to chunked upload.

Files above this size will be uploaded in chunks of "--b2-chunk-size".

This value should be set no larger than 4.657GiB (== 5GB).`,
            Default: fs.SizeSuffix(defaultUploadCutoff),
            Advanced: true,
        }, {
            Name: "chunk_size",
            Help: `Upload chunk size. Must fit in memory.

When uploading large files, chunk the file into this size. Note that
these chunks are buffered in memory and there might be a maximum of
"--transfers" chunks in progress at once. 5,000,000 Bytes is the
minimum size.`,
            Default: fs.SizeSuffix(defaultChunkSize),
            Advanced: true,
        }, {
            Name: "disable_checksum",
            Help: `Disable checksums for large (> upload cutoff) files`,
            Default: false,
            Advanced: true,
        }},
            Name: "endpoint",
            Help: "Endpoint for the service - leave blank normally.",
        },
        },
    })
}

// Options defines the configuration for this backend
type Options struct {
    Account string `config:"account"`
    Key string `config:"key"`
    Endpoint string `config:"endpoint"`
    TestMode string `config:"test_mode"`
    Versions bool `config:"versions"`
    HardDelete bool `config:"hard_delete"`
    UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
    ChunkSize fs.SizeSuffix `config:"chunk_size"`
    DisableCheckSum bool `config:"disable_checksum"`
    flags.VarP(&uploadCutoff, "b2-upload-cutoff", "", "Cutoff for switching to chunked upload")
    flags.VarP(&chunkSize, "b2-chunk-size", "", "Upload chunk size. Must fit in memory.")
}

// Fs represents a remote b2 server
type Fs struct {
    name string // name of this remote
    root string // the path we are working on if any
    opt Options // parsed config options
    features *fs.Features // optional features
    account string // account name
    key string // auth key
    endpoint string // name of the starting api endpoint
    srv *rest.Client // the connection to the b2 server
    bucket string // the bucket we are working on
    bucketOKMu sync.Mutex // mutex to protect bucket OK
@@ -202,7 +145,7 @@ func (f *Fs) Features() *fs.Features {
}

// Pattern to match a b2 path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)

// parseParse parses a b2 'url'
func parsePath(path string) (bucket, directory string, err error) {
@@ -288,73 +231,37 @@ func errorHandler(resp *http.Response) error {
    return errResponse
}

func checkUploadChunkSize(cs fs.SizeSuffix) error {
    if cs < minChunkSize {
        return errors.Errorf("%s is less than %s", cs, minChunkSize)
    }
    return nil
}

func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
    err = checkUploadChunkSize(cs)
    if err == nil {
        old, f.opt.ChunkSize = f.opt.ChunkSize, cs
        f.fillBufferTokens() // reset the buffer tokens
    }
    return
}

func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
    if cs < opt.ChunkSize {
        return errors.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
    }
    return nil
}

func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
    err = checkUploadCutoff(&f.opt, cs)
    if err == nil {
        old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
    }
    return
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
    if err != nil {
        return nil, err
func NewFs(name, root string) (fs.Fs, error) {
    if uploadCutoff < chunkSize {
        return nil, errors.Errorf("b2: upload cutoff (%v) must be greater than or equal to chunk size (%v)", uploadCutoff, chunkSize)
    }
    err = checkUploadCutoff(opt, opt.UploadCutoff)
    if err != nil {
        return nil, errors.Wrap(err, "b2: upload cutoff")
    }
    err = checkUploadChunkSize(opt.ChunkSize)
    if err != nil {
        return nil, errors.Wrap(err, "b2: chunk size")
    if chunkSize < minChunkSize {
        return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, chunkSize)
    }
    bucket, directory, err := parsePath(root)
    if err != nil {
        return nil, err
    }
    if opt.Account == "" {
    account := config.FileGet(name, "account")
    if account == "" {
        return nil, errors.New("account not found")
    }
    if opt.Key == "" {
    key := config.FileGet(name, "key")
    if key == "" {
        return nil, errors.New("key not found")
    }
    if opt.Endpoint == "" {
        opt.Endpoint = defaultEndpoint
    }
    endpoint := config.FileGet(name, "endpoint", defaultEndpoint)
    f := &Fs{
        name: name,
        opt: *opt,
        bucket: bucket,
        root: directory,
        srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
        pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
        name: name,
        bucket: bucket,
        root: directory,
        account: account,
        key: key,
        endpoint: endpoint,
        srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
        pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
        bufferTokens: make(chan []byte, fs.Config.Transfers),
    }
    f.features = (&fs.Features{
        ReadMimeType: true,
@@ -362,28 +269,19 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        BucketBased: true,
    }).Fill(f)
    // Set the test flag if required
    if opt.TestMode != "" {
        testMode := strings.TrimSpace(opt.TestMode)
    if *b2TestMode != "" {
        testMode := strings.TrimSpace(*b2TestMode)
        f.srv.SetHeader(testModeHeader, testMode)
        fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
    }
    f.fillBufferTokens()
    // Fill up the buffer tokens
    for i := 0; i < fs.Config.Transfers; i++ {
        f.bufferTokens <- nil
    }
    err = f.authorizeAccount()
    if err != nil {
        return nil, errors.Wrap(err, "failed to authorize account")
    }
    // If this is a key limited to a single bucket, it must exist already
    if f.bucket != "" && f.info.Allowed.BucketID != "" {
        allowedBucket := f.info.Allowed.BucketName
        if allowedBucket == "" {
            return nil, errors.New("bucket that application key is restricted to no longer exists")
        }
        if allowedBucket != f.bucket {
            return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
        }
        f.markBucketOK()
        f.setBucketID(f.info.Allowed.BucketID)
    }
    if f.root != "" {
        f.root += "/"
        // Check to see if the (bucket,directory) is actually an existing file
@@ -418,9 +316,9 @@ func (f *Fs) authorizeAccount() error {
    opts := rest.Opts{
        Method: "GET",
        Path: "/b2api/v1/b2_authorize_account",
        RootURL: f.opt.Endpoint,
        UserName: f.opt.Account,
        Password: f.opt.Key,
        RootURL: f.endpoint,
        UserName: f.account,
        Password: f.key,
        ExtraHeaders: map[string]string{"Authorization": ""}, // unset the Authorization for this request
    }
    err := f.pacer.Call(func() (bool, error) {
@@ -482,19 +380,11 @@ func (f *Fs) clearUploadURL() {
    f.uploadMu.Unlock()
}

// Fill up (or reset) the buffer tokens
func (f *Fs) fillBufferTokens() {
    f.bufferTokens = make(chan []byte, fs.Config.Transfers)
    for i := 0; i < fs.Config.Transfers; i++ {
        f.bufferTokens <- nil
    }
}

// getUploadBlock gets a block from the pool of size chunkSize
func (f *Fs) getUploadBlock() []byte {
    buf := <-f.bufferTokens
    if buf == nil {
        buf = make([]byte, f.opt.ChunkSize)
        buf = make([]byte, chunkSize)
    }
    // fs.Debugf(f, "Getting upload block %p", buf)
    return buf
@@ -503,7 +393,7 @@ func (f *Fs) getUploadBlock() []byte {
// putUploadBlock returns a block to the pool of size chunkSize
func (f *Fs) putUploadBlock(buf []byte) {
    buf = buf[:cap(buf)]
    if len(buf) != int(f.opt.ChunkSize) {
    if len(buf) != int(chunkSize) {
        panic("bad blocksize returned to pool")
    }
    // fs.Debugf(f, "Returning upload block %p", buf)
@@ -673,7 +563,7 @@ func (f *Fs) markBucketOK() {
// listDir lists a single directory
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
    last := ""
    err = f.list(dir, false, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
    err = f.list(dir, false, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
        entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
        if err != nil {
            return err
@@ -745,7 +635,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
    }
    list := walk.NewListRHelper(callback)
    last := ""
    err = f.list(dir, true, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
    err = f.list(dir, true, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
        entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
        if err != nil {
            return err
@@ -765,11 +655,7 @@ type listBucketFn func(*api.Bucket) error

// listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(fn listBucketFn) error {
    var account = api.ListBucketsRequest{
        AccountID: f.info.AccountID,
        BucketID: f.info.Allowed.BucketID,
    }

    var account = api.Account{ID: f.info.AccountID}
    var response api.ListBucketsResponse
    opts := rest.Opts{
        Method: "POST",
@@ -993,12 +879,6 @@ func (f *Fs) purge(oldOnly bool) error {
            errReturn = err
        }
    }
    var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
        if time.Since(time.Time(timestamp)).Hours() > 24 {
            return true
        }
        return false
    }

    // Delete Config.Transfers in parallel
    toBeDeleted := make(chan *api.File, fs.Config.Transfers)
@@ -1022,9 +902,6 @@ func (f *Fs) purge(oldOnly bool) error {
            if object.Action == "hide" {
                fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
                toBeDeleted <- object
            } else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
                fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
                toBeDeleted <- object
            } else {
                fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
            }
@@ -1158,12 +1035,12 @@ func (o *Object) readMetaData() (err error) {
    maxSearched := 1
    var timestamp api.Timestamp
    baseRemote := o.remote
    if o.fs.opt.Versions {
    if *b2Versions {
        timestamp, baseRemote = api.RemoveVersion(baseRemote)
        maxSearched = maxVersions
    }
    var info *api.File
    err = o.fs.list("", true, baseRemote, maxSearched, o.fs.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
    err = o.fs.list("", true, baseRemote, maxSearched, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
        if isDirectory {
            return nil
        }
@@ -1377,7 +1254,7 @@ func urlEncode(in string) string {
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
    if o.fs.opt.Versions {
    if *b2Versions {
        return errNotWithVersions
    }
    err = o.fs.Mkdir("")
@@ -1412,7 +1289,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
    } else {
        return err
    }
    } else if size > int64(o.fs.opt.UploadCutoff) {
    } else if size > int64(uploadCutoff) {
        up, err := o.fs.newLargeUpload(o, in, src)
        if err != nil {
            return err
@@ -1506,6 +1383,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
        },
        ContentLength: &size,
    }
    // for go1.8 (see release notes) we must nil the Body if we want a
    // "Content-Length: 0" header which b2 requires for all files.
    if size == 0 {
        opts.Body = nil
    }
    var response api.FileInfo
    // Don't retry, return a retry error instead
    err = o.fs.pacer.CallNoRetry(func() (bool, error) {
@@ -1526,10 +1408,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio

// Remove an object
func (o *Object) Remove() error {
    if o.fs.opt.Versions {
    if *b2Versions {
        return errNotWithVersions
    }
    if o.fs.opt.HardDelete {
    if *b2HardDelete {
        return o.fs.deleteByID(o.id, o.fs.root+o.remote)
    }
    return o.fs.hide(o.fs.root + o.remote)
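getUploadBlock/putUploadBlock above use a single buffered channel both as a concurrency limit (one slot per allowed transfer) and as a buffer pool (slots start as nil and are materialised lazily). Here is a self-contained sketch of the idiom; the type and names are illustrative, and the sizes simply echo the defaults in the diff.

package main

import "fmt"

type pool struct {
    tokens    chan []byte
    chunkSize int
}

// newPool reserves one slot per allowed concurrent transfer.
func newPool(transfers, chunkSize int) *pool {
    p := &pool{tokens: make(chan []byte, transfers), chunkSize: chunkSize}
    for i := 0; i < transfers; i++ {
        p.tokens <- nil // a nil token reserves a slot without allocating yet
    }
    return p
}

// get blocks until a slot is free, allocating the buffer on first use.
func (p *pool) get() []byte {
    buf := <-p.tokens
    if buf == nil {
        buf = make([]byte, p.chunkSize)
    }
    return buf
}

// put returns a buffer (restored to full capacity) to the pool.
func (p *pool) put(buf []byte) {
    p.tokens <- buf[:cap(buf)]
}

func main() {
    p := newPool(4, 96<<20) // 4 transfers, 96 MiB chunks, as in the diff
    b := p.get()
    fmt.Println("got buffer of", len(b), "bytes")
    p.put(b)
}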
@@ -1,10 +1,10 @@
// Test B2 filesystem interface
package b2
package b2_test

import (
    "testing"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/backend/b2"
    "github.com/ncw/rclone/fstest/fstests"
)

@@ -12,23 +12,6 @@ import (
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestB2:",
        NilObject: (*Object)(nil),
        ChunkedUpload: fstests.ChunkedUploadConfig{
            MinChunkSize: minChunkSize,
            NeedMultipleChunks: true,
        },
        NilObject: (*b2.Object)(nil),
    })
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    return f.setUploadChunkSize(cs)
}

func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    return f.setUploadCutoff(cs)
}

var (
    _ fstests.SetUploadChunkSizer = (*Fs)(nil)
    _ fstests.SetUploadCutoffer = (*Fs)(nil)
)
@@ -86,10 +86,10 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
    parts := int64(0)
    sha1SliceSize := int64(maxParts)
    if size == -1 {
        fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
        fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", fs.SizeSuffix(chunkSize), fs.SizeSuffix(maxParts*chunkSize))
    } else {
        parts = size / int64(o.fs.opt.ChunkSize)
        if size%int64(o.fs.opt.ChunkSize) != 0 {
        parts = size / int64(chunkSize)
        if size%int64(chunkSize) != 0 {
            parts++
        }
        if parts > maxParts {
@@ -116,10 +116,8 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
        },
    }
    // Set the SHA1 if known
    if !o.fs.opt.DisableCheckSum {
        if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
            request.Info[sha1Key] = calculatedSha1
        }
    if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
        request.Info[sha1Key] = calculatedSha1
    }
    var response api.StartLargeFileResponse
    err = f.pacer.Call(func() (bool, error) {
@@ -411,8 +409,8 @@ outer:
    }

    reqSize := remaining
    if reqSize >= int64(up.f.opt.ChunkSize) {
        reqSize = int64(up.f.opt.ChunkSize)
    if reqSize >= int64(chunkSize) {
        reqSize = int64(chunkSize)
    }

    // Get a block of memory
@@ -61,7 +61,7 @@ func (e *Error) Error() string {
var _ error = (*Error)(nil)

// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status"

// Types of things in Item
const (
@@ -86,10 +86,6 @@ type Item struct {
    ContentCreatedAt Time `json:"content_created_at"`
    ContentModifiedAt Time `json:"content_modified_at"`
    ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
    SharedLink struct {
        URL string `json:"url,omitempty"`
        Access string `json:"access,omitempty"`
    } `json:"shared_link"`
}

// ModTime returns the modification time of the item
@@ -149,14 +145,6 @@ type CopyFile struct {
    Parent Parent `json:"parent"`
}

// CreateSharedLink is the request for Public Link
type CreateSharedLink struct {
    SharedLink struct {
        URL string `json:"url,omitempty"`
        Access string `json:"access,omitempty"`
    } `json:"shared_link"`
}

// UploadSessionRequest is used in Create Upload Session
type UploadSessionRequest struct {
    FolderID string `json:"folder_id,omitempty"` // don't pass for update
@@ -184,8 +172,8 @@ type UploadSessionResponse struct {
// Part defines the return from upload part call which are passed to commit upload also
type Part struct {
    PartID string `json:"part_id"`
    Offset int64 `json:"offset"`
    Size int64 `json:"size"`
    Offset int `json:"offset"`
    Size int `json:"size"`
    Sha1 string `json:"sha1"`
}
@@ -23,8 +23,7 @@ import (
    "github.com/ncw/rclone/backend/box/api"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config"
    "github.com/ncw/rclone/fs/config/configmap"
    "github.com/ncw/rclone/fs/config/configstruct"
    "github.com/ncw/rclone/fs/config/flags"
    "github.com/ncw/rclone/fs/config/obscure"
    "github.com/ncw/rclone/fs/fserrors"
    "github.com/ncw/rclone/fs/hash"
@@ -47,7 +46,6 @@ const (
    uploadURL = "https://upload.box.com/api/2.0"
    listChunks = 1000 // chunk size to read directory listings
    minUploadCutoff = 50000000 // upload cutoff can be no lower than this
    defaultUploadCutoff = 50 * 1024 * 1024
)

// Globals
@@ -63,6 +61,7 @@ var (
        ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
        RedirectURL: oauthutil.RedirectURL,
    }
    uploadCutoff = fs.SizeSuffix(50 * 1024 * 1024)
)

// Register with Fs
@@ -71,43 +70,27 @@ func init() {
        Name: "box",
        Description: "Box",
        NewFs: NewFs,
        Config: func(name string, m configmap.Mapper) {
            err := oauthutil.Config("box", name, m, oauthConfig)
        Config: func(name string) {
            err := oauthutil.Config("box", name, oauthConfig)
            if err != nil {
                log.Fatalf("Failed to configure token: %v", err)
            }
        },
        Options: []fs.Option{{
            Name: config.ConfigClientID,
            Help: "Box App Client Id.\nLeave blank normally.",
            Help: "Box App Client Id - leave blank normally.",
        }, {
            Name: config.ConfigClientSecret,
            Help: "Box App Client Secret\nLeave blank normally.",
        }, {
            Name: "upload_cutoff",
            Help: "Cutoff for switching to multipart upload (>= 50MB).",
            Default: fs.SizeSuffix(defaultUploadCutoff),
            Advanced: true,
        }, {
            Name: "commit_retries",
            Help: "Max number of times to try committing a multipart file.",
            Default: 100,
            Advanced: true,
            Help: "Box App Client Secret - leave blank normally.",
        }},
    })
}

// Options defines the configuration for this backend
type Options struct {
    UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
    CommitRetries int `config:"commit_retries"`
    flags.VarP(&uploadCutoff, "box-upload-cutoff", "", "Cutoff for switching to multipart upload")
}

// Fs represents a remote box
type Fs struct {
    name string // name of this remote
    root string // the path we are working on
    opt Options // parsed options
    features *fs.Features // optional features
    srv *rest.Client // the connection to the one drive server
    dirCache *dircache.DirCache // Map of directory path to directory id
@@ -126,7 +109,6 @@ type Object struct {
    size int64 // size of the object
    modTime time.Time // modification time of the object
    id string // ID of the object
    publicLink string // Public Link for the object
    sha1 string // SHA-1 of the object content
}

@@ -237,28 +219,20 @@ func errorHandler(resp *http.Response) error {
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    }

    if opt.UploadCutoff < minUploadCutoff {
        return nil, errors.Errorf("box: upload cutoff (%v) must be greater than or equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
func NewFs(name, root string) (fs.Fs, error) {
    if uploadCutoff < minUploadCutoff {
        return nil, errors.Errorf("box: upload cutoff (%v) must be greater than or equal to %v", uploadCutoff, fs.SizeSuffix(minUploadCutoff))
    }

    root = parsePath(root)
    oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
    oAuthClient, ts, err := oauthutil.NewClient(name, oauthConfig)
    if err != nil {
        return nil, errors.Wrap(err, "failed to configure Box")
        log.Fatalf("Failed to configure Box: %v", err)
    }

    f := &Fs{
        name: name,
        root: root,
        opt: *opt,
        srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
        pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
        uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
@@ -283,16 +257,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    if err != nil {
        // Assume it is a file
        newRoot, remote := dircache.SplitPath(root)
        tempF := *f
        tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
        tempF.root = newRoot
        newF := *f
        newF.dirCache = dircache.New(newRoot, rootID, &newF)
        newF.root = newRoot
        // Make new Fs which is the parent
        err = tempF.dirCache.FindRoot(false)
        err = newF.dirCache.FindRoot(false)
        if err != nil {
            // No root so return old f
            return f, nil
        }
        _, err := tempF.newObjectWithInfo(remote, nil)
        _, err := newF.newObjectWithInfo(remote, nil)
        if err != nil {
            if err == fs.ErrorObjectNotFound {
                // File doesn't exist so return old f
@@ -300,14 +274,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
            }
            return nil, err
        }
        f.features.Fill(&tempF)
        // XXX: update the old f here instead of returning tempF, since
        // `features` were already filled with functions having *f as a receiver.
        // See https://github.com/ncw/rclone/issues/2182
        f.dirCache = tempF.dirCache
        f.root = tempF.root
        // return an error with an fs which points to the parent
        return f, fs.ErrorIsFile
        return &newF, fs.ErrorIsFile
    }
    return f, nil
}
@@ -681,7 +649,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
        Parameters: fieldsValue(),
    }
    replacedLeaf := replaceReservedChars(leaf)
    copyFile := api.CopyFile{
    copy := api.CopyFile{
        Name: replacedLeaf,
        Parent: api.Parent{
            ID: directoryID,
@@ -690,7 +658,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
    var resp *http.Response
    var info *api.Item
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(&opts, &copyFile, &info)
        resp, err = f.srv.CallJSON(&opts, &copy, &info)
        return shouldRetry(resp, err)
    })
    if err != nil {
@@ -851,46 +819,6 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
    return nil
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(remote string) (string, error) {
    id, err := f.dirCache.FindDir(remote, false)
    var opts rest.Opts
    if err == nil {
        fs.Debugf(f, "attempting to share directory '%s'", remote)

        opts = rest.Opts{
            Method: "PUT",
            Path: "/folders/" + id,
            Parameters: fieldsValue(),
        }
    } else {
        fs.Debugf(f, "attempting to share single file '%s'", remote)
        o, err := f.NewObject(remote)
        if err != nil {
            return "", err
        }

        if o.(*Object).publicLink != "" {
            return o.(*Object).publicLink, nil
        }

        opts = rest.Opts{
            Method: "PUT",
            Path: "/files/" + o.(*Object).id,
            Parameters: fieldsValue(),
        }
    }

    shareLink := api.CreateSharedLink{}
    var info api.Item
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(&opts, &shareLink, &info)
        return shouldRetry(resp, err)
    })
    return info.SharedLink.URL, err
}

// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
@@ -955,7 +883,6 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
    o.sha1 = info.SHA1
    o.modTime = info.ModTime()
    o.id = info.ID
    o.publicLink = info.SharedLink.URL
    return nil
}

@@ -1062,8 +989,8 @@ func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Tim
    var resp *http.Response
    var result api.FolderItems
    opts := rest.Opts{
        Method: "POST",
        Body: in,
        Method: "POST",
        Body: in,
        MultipartMetadataName: "attributes",
        MultipartContentName: "contents",
        MultipartFileName: upload.Name,
@@ -1108,7 +1035,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
    }

    // Upload with simple or multipart
    if size <= int64(o.fs.opt.UploadCutoff) {
    if size <= int64(uploadCutoff) {
        err = o.upload(in, leaf, directoryID, modTime)
    } else {
        err = o.uploadMultipart(in, leaf, directoryID, size, modTime)
@@ -1135,7 +1062,6 @@ var (
    _ fs.Mover = (*Fs)(nil)
    _ fs.DirMover = (*Fs)(nil)
    _ fs.DirCacheFlusher = (*Fs)(nil)
    _ fs.PublicLinker = (*Fs)(nil)
    _ fs.Object = (*Object)(nil)
    _ fs.IDer = (*Object)(nil)
)

@@ -96,9 +96,7 @@ func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.T
    request.Attributes.ContentCreatedAt = api.Time(modTime)
    var body []byte
    var resp *http.Response
    // For discussion of this value see:
    // https://github.com/ncw/rclone/issues/2054
    maxTries := o.fs.opt.CommitRetries
    maxTries := fs.Config.LowLevelRetries
    const defaultDelay = 10
    var tries int
outer:
backend/cache/cache.go (vendored): file diff suppressed because it is too large
backend/cache/cache_internal_test.go (vendored)
@@ -4,9 +4,6 @@ package cache_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
goflag "flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
@@ -15,26 +12,34 @@ import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"encoding/base64"
|
||||
goflag "flag"
|
||||
"fmt"
|
||||
"runtime/debug"
|
||||
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
|
||||
"github.com/ncw/rclone/backend/cache"
|
||||
"github.com/ncw/rclone/backend/crypt"
|
||||
_ "github.com/ncw/rclone/backend/drive"
|
||||
"github.com/ncw/rclone/backend/local"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/object"
|
||||
"github.com/ncw/rclone/fs/rc"
|
||||
"github.com/ncw/rclone/fs/rc/rcflags"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/ncw/rclone/vfs"
|
||||
"github.com/ncw/rclone/vfs/vfsflags"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
flag "github.com/spf13/pflag"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -135,7 +140,7 @@ func TestInternalVfsCache(t *testing.T) {
|
||||
|
||||
vfsflags.Opt.CacheMode = vfs.CacheModeWrites
|
||||
id := "tiuufo"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"cache-writes": "true", "cache-info-age": "1h"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)

err := rootFs.Mkdir("test")
@@ -690,11 +695,11 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
}

func TestInternalChangeSeenAfterRc(t *testing.T) {
cacheExpire := rc.Calls.Get("cache/expire")
assert.NotNil(t, cacheExpire)
rcflags.Opt.Enabled = true
rc.Start(&rcflags.Opt)

id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"rc": "true"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

if !runInstance.useMount {
@@ -724,8 +729,13 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
require.NoError(t, err)
require.NotEqual(t, o.ModTime().String(), co.ModTime().String())

// Call the rc function
m, err := cacheExpire.Fn(rc.Params{"remote": "data.bin"})
m := make(map[string]string)
res, err := http.Post(fmt.Sprintf("http://localhost:5572/cache/expire?remote=%s", "data.bin"), "application/json; charset=utf-8", strings.NewReader(""))
require.NoError(t, err)
defer func() {
_ = res.Body.Close()
}()
_ = json.NewDecoder(res.Body).Decode(&m)
require.Contains(t, m, "status")
require.Contains(t, m, "message")
require.Equal(t, "ok", m["status"])
@@ -745,8 +755,13 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
li1, err = runInstance.list(t, rootFs, "")
require.Len(t, li1, 1)

// Call the rc function
m, err = cacheExpire.Fn(rc.Params{"remote": "/"})
m = make(map[string]string)
res2, err := http.Post("http://localhost:5572/cache/expire?remote=/", "application/json; charset=utf-8", strings.NewReader(""))
require.NoError(t, err)
defer func() {
_ = res2.Body.Close()
}()
_ = json.NewDecoder(res2.Body).Decode(&m)
require.Contains(t, m, "status")
require.Contains(t, m, "message")
require.Equal(t, "ok", m["status"])
@@ -759,7 +774,7 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {

func TestInternalCacheWrites(t *testing.T) {
id := "ticw"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"cache-writes": "true"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

cfs, err := runInstance.getCacheFs(rootFs)
@@ -778,7 +793,7 @@ func TestInternalCacheWrites(t *testing.T) {

func TestInternalMaxChunkSizeRespected(t *testing.T) {
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"cache-workers": "1"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

cfs, err := runInstance.getCacheFs(rootFs)
@@ -853,7 +868,7 @@ func TestInternalBug2117(t *testing.T) {

id := fmt.Sprintf("tib2117%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
map[string]string{"cache-info-age": "72h", "cache-chunk-clean-interval": "15m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

if runInstance.rootIsCrypt {
@@ -903,7 +918,10 @@ func TestInternalBug2117(t *testing.T) {
// run holds the remotes for a test run
type run struct {
okDiff time.Duration
runDefaultCfgMap configmap.Simple
allCfgMap map[string]string
allFlagMap map[string]string
runDefaultCfgMap map[string]string
runDefaultFlagMap map[string]string
mntDir string
tmpUploadDir string
useMount bool
@@ -927,16 +945,38 @@ func newRun() *run {
isMounted: false,
}

// Read in all the defaults for all the options
fsInfo, err := fs.Find("cache")
if err != nil {
panic(fmt.Sprintf("Couldn't find cache remote: %v", err))
r.allCfgMap = map[string]string{
"plex_url": "",
"plex_username": "",
"plex_password": "",
"chunk_size": cache.DefCacheChunkSize,
"info_age": cache.DefCacheInfoAge,
"chunk_total_size": cache.DefCacheTotalChunkSize,
}
r.runDefaultCfgMap = configmap.Simple{}
for _, option := range fsInfo.Options {
r.runDefaultCfgMap.Set(option.Name, fmt.Sprint(option.Default))
r.allFlagMap = map[string]string{
"cache-db-path": filepath.Join(config.CacheDir, "cache-backend"),
"cache-chunk-path": filepath.Join(config.CacheDir, "cache-backend"),
"cache-db-purge": "true",
"cache-chunk-size": cache.DefCacheChunkSize,
"cache-total-chunk-size": cache.DefCacheTotalChunkSize,
"cache-chunk-clean-interval": cache.DefCacheChunkCleanInterval,
"cache-info-age": cache.DefCacheInfoAge,
"cache-read-retries": strconv.Itoa(cache.DefCacheReadRetries),
"cache-workers": strconv.Itoa(cache.DefCacheTotalWorkers),
"cache-chunk-no-memory": "false",
"cache-rps": strconv.Itoa(cache.DefCacheRps),
"cache-writes": "false",
"cache-tmp-upload-path": "",
"cache-tmp-wait-time": cache.DefCacheTmpWaitTime,
}
r.runDefaultCfgMap = make(map[string]string)
for key, value := range r.allCfgMap {
r.runDefaultCfgMap[key] = value
}
r.runDefaultFlagMap = make(map[string]string)
for key, value := range r.allFlagMap {
r.runDefaultFlagMap[key] = value
}

if mountDir == "" {
if runtime.GOOS != "windows" {
r.mntDir, err = ioutil.TempDir("", "rclonecache-mount")
@@ -1046,22 +1086,28 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
require.NoError(t, err)

fs.Config.LowLevelRetries = 1

m := configmap.Simple{}
for k, v := range r.runDefaultCfgMap {
m.Set(k, v)
if c, ok := cfg[k]; ok {
config.FileSet(cacheRemote, k, c)
} else {
config.FileSet(cacheRemote, k, v)
}
}
for k, v := range flags {
m.Set(k, v)
for k, v := range r.runDefaultFlagMap {
if c, ok := flags[k]; ok {
_ = flag.Set(k, c)
} else {
_ = flag.Set(k, v)
}
}
fs.Config.LowLevelRetries = 1

// Instantiate root
if purge {
boltDb.PurgeTempUploads()
_ = os.RemoveAll(path.Join(runInstance.tmpUploadDir, id))
}
f, err := cache.NewFs(remote, id, m)
f, err := fs.NewFs(remote + ":" + id)
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
@@ -1111,6 +1157,9 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
}
r.tempFiles = nil
debug.FreeOSMemory()
for k, v := range r.runDefaultFlagMap {
_ = flag.Set(k, v)
}
}

func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
17 backend/cache/cache_upload_test.go vendored
@@ -3,7 +3,6 @@
package cache_test

import (
"fmt"
"math/rand"
"os"
"path"
@@ -11,6 +10,8 @@ import (
"testing"
"time"

"fmt"

"github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/fs"
@@ -21,7 +22,7 @@ func TestInternalUploadTempDirCreated(t *testing.T) {
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id)})
defer runInstance.cleanupFs(t, rootFs, boltDb)

_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
@@ -62,7 +63,7 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "0s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
@@ -72,7 +73,7 @@ func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
@@ -82,7 +83,7 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

err := rootFs.Mkdir("one")
@@ -162,7 +163,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

err := rootFs.Mkdir("test")
@@ -212,7 +213,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
id := "tiutfo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

boltDb.PurgeTempUploads()
@@ -342,7 +343,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

boltDb.PurgeTempUploads()
13 backend/cache/directory.go vendored
@@ -3,15 +3,16 @@
package cache

import (
"path"
"time"

"path"

"github.com/ncw/rclone/fs"
)

// Directory is a generic dir that stores basic information about it
type Directory struct {
Directory fs.Directory `json:"-"` // can be nil
fs.Directory `json:"-"`

CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory
@@ -124,14 +125,6 @@ func (d *Directory) Items() int64 {
return d.CacheItems
}

// ID returns the ID of the cached directory if known
func (d *Directory) ID() string {
if d.Directory == nil {
return ""
}
return d.Directory.ID()
}

var (
_ fs.Directory = (*Directory)(nil)
)
113 backend/cache/handle.go vendored
@@ -5,11 +5,12 @@ package cache
import (
"fmt"
"io"
"sync"
"time"

"path"
"runtime"
"strings"
"sync"
"time"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
@@ -48,13 +49,12 @@ type Handle struct {
offset int64
seenOffsets map[int64]bool
mu sync.Mutex
workersWg sync.WaitGroup
confirmReading chan bool
workers int
maxWorkerID int
UseMemory bool
closed bool
reading bool

UseMemory bool
workers []*worker
closed bool
reading bool
}

// NewObjectHandle returns a new Handle for an existing Object
@@ -65,14 +65,14 @@ func NewObjectHandle(o *Object, cfs *Fs) *Handle {
offset: 0,
preloadOffset: -1, // -1 to trigger the first preload

UseMemory: !cfs.opt.ChunkNoMemory,
UseMemory: cfs.chunkMemory,
reading: false,
}
r.seenOffsets = make(map[int64]bool)
r.memory = NewMemory(-1)

// create a larger buffer to queue up requests
r.preloadQueue = make(chan int64, r.cfs.opt.TotalWorkers*10)
r.preloadQueue = make(chan int64, r.cfs.totalWorkers*10)
r.confirmReading = make(chan bool)
r.startReadWorkers()
return r
@@ -95,10 +95,10 @@ func (r *Handle) String() string {

// startReadWorkers will start the worker pool
func (r *Handle) startReadWorkers() {
if r.workers > 0 {
if r.hasAtLeastOneWorker() {
return
}
totalWorkers := r.cacheFs().opt.TotalWorkers
totalWorkers := r.cacheFs().totalWorkers

if r.cacheFs().plexConnector.isConfigured() {
if !r.cacheFs().plexConnector.isConnected() {
@@ -117,27 +117,26 @@ func (r *Handle) startReadWorkers() {

// scaleOutWorkers will increase the worker pool count by the provided amount
func (r *Handle) scaleWorkers(desired int) {
current := r.workers
current := len(r.workers)
if current == desired {
return
}
if current > desired {
// scale in gracefully
for r.workers > desired {
for i := 0; i < current-desired; i++ {
r.preloadQueue <- -1
r.workers--
}
} else {
// scale out
for r.workers < desired {
for i := 0; i < desired-current; i++ {
w := &worker{
r: r,
id: r.maxWorkerID,
ch: r.preloadQueue,
id: current + i,
}
r.workersWg.Add(1)
r.workers++
r.maxWorkerID++
go w.run()

r.workers = append(r.workers, w)
}
}
// ignore first scale out from 0
@@ -149,7 +148,7 @@ func (r *Handle) scaleWorkers(desired int) {
func (r *Handle) confirmExternalReading() {
// if we have a max value of workers
// then we skip this step
if r.workers > 1 ||
if len(r.workers) > 1 ||
!r.cacheFs().plexConnector.isConfigured() {
return
}
@@ -157,7 +156,7 @@ func (r *Handle) confirmExternalReading() {
return
}
fs.Infof(r, "confirmed reading by external reader")
r.scaleWorkers(r.cacheFs().opt.TotalWorkers)
r.scaleWorkers(r.cacheFs().totalMaxWorkers)
}

// queueOffset will send an offset to the workers if it's different from the last one
@@ -179,8 +178,8 @@ func (r *Handle) queueOffset(offset int64) {
}
}

for i := 0; i < r.workers; i++ {
o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
for i := 0; i < len(r.workers); i++ {
o := r.preloadOffset + r.cacheFs().chunkSize*int64(i)
if o < 0 || o >= r.cachedObject.Size() {
continue
}
@@ -194,6 +193,16 @@ func (r *Handle) queueOffset(offset int64) {
}
}

func (r *Handle) hasAtLeastOneWorker() bool {
oneWorker := false
for i := 0; i < len(r.workers); i++ {
if r.workers[i].isRunning() {
oneWorker = true
}
}
return oneWorker
}

// getChunk is called by the FS to retrieve a specific chunk of known start and size from where it can find it
// it can be from transient or persistent cache
// it will also build the chunk from the cache's specific chunk boundaries and build the final desired chunk in a buffer
@@ -202,7 +211,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
var err error

// we calculate the modulus of the requested offset with the size of a chunk
offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)
offset := chunkStart % r.cacheFs().chunkSize

// we align the start offset of the first chunk to a likely chunk in the storage
chunkStart = chunkStart - offset
@@ -219,7 +228,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
if !found {
// we're gonna give the workers a chance to pickup the chunk
// and retry a couple of times
for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
for i := 0; i < r.cacheFs().readRetries*8; i++ {
data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
if err == nil {
found = true
@@ -234,7 +243,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
// not found in ram or
// the worker didn't managed to download the chunk in time so we abort and close the stream
if err != nil || len(data) == 0 || !found {
if r.workers == 0 {
if !r.hasAtLeastOneWorker() {
fs.Errorf(r, "out of workers")
return nil, io.ErrUnexpectedEOF
}
@@ -246,7 +255,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
if offset > 0 {
if offset > int64(len(data)) {
fs.Errorf(r, "unexpected conditions during reading. current position: %v, current chunk position: %v, current chunk size: %v, offset: %v, chunk size: %v, file size: %v",
r.offset, chunkStart, len(data), offset, r.cacheFs().opt.ChunkSize, r.cachedObject.Size())
r.offset, chunkStart, len(data), offset, r.cacheFs().chunkSize, r.cachedObject.Size())
return nil, io.ErrUnexpectedEOF
}
data = data[int(offset):]
@@ -295,7 +304,14 @@ func (r *Handle) Close() error {
close(r.preloadQueue)
r.closed = true
// wait for workers to complete their jobs before returning
r.workersWg.Wait()
waitCount := 3
for i := 0; i < len(r.workers); i++ {
waitIdx := 0
for r.workers[i].isRunning() && waitIdx < waitCount {
time.Sleep(time.Second)
waitIdx++
}
}
r.memory.db.Flush()

fs.Debugf(r, "cache reader closed %v", r.offset)
@@ -322,9 +338,9 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
err = errors.Errorf("cache: unimplemented seek whence %v", whence)
}

chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
chunkStart := r.offset - (r.offset % r.cacheFs().chunkSize)
if chunkStart >= r.cacheFs().chunkSize {
chunkStart = chunkStart - r.cacheFs().chunkSize
}
r.queueOffset(chunkStart)

@@ -332,9 +348,12 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
}

type worker struct {
r *Handle
rc io.ReadCloser
id int
r *Handle
ch <-chan int64
rc io.ReadCloser
id int
running bool
mu sync.Mutex
}

// String is a representation of this worker
@@ -379,19 +398,33 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
})
}

func (w *worker) isRunning() bool {
w.mu.Lock()
defer w.mu.Unlock()
return w.running
}

func (w *worker) setRunning(f bool) {
w.mu.Lock()
defer w.mu.Unlock()
w.running = f
}

// run is the main loop for the worker which receives offsets to preload
func (w *worker) run() {
var err error
var data []byte
defer w.setRunning(false)
defer func() {
if w.rc != nil {
_ = w.rc.Close()
w.setRunning(false)
}
w.r.workersWg.Done()
}()

for {
chunkStart, open := <-w.r.preloadQueue
chunkStart, open := <-w.ch
w.setRunning(true)
if chunkStart < 0 || !open {
break
}
@@ -418,7 +451,7 @@ func (w *worker) run() {
}
}

chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
chunkEnd := chunkStart + w.r.cacheFs().chunkSize
// TODO: Remove this comment if it proves to be reliable for #1896
//if chunkEnd > w.r.cachedObject.Size() {
// chunkEnd = w.r.cachedObject.Size()
@@ -433,7 +466,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
var data []byte

// stop retries
if retry >= w.r.cacheFs().opt.ReadRetries {
if retry >= w.r.cacheFs().readRetries {
return
}
// back-off between retries
@@ -579,7 +612,7 @@ func (b *backgroundWriter) run() {
return
}

absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), time.Duration(b.fs.opt.TempWaitTime))
absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), b.fs.tempWriteWait)
if err != nil || absPath == "" || !b.fs.isRootInPath(absPath) {
time.Sleep(time.Second)
continue
29 backend/cache/object.go vendored
@@ -44,7 +44,7 @@ func NewObject(f *Fs, remote string) *Object {

cacheType := objectInCache
parentFs := f.UnWrap()
if f.opt.TempWritePath != "" {
if f.tempWritePath != "" {
_, err := f.cache.SearchPendingUpload(fullRemote)
if err == nil { // queued for upload
cacheType = objectPendingUpload
@@ -75,7 +75,7 @@ func ObjectFromOriginal(f *Fs, o fs.Object) *Object {

cacheType := objectInCache
parentFs := f.UnWrap()
if f.opt.TempWritePath != "" {
if f.tempWritePath != "" {
_, err := f.cache.SearchPendingUpload(fullRemote)
if err == nil { // queued for upload
cacheType = objectPendingUpload
@@ -153,7 +153,7 @@ func (o *Object) Storable() bool {
// 2. is not pending a notification from the wrapped fs
func (o *Object) refresh() error {
isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
isExpired := time.Now().After(o.CacheTs.Add(o.CacheFs.fileAge))
if !isExpired && !isNotified {
return nil
}
@@ -208,17 +208,11 @@ func (o *Object) SetModTime(t time.Time) error {

// Open is used to request a specific part of the file using fs.RangeOption
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
var err error

if o.Object == nil {
err = o.refreshFromSource(true)
} else {
err = o.refresh()
}
if err != nil {
if err := o.refreshFromSource(true); err != nil {
return nil, err
}

var err error
cacheReader := NewObjectHandle(o, o.CacheFs)
var offset, limit int64 = 0, -1
for _, option := range options {
@@ -243,7 +237,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
return err
}
// pause background uploads if active
if o.CacheFs.opt.TempWritePath != "" {
if o.CacheFs.tempWritePath != "" {
o.CacheFs.backgroundRunner.pause()
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
@@ -280,7 +274,7 @@ func (o *Object) Remove() error {
return err
}
// pause background uploads if active
if o.CacheFs.opt.TempWritePath != "" {
if o.CacheFs.tempWritePath != "" {
o.CacheFs.backgroundRunner.pause()
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
@@ -359,13 +353,6 @@ func (o *Object) tempFileStartedUpload() bool {
return started
}

// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
return o.Object
}

var (
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.Object = (*Object)(nil)
)
46 backend/cache/plex.go vendored
@@ -3,19 +3,21 @@
package cache

import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"sync"
"time"

"sync"

"bytes"
"io/ioutil"

"github.com/ncw/rclone/fs"
cache "github.com/patrickmn/go-cache"
"github.com/ncw/rclone/fs/config"
"github.com/patrickmn/go-cache"
"golang.org/x/net/websocket"
)

@@ -53,17 +55,15 @@ type plexConnector struct {
username string
password string
token string
insecure bool
f *Fs
mu sync.Mutex
running bool
runningMu sync.Mutex
stateCache *cache.Cache
saveToken func(string)
}

// newPlexConnector connects to a Plex server and generates a token
func newPlexConnector(f *Fs, plexURL, username, password string, insecure bool, saveToken func(string)) (*plexConnector, error) {
func newPlexConnector(f *Fs, plexURL, username, password string) (*plexConnector, error) {
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
if err != nil {
return nil, err
@@ -75,16 +75,14 @@ func newPlexConnector(f *Fs, plexURL, username, password string, insecure bool,
username: username,
password: password,
token: "",
insecure: insecure,
stateCache: cache.New(time.Hour, time.Minute),
saveToken: saveToken,
}

return pc, nil
}

// newPlexConnector connects to a Plex server and generates a token
func newPlexConnectorWithToken(f *Fs, plexURL, token string, insecure bool) (*plexConnector, error) {
func newPlexConnectorWithToken(f *Fs, plexURL, token string) (*plexConnector, error) {
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
if err != nil {
return nil, err
@@ -94,7 +92,6 @@ func newPlexConnectorWithToken(f *Fs, plexURL, token string, insecure bool) (*pl
f: f,
url: u,
token: token,
insecure: insecure,
stateCache: cache.New(time.Hour, time.Minute),
}
pc.listenWebsocket()
@@ -109,26 +106,14 @@ func (p *plexConnector) closeWebsocket() {
p.running = false
}

func (p *plexConnector) websocketDial() (*websocket.Conn, error) {
u := strings.TrimRight(strings.Replace(strings.Replace(
p.url.String(), "http://", "ws://", 1), "https://", "wss://", 1), "/")
url := fmt.Sprintf(defPlexNotificationURL, u, p.token)

config, err := websocket.NewConfig(url, "http://localhost")
if err != nil {
return nil, err
}
if p.insecure {
config.TlsConfig = &tls.Config{InsecureSkipVerify: true}
}
return websocket.DialConfig(config)
}

func (p *plexConnector) listenWebsocket() {
p.runningMu.Lock()
defer p.runningMu.Unlock()

conn, err := p.websocketDial()
u := strings.Replace(p.url.String(), "http://", "ws://", 1)
u = strings.Replace(u, "https://", "wss://", 1)
conn, err := websocket.Dial(fmt.Sprintf(defPlexNotificationURL, strings.TrimRight(u, "/"), p.token),
"", "http://localhost")
if err != nil {
fs.Errorf("plex", "%v", err)
return
@@ -224,9 +209,8 @@ func (p *plexConnector) authenticate() error {
}
p.token = token
if p.token != "" {
if p.saveToken != nil {
p.saveToken(p.token)
}
config.FileSet(p.f.Name(), "plex_token", p.token)
config.SaveConfig()
fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String())
}
p.listenWebsocket()
2 backend/cache/storage_memory.go vendored
@@ -8,7 +8,7 @@ import (
"time"

"github.com/ncw/rclone/fs"
cache "github.com/patrickmn/go-cache"
"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
)
18 backend/cache/storage_persistent.go vendored
@@ -3,17 +3,20 @@
package cache

import (
"time"

"bytes"
"encoding/binary"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
"sync"
"time"

"io/ioutil"

"fmt"

bolt "github.com/coreos/bbolt"
"github.com/ncw/rclone/fs"
@@ -31,8 +34,7 @@ const (

// Features flags for this storage type
type Features struct {
PurgeDb bool // purge the db before starting
DbWaitTime time.Duration // time to wait for DB to be available
PurgeDb bool // purge the db before starting
}

var boltMap = make(map[string]*Persistent)
@@ -120,7 +122,7 @@ func (b *Persistent) connect() error {
if err != nil {
return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
}
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: *cacheDbWaitTime})
if err != nil {
return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
}
@@ -340,7 +342,7 @@ func (b *Persistent) RemoveDir(fp string) error {
// ExpireDir will flush a CachedDirectory and all its objects from the objects
// chunks will remain as they are
func (b *Persistent) ExpireDir(cd *Directory) error {
t := time.Now().Add(time.Duration(-cd.CacheFs.opt.InfoAge))
t := time.Now().Add(cd.CacheFs.fileAge * -1)
cd.CacheTs = &t

// expire all parents
@@ -427,7 +429,7 @@ func (b *Persistent) RemoveObject(fp string) error {

// ExpireObject will flush an Object and all its data if desired
func (b *Persistent) ExpireObject(co *Object, withData bool) error {
co.CacheTs = time.Now().Add(time.Duration(-co.CacheFs.opt.InfoAge))
co.CacheTs = time.Now().Add(co.CacheFs.fileAge * -1)
err := b.AddObject(co)
if withData {
_ = os.RemoveAll(path.Join(b.dataPath, co.abs()))

@@ -17,9 +17,11 @@ import (
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/pkg/errors"
"github.com/rfjakob/eme"

"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/scrypt"

"github.com/rfjakob/eme"
)

// Constants
@@ -41,7 +43,6 @@ var (
ErrorBadDecryptControlChar = errors.New("bad decryption - contains control chars")
ErrorNotAMultipleOfBlocksize = errors.New("not a multiple of blocksize")
ErrorTooShortAfterDecode = errors.New("too short after base32 decode")
ErrorTooLongAfterDecode = errors.New("too long after base32 decode")
ErrorEncryptedFileTooShort = errors.New("file is too short to be encrypted")
ErrorEncryptedFileBadHeader = errors.New("file has truncated block header")
ErrorEncryptedBadMagic = errors.New("not an encrypted file - bad magic string")
@@ -144,7 +145,6 @@ type cipher struct {
buffers sync.Pool // encrypt/decrypt buffers
cryptoRand io.Reader // read crypto random numbers from here
dirNameEncrypt bool
passCorrupted bool
}

// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
@@ -164,11 +164,6 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
return c, nil
}

// Set to pass corrupted blocks
func (c *cipher) setPassCorrupted(passCorrupted bool) {
c.passCorrupted = passCorrupted
}

// Key creates all the internal keys from the password passed in using
// scrypt.
//
@@ -291,9 +286,6 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
// not possible if decodeFilename() working correctly
return "", ErrorTooShortAfterDecode
}
if len(rawCiphertext) > 2048 {
return "", ErrorTooLongAfterDecode
}
paddedPlaintext := eme.Transform(c.block, c.nameTweak[:], rawCiphertext, eme.DirectionDecrypt)
plaintext, err := pkcs7.Unpad(nameCipherBlockSize, paddedPlaintext)
if err != nil {
@@ -828,10 +820,7 @@ func (fh *decrypter) fillBuffer() (err error) {
if err != nil {
return err // return pending error as it is likely more accurate
}
if !fh.c.passCorrupted {
return ErrorEncryptedBadBlock
}
fs.Errorf(nil, "passing corrupted block")
return ErrorEncryptedBadBlock
}
fh.bufIndex = 0
fh.bufSize = n - blockHeaderSize

@@ -24,7 +24,7 @@ func TestNewNameEncryptionMode(t *testing.T) {
{"off", NameEncryptionOff, ""},
{"standard", NameEncryptionStandard, ""},
{"obfuscate", NameEncryptionObfuscated, ""},
{"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
{"potato", NameEncryptionMode(0), "Unknown file name encryption mode \"potato\""},
} {
actual, actualErr := NewNameEncryptionMode(test.in)
assert.Equal(t, actual, test.expected)
@@ -194,10 +194,6 @@ func TestEncryptSegment(t *testing.T) {

func TestDecryptSegment(t *testing.T) {
// We've tested the forwards above, now concentrate on the errors
longName := make([]byte, 3328)
for i := range longName {
longName[i] = 'a'
}
c, _ := newCipher(NameEncryptionStandard, "", "", true)
for _, test := range []struct {
in string
@@ -205,7 +201,6 @@ func TestDecryptSegment(t *testing.T) {
}{
{"64=", ErrorBadBase32Encoding},
{"!", base32.CorruptInputError(0)},
{string(longName), ErrorTooLongAfterDecode},
{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
@@ -4,19 +4,25 @@ package crypt
import (
"fmt"
"io"
"path"
"strconv"
"strings"
"time"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fspath"
"github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors"
)

// Globals
var (
// Flags
cryptShowMapping = flags.BoolP("crypt-show-mapping", "", false, "For all files listed show how the names encrypt.")
)

// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -24,13 +30,11 @@ func init() {
Description: "Encrypt/Decrypt a remote",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
}, {
Name: "filename_encryption",
Help: "How to encrypt the filenames.",
Default: "standard",
Name: "filename_encryption",
Help: "How to encrypt the filenames.",
Examples: []fs.OptionExample{
{
Value: "off",
@@ -44,9 +48,8 @@ func init() {
},
},
}, {
Name: "directory_name_encryption",
Help: "Option to either encrypt directory names or leave them intact.",
Default: true,
Name: "directory_name_encryption",
Help: "Option to either encrypt directory names or leave them intact.",
Examples: []fs.OptionExample{
{
Value: "true",
@@ -65,108 +68,68 @@ func init() {
Name: "password2",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
IsPassword: true,
}, {
Name: "show_mapping",
Help: `For all files listed show how the names encrypt.

If this flag is set then for each file that the remote is asked to
list, it will log (at level INFO) a line stating the decrypted file
name and the encrypted file name.

This is so you can work out which encrypted names are which decrypted
names just in case you need to do something with the encrypted file
names, or for debugging purposes.`,
Default: false,
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "pass_corrupted_blocks",
Help: `Pass through corrupted blocks to the output.

This is for debugging corruption problems in crypt - it shouldn't be needed normally.
`,
Default: false,
Hide: fs.OptionHideConfigurator,
Advanced: true,
Optional: true,
}},
})
}

// newCipherForConfig constructs a Cipher for the given config name
func newCipherForConfig(opt *Options) (Cipher, error) {
mode, err := NewNameEncryptionMode(opt.FilenameEncryption)
// NewCipher constructs a Cipher for the given config name
func NewCipher(name string) (Cipher, error) {
mode, err := NewNameEncryptionMode(config.FileGet(name, "filename_encryption", "standard"))
if err != nil {
return nil, err
}
if opt.Password == "" {
dirNameEncrypt, err := strconv.ParseBool(config.FileGet(name, "directory_name_encryption", "true"))
if err != nil {
return nil, err
}
password := config.FileGet(name, "password", "")
if password == "" {
return nil, errors.New("password not set in config file")
}
password, err := obscure.Reveal(opt.Password)
password, err = obscure.Reveal(password)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password")
}
var salt string
if opt.Password2 != "" {
salt, err = obscure.Reveal(opt.Password2)
salt := config.FileGet(name, "password2", "")
if salt != "" {
salt, err = obscure.Reveal(salt)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password2")
}
}
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
cipher, err := newCipher(mode, password, salt, dirNameEncrypt)
if err != nil {
return nil, errors.Wrap(err, "failed to make cipher")
}
cipher.setPassCorrupted(opt.PassCorruptedBlocks)
return cipher, nil
}

// NewCipher constructs a Cipher for the given config
func NewCipher(m configmap.Mapper) (Cipher, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
return newCipherForConfig(opt)
}

// NewFs contstructs an Fs from the path, container:path
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
func NewFs(name, rpath string) (fs.Fs, error) {
cipher, err := NewCipher(name)
if err != nil {
return nil, err
}
cipher, err := newCipherForConfig(opt)
if err != nil {
return nil, err
}
remote := opt.Remote
remote := config.FileGet(name, "remote")
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
}
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
}
// Look for a file first
remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath))
wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
remotePath := path.Join(remote, cipher.EncryptFileName(rpath))
wrappedFs, err := fs.NewFs(remotePath)
// if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile {
remotePath = fspath.JoinRootPath(wPath, cipher.EncryptDirName(rpath))
wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig)
remotePath = path.Join(remote, cipher.EncryptDirName(rpath))
wrappedFs, err = fs.NewFs(remotePath)
}
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remotePath)
}
f := &Fs{
Fs: wrappedFs,
name: name,
root: rpath,
opt: *opt,
cipher: cipher,
}
// the features here are ones we could support, and they are
@@ -182,7 +145,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {

doChangeNotify := wrappedFs.Features().ChangeNotify
if doChangeNotify != nil {
f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
decrypted, err := f.DecryptFileName(path)
if err != nil {
@@ -191,30 +154,18 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
}
notifyFunc(decrypted, entryType)
}
doChangeNotify(wrappedNotifyFunc, pollInterval)
return doChangeNotify(wrappedNotifyFunc, pollInterval)
}
}

return f, err
}

// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
FilenameEncryption string `config:"filename_encryption"`
DirectoryNameEncryption bool `config:"directory_name_encryption"`
Password string `config:"password"`
Password2 string `config:"password2"`
ShowMapping bool `config:"show_mapping"`
PassCorruptedBlocks bool `config:"pass_corrupted_blocks"`
}

// Fs represents a wrapped fs.Fs
type Fs struct {
fs.Fs
name string
root string
opt Options
features *fs.Features // optional features
cipher Cipher
}
@@ -247,7 +198,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
return
}
if f.opt.ShowMapping {
if *cryptShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newObject(obj))
@@ -261,7 +212,7 @@ func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
return
}
if f.opt.ShowMapping {
if *cryptShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newDir(dir))
@@ -354,13 +305,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
if err != nil {
return nil, err
}
// unwrap the accounting
var wrap accounting.WrapFn
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
// add the hasher
wrappedIn = io.TeeReader(wrappedIn, hasher)
// wrap the accounting back on
wrappedIn = wrap(wrappedIn)
}

// Transfer the data
@@ -733,15 +678,15 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio

// newDir returns a dir with the Name decrypted
func (f *Fs) newDir(dir fs.Directory) fs.Directory {
newDir := fs.NewDirCopy(dir)
new := fs.NewDirCopy(dir)
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
fs.Debugf(remote, "Undecryptable dir name: %v", err)
} else {
newDir.SetRemote(decryptedRemote)
new.SetRemote(decryptedRemote)
}
return newDir
return new
}

// ObjectInfo describes a wrapped fs.ObjectInfo for being the source

@@ -7,30 +7,13 @@ import (
"testing"

"github.com/ncw/rclone/backend/crypt"
_ "github.com/ncw/rclone/backend/drive" // for integration tests
_ "github.com/ncw/rclone/backend/local"
_ "github.com/ncw/rclone/backend/swift" // for integration tests
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil),
})
}

// TestStandard runs integration tests against the remote
func TestStandard(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt"
fstests.Run(t, &fstests.Opt{
@@ -47,9 +30,6 @@ func TestStandard(t *testing.T) {

// TestOff runs integration tests against the remote
func TestOff(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off")
name := "TestCrypt2"
fstests.Run(t, &fstests.Opt{
@@ -66,9 +46,6 @@

// TestObfuscate runs integration tests against the remote
func TestObfuscate(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt3"
fstests.Run(t, &fstests.Opt{
File diff suppressed because it is too large
@@ -1,82 +1,63 @@
|
||||
// +build go1.9
|
||||
|
||||
package drive
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
_ "github.com/ncw/rclone/backend/local"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/operations"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
"google.golang.org/api/drive/v3"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/api/drive/v3"
|
||||
)
|
||||
|
||||
func TestDriveScopes(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
want []string
|
||||
wantFlag bool
|
||||
}{
|
||||
{"", []string{
|
||||
"https://www.googleapis.com/auth/drive",
|
||||
}, false},
|
||||
{" drive.file , drive.readonly", []string{
|
||||
"https://www.googleapis.com/auth/drive.file",
|
||||
"https://www.googleapis.com/auth/drive.readonly",
|
||||
}, false},
|
||||
{" drive.file , drive.appfolder", []string{
|
||||
"https://www.googleapis.com/auth/drive.file",
|
||||
"https://www.googleapis.com/auth/drive.appfolder",
|
||||
}, true},
|
||||
} {
|
||||
got := driveScopes(test.in)
|
||||
assert.Equal(t, test.want, got, test.in)
|
||||
gotFlag := driveScopesContainsAppFolder(got)
|
||||
assert.Equal(t, test.wantFlag, gotFlag, test.in)
|
||||
}
|
||||
}
|
||||
const exampleExportFormats = `{
|
||||
"application/vnd.google-apps.document": [
|
||||
"application/rtf",
|
||||
"application/vnd.oasis.opendocument.text",
|
||||
"text/html",
|
||||
"application/pdf",
|
||||
"application/epub+zip",
|
||||
"application/zip",
|
||||
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
|
||||
"text/plain"
|
||||
],
|
||||
"application/vnd.google-apps.spreadsheet": [
|
||||
"application/x-vnd.oasis.opendocument.spreadsheet",
|
||||
"text/tab-separated-values",
|
||||
"application/pdf",
|
||||
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
|
||||
"text/csv",
|
||||
"application/zip",
|
||||
"application/vnd.oasis.opendocument.spreadsheet"
|
||||
],
|
||||
"application/vnd.google-apps.jam": [
|
||||
"application/pdf"
|
||||
],
|
||||
"application/vnd.google-apps.script": [
|
||||
"application/vnd.google-apps.script+json"
|
||||
],
|
||||
"application/vnd.google-apps.presentation": [
|
||||
"application/vnd.oasis.opendocument.presentation",
|
||||
"application/pdf",
|
||||
"application/vnd.openxmlformats-officedocument.presentationml.presentation",
|
||||
"text/plain"
|
||||
],
|
||||
"application/vnd.google-apps.form": [
|
||||
"application/zip"
|
||||
],
|
||||
"application/vnd.google-apps.drawing": [
|
||||
"image/svg+xml",
|
||||
"image/png",
|
||||
"application/pdf",
|
||||
"image/jpeg"
|
||||
]
|
||||
}`
|
||||
|
||||
/*
|
||||
var additionalMimeTypes = map[string]string{
|
||||
"application/vnd.ms-excel.sheet.macroenabled.12": ".xlsm",
|
||||
"application/vnd.ms-excel.template.macroenabled.12": ".xltm",
|
||||
"application/vnd.ms-powerpoint.presentation.macroenabled.12": ".pptm",
|
||||
"application/vnd.ms-powerpoint.slideshow.macroenabled.12": ".ppsm",
|
||||
"application/vnd.ms-powerpoint.template.macroenabled.12": ".potm",
|
||||
"application/vnd.ms-powerpoint": ".ppt",
|
||||
"application/vnd.ms-word.document.macroenabled.12": ".docm",
|
||||
"application/vnd.ms-word.template.macroenabled.12": ".dotm",
|
||||
"application/vnd.openxmlformats-officedocument.presentationml.template": ".potx",
|
||||
"application/vnd.openxmlformats-officedocument.spreadsheetml.template": ".xltx",
|
||||
"application/vnd.openxmlformats-officedocument.wordprocessingml.template": ".dotx",
|
||||
"application/vnd.sun.xml.writer": ".sxw",
|
||||
"text/richtext": ".rtf",
|
||||
}
|
||||
*/
|
||||
var exportFormats map[string][]string
|
||||
|
||||
// Load the example export formats into exportFormats for testing
|
||||
func TestInternalLoadExampleFormats(t *testing.T) {
|
||||
fetchFormatsOnce.Do(func() {})
|
||||
buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
|
||||
var about struct {
|
||||
ExportFormats map[string][]string `json:"exportFormats,omitempty"`
|
||||
ImportFormats map[string][]string `json:"importFormats,omitempty"`
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, json.Unmarshal(buf, &about))
|
||||
_exportFormats = fixMimeTypeMap(about.ExportFormats)
|
||||
_importFormats = fixMimeTypeMap(about.ImportFormats)
|
||||
func TestInternalLoadExampleExportFormats(t *testing.T) {
|
||||
assert.NoError(t, json.Unmarshal([]byte(exampleExportFormats), &exportFormats))
|
||||
}
|
||||
|
||||
func TestInternalParseExtensions(t *testing.T) {
|
||||
@@ -85,204 +66,47 @@ func TestInternalParseExtensions(t *testing.T) {
|
||||
want []string
|
||||
wantErr error
|
||||
}{
|
||||
{"doc", []string{".doc"}, nil},
|
||||
{" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
|
||||
{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
|
||||
{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
|
||||
{"doc", []string{"doc"}, nil},
|
||||
{" docx ,XLSX, pptx,svg", []string{"docx", "xlsx", "pptx", "svg"}, nil},
|
||||
{"docx,svg,Docx", []string{"docx", "svg"}, nil},
|
||||
{"docx,potato,docx", []string{"docx"}, errors.New(`couldn't find mime type for extension "potato"`)},
|
||||
} {
|
||||
extensions, _, gotErr := parseExtensions(test.in)
|
||||
f := new(Fs)
|
||||
gotErr := f.parseExtensions(test.in)
|
||||
if test.wantErr == nil {
|
||||
assert.NoError(t, gotErr)
|
||||
} else {
|
||||
assert.EqualError(t, gotErr, test.wantErr.Error())
|
||||
}
|
||||
assert.Equal(t, test.want, extensions)
|
||||
assert.Equal(t, test.want, f.extensions)
|
||||
}
|
||||
|
||||
// Test it is appending
|
||||
extensions, _, gotErr := parseExtensions("docx,svg", "docx,svg,xlsx")
|
||||
assert.NoError(t, gotErr)
|
||||
assert.Equal(t, []string{".docx", ".svg", ".xlsx"}, extensions)
|
||||
f := new(Fs)
|
||||
assert.Nil(t, f.parseExtensions("docx,svg"))
|
||||
assert.Nil(t, f.parseExtensions("docx,svg,xlsx"))
|
||||
assert.Equal(t, []string{"docx", "svg", "xlsx"}, f.extensions)
|
||||
|
||||
}
|
||||
|
||||
func TestInternalFindExportFormat(t *testing.T) {
|
||||
item := &drive.File{
|
||||
Name: "file",
|
||||
MimeType: "application/vnd.google-apps.document",
|
||||
}
|
||||
item := new(drive.File)
|
||||
item.MimeType = "application/vnd.google-apps.document"
|
||||
for _, test := range []struct {
|
||||
extensions []string
|
||||
wantExtension string
|
||||
wantMimeType string
|
||||
}{
|
||||
{[]string{}, "", ""},
|
||||
{[]string{".pdf"}, ".pdf", "application/pdf"},
|
||||
{[]string{".pdf", ".rtf", ".xls"}, ".pdf", "application/pdf"},
|
||||
{[]string{".xls", ".rtf", ".pdf"}, ".rtf", "application/rtf"},
|
||||
{[]string{".xls", ".csv", ".svg"}, "", ""},
|
||||
{[]string{"pdf"}, "pdf", "application/pdf"},
|
||||
{[]string{"pdf", "rtf", "xls"}, "pdf", "application/pdf"},
|
||||
{[]string{"xls", "rtf", "pdf"}, "rtf", "application/rtf"},
|
||||
{[]string{"xls", "csv", "svg"}, "", ""},
|
||||
} {
|
||||
f := new(Fs)
|
||||
f.exportExtensions = test.extensions
|
||||
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
|
||||
f.extensions = test.extensions
|
||||
gotExtension, gotMimeType := f.findExportFormat("file", exportFormats[item.MimeType])
|
||||
assert.Equal(t, test.wantExtension, gotExtension)
|
||||
if test.wantExtension != "" {
|
||||
assert.Equal(t, item.Name+gotExtension, gotFilename)
|
||||
} else {
|
||||
assert.Equal(t, "", gotFilename)
|
||||
}
|
        assert.Equal(t, test.wantMimeType, gotMimeType)
        assert.Equal(t, true, gotIsDocument)
    }
}

func TestMimeTypesToExtension(t *testing.T) {
    for mimeType, extension := range _mimeTypeToExtension {
        extensions, err := mime.ExtensionsByType(mimeType)
        assert.NoError(t, err)
        assert.Contains(t, extensions, extension)
    }
}

func TestExtensionToMimeType(t *testing.T) {
    for mimeType, extension := range _mimeTypeToExtension {
        gotMimeType := mime.TypeByExtension(extension)
        mediatype, _, err := mime.ParseMediaType(gotMimeType)
        assert.NoError(t, err)
        assert.Equal(t, mimeType, mediatype)
    }
}

func TestExtensionsForExportFormats(t *testing.T) {
    if _exportFormats == nil {
        t.Error("exportFormats == nil")
    }
    for fromMT, toMTs := range _exportFormats {
        for _, toMT := range toMTs {
            if !isInternalMimeType(toMT) {
                extensions, err := mime.ExtensionsByType(toMT)
                assert.NoError(t, err, "invalid MIME type %q", toMT)
                assert.NotEmpty(t, extensions, "No extension found for %q (from: %q)", fromMT, toMT)
            }
        }
    }
}

func TestExtensionsForImportFormats(t *testing.T) {
    t.Skip()
    if _importFormats == nil {
        t.Error("_importFormats == nil")
    }
    for fromMT := range _importFormats {
        if !isInternalMimeType(fromMT) {
            extensions, err := mime.ExtensionsByType(fromMT)
            assert.NoError(t, err, "invalid MIME type %q", fromMT)
            assert.NotEmpty(t, extensions, "No extension found for %q", fromMT)
        }
    }
}

func (f *Fs) InternalTestDocumentImport(t *testing.T) {
    oldAllow := f.opt.AllowImportNameChange
    f.opt.AllowImportNameChange = true
    defer func() {
        f.opt.AllowImportNameChange = oldAllow
    }()

    testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
    require.NoError(t, err)

    testFilesFs, err := fs.NewFs(testFilesPath)
    require.NoError(t, err)

    _, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
    require.NoError(t, err)

    err = operations.CopyFile(f, testFilesFs, "example2.doc", "example2.doc")
    require.NoError(t, err)
}

func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
    testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
    require.NoError(t, err)

    testFilesFs, err := fs.NewFs(testFilesPath)
    require.NoError(t, err)

    _, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
    require.NoError(t, err)

    err = operations.CopyFile(f, testFilesFs, "example2.xlsx", "example1.ods")
    require.NoError(t, err)
}

func (f *Fs) InternalTestDocumentExport(t *testing.T) {
    var buf bytes.Buffer
    var err error

    f.exportExtensions, _, err = parseExtensions("txt")
    require.NoError(t, err)

    obj, err := f.NewObject("example2.txt")
    require.NoError(t, err)

    rc, err := obj.Open()
    require.NoError(t, err)
    defer func() { require.NoError(t, rc.Close()) }()

    _, err = io.Copy(&buf, rc)
    require.NoError(t, err)
    text := buf.String()

    for _, excerpt := range []string{
        "Lorem ipsum dolor sit amet, consectetur",
        "porta at ultrices in, consectetur at augue.",
    } {
        require.Contains(t, text, excerpt)
    }
}

func (f *Fs) InternalTestDocumentLink(t *testing.T) {
    var buf bytes.Buffer
    var err error

    f.exportExtensions, _, err = parseExtensions("link.html")
    require.NoError(t, err)

    obj, err := f.NewObject("example2.link.html")
    require.NoError(t, err)

    rc, err := obj.Open()
    require.NoError(t, err)
    defer func() { require.NoError(t, rc.Close()) }()

    _, err = io.Copy(&buf, rc)
    require.NoError(t, err)
    text := buf.String()

    require.True(t, strings.HasPrefix(text, "<html>"))
    require.True(t, strings.HasSuffix(text, "</html>\n"))
    for _, excerpt := range []string{
        `<meta http-equiv="refresh"`,
        `Loading <a href="`,
    } {
        require.Contains(t, text, excerpt)
    }
}

func (f *Fs) InternalTest(t *testing.T) {
    // These tests all depend on each other so run them as nested tests
    t.Run("DocumentImport", func(t *testing.T) {
        f.InternalTestDocumentImport(t)
        t.Run("DocumentUpdate", func(t *testing.T) {
            f.InternalTestDocumentUpdate(t)
            t.Run("DocumentExport", func(t *testing.T) {
                f.InternalTestDocumentExport(t)
                t.Run("DocumentLink", func(t *testing.T) {
                    f.InternalTestDocumentLink(t)
                })
            })
        })
    })
}

var _ fstests.InternalTester = (*Fs)(nil)
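For reference, the extension/MIME round-trip these tests exercise comes straight from the standard library's mime package. A minimal standalone sketch, not part of the diff, with illustrative values:

package main

import (
    "fmt"
    "mime"
)

func main() {
    // MIME type -> extensions (platform dependent list)
    extensions, err := mime.ExtensionsByType("text/plain")
    if err != nil {
        panic(err)
    }
    fmt.Println(extensions)

    // extension -> MIME type, then strip any parameters such as charset
    mt := mime.TypeByExtension(".txt")
    mediatype, _, err := mime.ParseMediaType(mt)
    if err != nil {
        panic(err)
    }
    fmt.Println(mediatype) // text/plain
}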
@@ -1,13 +1,10 @@
// Test Drive filesystem interface

// +build go1.9

package drive
package drive_test

import (
    "testing"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/backend/drive"
    "github.com/ncw/rclone/fstest/fstests"
)

@@ -15,23 +12,6 @@ import (
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestDrive:",
        NilObject:  (*Object)(nil),
        ChunkedUpload: fstests.ChunkedUploadConfig{
            MinChunkSize:  minChunkSize,
            CeilChunkSize: fstests.NextPowerOfTwo,
        },
        NilObject: (*drive.Object)(nil),
    })
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    return f.setUploadChunkSize(cs)
}

func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    return f.setUploadCutoff(cs)
}

var (
    _ fstests.SetUploadChunkSizer = (*Fs)(nil)
    _ fstests.SetUploadCutoffer   = (*Fs)(nil)
)

@@ -1,6 +0,0 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build !go1.9

package drive
@@ -1,178 +0,0 @@
{
    "importFormats": {
        "text/tab-separated-values": [
            "application/vnd.google-apps.spreadsheet"
        ],
        "application/x-vnd.oasis.opendocument.presentation": [
            "application/vnd.google-apps.presentation"
        ],
        "image/jpeg": [
            "application/vnd.google-apps.document"
        ],
        "image/bmp": [
            "application/vnd.google-apps.document"
        ],
        "image/gif": [
            "application/vnd.google-apps.document"
        ],
        "application/vnd.ms-excel.sheet.macroenabled.12": [
            "application/vnd.google-apps.spreadsheet"
        ],
        "application/vnd.openxmlformats-officedocument.wordprocessingml.template": [
            "application/vnd.google-apps.document"
        ],
        "application/vnd.ms-powerpoint.presentation.macroenabled.12": [
            "application/vnd.google-apps.presentation"
        ],
        "application/vnd.ms-word.template.macroenabled.12": [
            "application/vnd.google-apps.document"
        ],
        "application/vnd.openxmlformats-officedocument.wordprocessingml.document": [
            "application/vnd.google-apps.document"
        ],
        "image/pjpeg": [
            "application/vnd.google-apps.document"
        ],
        "application/vnd.google-apps.script+text/plain": [
            "application/vnd.google-apps.script"
        ],
        "application/vnd.ms-excel": [
            "application/vnd.google-apps.spreadsheet"
        ],
        "application/vnd.sun.xml.writer": [
            "application/vnd.google-apps.document"
        ],
        "application/vnd.ms-word.document.macroenabled.12": [
            "application/vnd.google-apps.document"
        ],
        "application/vnd.ms-powerpoint.slideshow.macroenabled.12": [
            "application/vnd.google-apps.presentation"
        ],
        "text/rtf": [
            "application/vnd.google-apps.document"
        ],
        "text/plain": [
            "application/vnd.google-apps.document"
        ],
        "application/vnd.oasis.opendocument.spreadsheet": [
            "application/vnd.google-apps.spreadsheet"
        ],
        "application/x-vnd.oasis.opendocument.spreadsheet": [
            "application/vnd.google-apps.spreadsheet"
        ],
        "image/png": [
            "application/vnd.google-apps.document"
        ],
        "application/x-vnd.oasis.opendocument.text": [
            "application/vnd.google-apps.document"
        ],
        "application/msword": [
            "application/vnd.google-apps.document"
        ],
        "application/pdf": [
            "application/vnd.google-apps.document"
        ],
        "application/json": [
            "application/vnd.google-apps.script"
        ],
        "application/x-msmetafile": [
            "application/vnd.google-apps.drawing"
        ],
        "application/vnd.openxmlformats-officedocument.spreadsheetml.template": [
            "application/vnd.google-apps.spreadsheet"
        ],
        "application/vnd.ms-powerpoint": [
            "application/vnd.google-apps.presentation"
        ],
        "application/vnd.ms-excel.template.macroenabled.12": [
            "application/vnd.google-apps.spreadsheet"
        ],
        "image/x-bmp": [
            "application/vnd.google-apps.document"
        ],
        "application/rtf": [
            "application/vnd.google-apps.document"
        ],
        "application/vnd.openxmlformats-officedocument.presentationml.template": [
            "application/vnd.google-apps.presentation"
        ],
        "image/x-png": [
            "application/vnd.google-apps.document"
        ],
        "text/html": [
            "application/vnd.google-apps.document"
        ],
        "application/vnd.oasis.opendocument.text": [
            "application/vnd.google-apps.document"
        ],
        "application/vnd.openxmlformats-officedocument.presentationml.presentation": [
            "application/vnd.google-apps.presentation"
        ],
        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": [
            "application/vnd.google-apps.spreadsheet"
        ],
        "application/vnd.google-apps.script+json": [
            "application/vnd.google-apps.script"
        ],
        "application/vnd.openxmlformats-officedocument.presentationml.slideshow": [
            "application/vnd.google-apps.presentation"
        ],
        "application/vnd.ms-powerpoint.template.macroenabled.12": [
            "application/vnd.google-apps.presentation"
        ],
        "text/csv": [
            "application/vnd.google-apps.spreadsheet"
        ],
        "application/vnd.oasis.opendocument.presentation": [
            "application/vnd.google-apps.presentation"
        ],
        "image/jpg": [
            "application/vnd.google-apps.document"
        ],
        "text/richtext": [
            "application/vnd.google-apps.document"
        ]
    },
    "exportFormats": {
        "application/vnd.google-apps.document": [
            "application/rtf",
            "application/vnd.oasis.opendocument.text",
            "text/html",
            "application/pdf",
            "application/epub+zip",
            "application/zip",
            "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
            "text/plain"
        ],
        "application/vnd.google-apps.spreadsheet": [
            "application/x-vnd.oasis.opendocument.spreadsheet",
            "text/tab-separated-values",
            "application/pdf",
            "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
            "text/csv",
            "application/zip",
            "application/vnd.oasis.opendocument.spreadsheet"
        ],
        "application/vnd.google-apps.jam": [
            "application/pdf"
        ],
        "application/vnd.google-apps.script": [
            "application/vnd.google-apps.script+json"
        ],
        "application/vnd.google-apps.presentation": [
            "application/vnd.oasis.opendocument.presentation",
            "application/pdf",
            "application/vnd.openxmlformats-officedocument.presentationml.presentation",
            "text/plain"
        ],
        "application/vnd.google-apps.form": [
            "application/zip"
        ],
        "application/vnd.google-apps.drawing": [
            "image/svg+xml",
            "image/png",
            "application/pdf",
            "image/jpeg"
        ]
    }
}
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -8,8 +8,6 @@
//
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS

// +build go1.9

package drive

import (
@@ -52,16 +50,15 @@ type resumableUpload struct {
}

// Upload the io.Reader in of size bytes with contentType and info
func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
    params := url.Values{
        "alt":        {"json"},
        "uploadType": {"resumable"},
        "fields":     {partialFields},
    }
func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string, info *drive.File, remote string) (*drive.File, error) {
    params := make(url.Values)
    params.Set("alt", "json")
    params.Set("uploadType", "resumable")
    params.Set("fields", partialFields)
    if f.isTeamDrive {
        params.Set("supportsTeamDrives", "true")
    }
    if f.opt.KeepRevisionForever {
    if *driveKeepRevisionForever {
        params.Set("keepRevisionForever", "true")
    }
    urls := "https://www.googleapis.com/upload/drive/v3/files"
@@ -200,11 +197,11 @@ func (rx *resumableUpload) Upload() (*drive.File, error) {
    start := int64(0)
    var StatusCode int
    var err error
    buf := make([]byte, int(rx.f.opt.ChunkSize))
    buf := make([]byte, int(chunkSize))
    for start < rx.ContentLength {
        reqSize := rx.ContentLength - start
        if reqSize >= int64(rx.f.opt.ChunkSize) {
            reqSize = int64(rx.f.opt.ChunkSize)
        if reqSize >= int64(chunkSize) {
            reqSize = int64(chunkSize)
        }
        chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
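The resumable-upload loop above sends min(remaining, chunkSize) bytes per request out of one reusable buffer. A standalone sketch of that arithmetic, not part of the diff, with illustrative names and sizes:

package main

import (
    "bytes"
    "fmt"
    "io"
)

// uploadChunks walks a reader of known length in fixed-size chunks,
// reusing a single buffer, the way the loop above does.
func uploadChunks(r io.Reader, contentLength, chunkSize int64) error {
    buf := make([]byte, int(chunkSize))
    for start := int64(0); start < contentLength; {
        reqSize := contentLength - start
        if reqSize >= chunkSize {
            reqSize = chunkSize
        }
        n, err := io.ReadFull(r, buf[:reqSize])
        if err != nil {
            return err
        }
        // a real implementation would PUT buf[:n] with a
        // Content-Range: bytes start-(start+n-1)/contentLength header
        fmt.Printf("sent bytes %d-%d/%d\n", start, start+int64(n)-1, contentLength)
        start += int64(n)
    }
    return nil
}

func main() {
    data := bytes.Repeat([]byte("x"), 10)
    if err := uploadChunks(bytes.NewReader(data), 10, 4); err != nil {
        panic(err)
    }
}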
@@ -31,16 +31,13 @@ import (
    "time"

    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config"
    "github.com/ncw/rclone/fs/config/configmap"
    "github.com/ncw/rclone/fs/config/configstruct"
    "github.com/ncw/rclone/fs/config/flags"
    "github.com/ncw/rclone/fs/config/obscure"
    "github.com/ncw/rclone/fs/fserrors"
    "github.com/ncw/rclone/fs/hash"
@@ -58,6 +55,24 @@ const (
    minSleep      = 10 * time.Millisecond
    maxSleep      = 2 * time.Second
    decayConstant = 2 // bigger for slower decay, exponential
)

var (
    // Description of how to auth for this app
    dropboxConfig = &oauth2.Config{
        Scopes: []string{},
        // Endpoint: oauth2.Endpoint{
        //     AuthURL:  "https://www.dropbox.com/1/oauth2/authorize",
        //     TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
        // },
        Endpoint:     dropbox.OAuthEndpoint(""),
        ClientID:     rcloneClientID,
        ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
        RedirectURL:  oauthutil.RedirectLocalhostURL,
    }
    // A regexp matching path names for files Dropbox ignores
    // See https://www.dropbox.com/en/help/145 - Ignored files
    ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
    // Upload chunk size - setting too small makes uploads slow.
    // Chunks are buffered into memory for retries.
    //
@@ -81,26 +96,8 @@ const (
    // Choose 48MB which is 91% of Maximum speed. rclone by
    // default does 4 transfers so this should use 4*48MB = 192MB
    // by default.
    defaultChunkSize = 48 * fs.MebiByte
    maxChunkSize     = 150 * fs.MebiByte
)

var (
    // Description of how to auth for this app
    dropboxConfig = &oauth2.Config{
        Scopes: []string{},
        // Endpoint: oauth2.Endpoint{
        //     AuthURL:  "https://www.dropbox.com/1/oauth2/authorize",
        //     TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
        // },
        Endpoint:     dropbox.OAuthEndpoint(""),
        ClientID:     rcloneClientID,
        ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
        RedirectURL:  oauthutil.RedirectLocalhostURL,
    }
    // A regexp matching path names for files Dropbox ignores
    // See https://www.dropbox.com/en/help/145 - Ignored files
    ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
    uploadChunkSize    = fs.SizeSuffix(48 * 1024 * 1024)
    maxUploadChunkSize = fs.SizeSuffix(150 * 1024 * 1024)
)

// Register with Fs
@@ -109,55 +106,31 @@ func init() {
        Name:        "dropbox",
        Description: "Dropbox",
        NewFs:       NewFs,
        Config: func(name string, m configmap.Mapper) {
            err := oauthutil.ConfigNoOffline("dropbox", name, m, dropboxConfig)
        Config: func(name string) {
            err := oauthutil.ConfigNoOffline("dropbox", name, dropboxConfig)
            if err != nil {
                log.Fatalf("Failed to configure token: %v", err)
            }
        },
        Options: []fs.Option{{
            Name: config.ConfigClientID,
            Help: "Dropbox App Client Id\nLeave blank normally.",
            Help: "Dropbox App Client Id - leave blank normally.",
        }, {
            Name: config.ConfigClientSecret,
            Help: "Dropbox App Client Secret\nLeave blank normally.",
        }, {
            Name: "chunk_size",
            Help: fmt.Sprintf(`Upload chunk size. (< %v).

Any files larger than this will be uploaded in chunks of this size.

Note that chunks are buffered in memory (one at a time) so rclone can
deal with retries. Setting this larger will increase the speed
slightly (at most 10%% for 128MB in tests) at the cost of using more
memory. It can be set smaller if you are tight on memory.`, fs.SizeSuffix(maxChunkSize)),
            Default:  fs.SizeSuffix(defaultChunkSize),
            Advanced: true,
        }, {
            Name:     "impersonate",
            Help:     "Impersonate this user when using a business account.",
            Default:  "",
            Advanced: true,
            Help: "Dropbox App Client Secret - leave blank normally.",
        }},
    })
}

// Options defines the configuration for this backend
type Options struct {
    ChunkSize   fs.SizeSuffix `config:"chunk_size"`
    Impersonate string        `config:"impersonate"`
    flags.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
}

// Fs represents a remote dropbox server
type Fs struct {
    name           string         // name of this remote
    root           string         // the path we are working on
    opt            Options        // parsed options
    features       *fs.Features   // optional features
    srv            files.Client   // the connection to the dropbox server
    sharing        sharing.Client // as above, but for generating sharing links
    users          users.Client   // as above, but for accessing user information
    team           team.Client    // for the Teams API
    slashRoot      string         // root with "/" prefix, lowercase
    slashRootSlash string         // root with "/" prefix and postfix, lowercase
    pacer          *pacer.Pacer   // To pace the API calls
@@ -204,59 +177,23 @@ func shouldRetry(err error) (bool, error) {
        return false, err
    }
    baseErrString := errors.Cause(err).Error()
    // handle any official Retry-After header from Dropbox's SDK first
    switch e := err.(type) {
    case auth.RateLimitAPIError:
        if e.RateLimitError.RetryAfter > 0 {
            fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
            time.Sleep(time.Duration(e.RateLimitError.RetryAfter) * time.Second)
        }
        return true, err
    }
    // Keep old behaviour for backward compatibility
    // FIXME there is probably a better way of doing this!
    if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
        return true, err
    }
    return fserrors.ShouldRetry(err), err
}

func checkUploadChunkSize(cs fs.SizeSuffix) error {
    const minChunkSize = fs.Byte
    if cs < minChunkSize {
        return errors.Errorf("%s is less than %s", cs, minChunkSize)
    }
    if cs > maxChunkSize {
        return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
    }
    return nil
}

func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
    err = checkUploadChunkSize(cs)
    if err == nil {
        old, f.opt.ChunkSize = f.opt.ChunkSize, cs
    }
    return
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    }
    err = checkUploadChunkSize(opt.ChunkSize)
    if err != nil {
        return nil, errors.Wrap(err, "dropbox: chunk size")
func NewFs(name, root string) (fs.Fs, error) {
    if uploadChunkSize > maxUploadChunkSize {
        return nil, errors.Errorf("chunk size too big, must be < %v", maxUploadChunkSize)
    }

    // Convert the old token if it exists. The old token was
    // just a string, the new one is a JSON blob
    oldToken, ok := m.Get(config.ConfigToken)
    oldToken = strings.TrimSpace(oldToken)
    if ok && oldToken != "" && oldToken[0] != '{' {
    oldToken := strings.TrimSpace(config.FileGet(name, config.ConfigToken))
    if oldToken != "" && oldToken[0] != '{' {
        fs.Infof(name, "Converting token to new format")
        newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
        err := config.SetValueAndSave(name, config.ConfigToken, newToken)
@@ -265,14 +202,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        }
    }

    oAuthClient, _, err := oauthutil.NewClient(name, m, dropboxConfig)
    oAuthClient, _, err := oauthutil.NewClient(name, dropboxConfig)
    if err != nil {
        return nil, errors.Wrap(err, "failed to configure dropbox")
    }

    f := &Fs{
        name:  name,
        opt:   *opt,
        pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
    }
    config := dropbox.Config{
@@ -280,29 +216,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        Client:          oAuthClient, // maybe???
        HeaderGenerator: f.headerGenerator,
    }

    // NOTE: needs to be created pre-impersonation so we can look up the impersonated user
    f.team = team.New(config)

    if opt.Impersonate != "" {

        user := team.UserSelectorArg{
            Email: opt.Impersonate,
        }
        user.Tag = "email"

        members := []*team.UserSelectorArg{&user}
        args := team.NewMembersGetInfoArgs(members)

        memberIds, err := f.team.MembersGetInfo(args)

        if err != nil {
            return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
        }

        config.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
    }

    f.srv = files.New(config)
    f.sharing = sharing.New(config)
    f.users = users.New(config)
@@ -998,7 +911,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
// avoidable request to the Dropbox API that does not carry payload.
func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
    chunkSize := int64(o.fs.opt.ChunkSize)
    chunkSize := int64(uploadChunkSize)
    chunks := 0
    if size != -1 {
        chunks = int(size/chunkSize) + 1
@@ -1113,7 +1026,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
    size := src.Size()
    var err error
    var entry *files.FileMetadata
    if size > int64(o.fs.opt.ChunkSize) || size == -1 {
    if size > int64(uploadChunkSize) || size == -1 {
        entry, err = o.uploadChunked(in, commitInfo, size)
    } else {
        err = o.fs.pacer.CallNoRetry(func() (bool, error) {
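The Update method above routes payloads of unknown size (-1, e.g. streamed input) and anything larger than one chunk through the chunked uploader. A standalone sketch of that decision, not part of the diff, with an illustrative constant:

package main

import "fmt"

// illustrative: mirrors the 48MB default chunk size above
const chunkSize int64 = 48 * 1024 * 1024

// uploadPath picks between the two code paths the Update method chooses.
func uploadPath(size int64) string {
    if size > chunkSize || size == -1 {
        return "chunked"
    }
    return "single-request"
}

func main() {
    fmt.Println(uploadPath(-1))                // chunked: size unknown (streamed)
    fmt.Println(uploadPath(10 * 1024 * 1024))  // single-request
    fmt.Println(uploadPath(200 * 1024 * 1024)) // chunked
}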
@@ -1,10 +1,10 @@
// Test Dropbox filesystem interface
package dropbox
package dropbox_test

import (
    "testing"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/backend/dropbox"
    "github.com/ncw/rclone/fstest/fstests"
)

@@ -12,15 +12,6 @@ import (
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestDropbox:",
        NilObject:  (*Object)(nil),
        ChunkedUpload: fstests.ChunkedUploadConfig{
            MaxChunkSize: maxChunkSize,
        },
        NilObject: (*dropbox.Object)(nil),
    })
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    return f.setUploadChunkSize(cs)
}

var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
@@ -4,15 +4,16 @@ package ftp
import (
    "io"
    "net/textproto"
    "net/url"
    "os"
    "path"
    "strings"
    "sync"
    "time"

    "github.com/jlaffaye/ftp"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config/configmap"
    "github.com/ncw/rclone/fs/config/configstruct"
    "github.com/ncw/rclone/fs/config"
    "github.com/ncw/rclone/fs/config/obscure"
    "github.com/ncw/rclone/fs/hash"
    "github.com/ncw/rclone/lib/readers"
@@ -29,40 +30,33 @@ func init() {
        {
            Name:     "host",
            Help:     "FTP host to connect to",
            Required: true,
            Optional: false,
            Examples: []fs.OptionExample{{
                Value: "ftp.example.com",
                Help:  "Connect to ftp.example.com",
            }},
        }, {
            Name: "user",
            Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
            Name:     "user",
            Help:     "FTP username, leave blank for current username, " + os.Getenv("USER"),
            Optional: true,
        }, {
            Name: "port",
            Help: "FTP port, leave blank to use default (21)",
            Name:     "port",
            Help:     "FTP port, leave blank to use default (21) ",
            Optional: true,
        }, {
            Name:       "pass",
            Help:       "FTP password",
            IsPassword: true,
            Required:   true,
            Optional:   false,
        },
    },
})
}

// Options defines the configuration for this backend
type Options struct {
    Host string `config:"host"`
    User string `config:"user"`
    Pass string `config:"pass"`
    Port string `config:"port"`
}

// Fs represents a remote FTP server
type Fs struct {
    name     string       // name of this remote
    root     string       // the path we are working on if any
    opt      Options      // parsed options
    features *fs.Features // optional features
    url      string
    user     string
@@ -167,33 +161,51 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
func NewFs(name, root string) (ff fs.Fs, err error) {
    // defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
    // Parse config into Options struct
    opt := new(Options)
    err = configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    // FIXME Convert the old scheme used for the first beta - remove after release
    if ftpURL := config.FileGet(name, "url"); ftpURL != "" {
        fs.Infof(name, "Converting old configuration")
        u, err := url.Parse(ftpURL)
        if err != nil {
            return nil, errors.Wrapf(err, "Failed to parse old url %q", ftpURL)
        }
        parts := strings.Split(u.Host, ":")
        config.FileSet(name, "host", parts[0])
        if len(parts) > 1 {
            config.FileSet(name, "port", parts[1])
        }
        config.FileSet(name, "host", u.Host)
        config.FileSet(name, "user", config.FileGet(name, "username"))
        config.FileSet(name, "pass", config.FileGet(name, "password"))
        config.FileDeleteKey(name, "username")
        config.FileDeleteKey(name, "password")
        config.FileDeleteKey(name, "url")
        config.SaveConfig()
        if u.Path != "" && u.Path != "/" {
            fs.Errorf(name, "Path %q in FTP URL no longer supported - put it on the end of the remote %s:%s", u.Path, name, u.Path)
        }
    }
    pass, err := obscure.Reveal(opt.Pass)
    host := config.FileGet(name, "host")
    user := config.FileGet(name, "user")
    pass := config.FileGet(name, "pass")
    port := config.FileGet(name, "port")
    pass, err = obscure.Reveal(pass)
    if err != nil {
        return nil, errors.Wrap(err, "NewFS decrypt password")
    }
    user := opt.User
    if user == "" {
        user = os.Getenv("USER")
    }
    port := opt.Port
    if port == "" {
        port = "21"
    }

    dialAddr := opt.Host + ":" + port
    dialAddr := host + ":" + port
    u := "ftp://" + path.Join(dialAddr+"/", root)
    f := &Fs{
        name: name,
        root: root,
        opt:  *opt,
        url:  u,
        user: user,
        pass: pass,
@@ -646,21 +658,7 @@ func (f *ftpReadCloser) Read(p []byte) (n int, err error) {

// Close the FTP reader and return the connection to the pool
func (f *ftpReadCloser) Close() error {
    var err error
    errchan := make(chan error, 1)
    go func() {
        errchan <- f.rc.Close()
    }()
    // Wait for Close for up to 60 seconds
    timer := time.NewTimer(60 * time.Second)
    select {
    case err = <-errchan:
        timer.Stop()
    case <-timer.C:
        // if timer fired assume no error but connection dead
        fs.Errorf(f.f, "Timeout when waiting for connection Close")
        return nil
    }
    err := f.rc.Close()
    // if errors while reading or closing, dump the connection
    if err != nil || f.err != nil {
        _ = f.c.Quit()
@@ -718,11 +716,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
    path := path.Join(o.fs.root, o.remote)
    // remove the file if upload failed
    remove := func() {
        // Give the FTP server a chance to get its internal state in order after the error.
        // The error may have been local in which case we closed the connection. The server
        // may still be dealing with it for a moment. A sleep isn't ideal but I haven't been
        // able to think of a better method to find out if the server has finished - ncw
        time.Sleep(1 * time.Second)
        removeErr := o.Remove()
        if removeErr != nil {
            fs.Debugf(o, "Failed to remove: %v", removeErr)
@@ -736,7 +729,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
    }
    err = c.Stor(path, in)
    if err != nil {
        _ = c.Quit() // toss this connection to avoid sync errors
        _ = c.Quit()
        remove()
        return errors.Wrap(err, "update stor")
    }
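The old-config conversion above splits u.Host on ":" by hand; net/url can perform the same split. A standalone sketch, not part of the diff, with an illustrative URL:

package main

import (
    "fmt"
    "net/url"
)

func main() {
    u, err := url.Parse("ftp://ftp.example.com:2121/some/path")
    if err != nil {
        panic(err)
    }
    // Hostname() and Port() split u.Host without a manual strings.Split
    fmt.Println(u.Hostname()) // ftp.example.com
    fmt.Println(u.Port())     // 2121
    fmt.Println(u.Path)       // /some/path
}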
@@ -1,7 +1,4 @@
// Package googlecloudstorage provides an interface to Google Cloud Storage

// +build go1.9

package googlecloudstorage

/*
@@ -32,8 +29,7 @@ import (

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config"
    "github.com/ncw/rclone/fs/config/configmap"
    "github.com/ncw/rclone/fs/config/configstruct"
    "github.com/ncw/rclone/fs/config/flags"
    "github.com/ncw/rclone/fs/config/obscure"
    "github.com/ncw/rclone/fs/fserrors"
    "github.com/ncw/rclone/fs/fshttp"
@@ -59,6 +55,8 @@ const (
)

var (
    gcsLocation     = flags.StringP("gcs-location", "", "", "Default location for buckets (us|eu|asia|us-central1|us-east1|us-east4|us-west1|asia-east1|asia-noetheast1|asia-southeast1|australia-southeast1|europe-west1|europe-west2).")
    gcsStorageClass = flags.StringP("gcs-storage-class", "", "", "Default storage class for buckets (MULTI_REGIONAL|REGIONAL|STANDARD|NEARLINE|COLDLINE|DURABLE_REDUCED_AVAILABILITY).")
    // Description of how to auth for this app
    storageConfig = &oauth2.Config{
        Scopes: []string{storage.DevstorageFullControlScope},
@@ -73,36 +71,29 @@ var (
func init() {
    fs.Register(&fs.RegInfo{
        Name:        "google cloud storage",
        Prefix:      "gcs",
        Description: "Google Cloud Storage (this is not Google Drive)",
        NewFs:       NewFs,
        Config: func(name string, m configmap.Mapper) {
            saFile, _ := m.Get("service_account_file")
            saCreds, _ := m.Get("service_account_credentials")
            if saFile != "" || saCreds != "" {
        Config: func(name string) {
            if config.FileGet(name, "service_account_file") != "" {
                return
            }
            err := oauthutil.Config("google cloud storage", name, m, storageConfig)
            err := oauthutil.Config("google cloud storage", name, storageConfig)
            if err != nil {
                log.Fatalf("Failed to configure token: %v", err)
            }
        },
        Options: []fs.Option{{
            Name: config.ConfigClientID,
            Help: "Google Application Client Id\nLeave blank normally.",
            Help: "Google Application Client Id - leave blank normally.",
        }, {
            Name: config.ConfigClientSecret,
            Help: "Google Application Client Secret\nLeave blank normally.",
            Help: "Google Application Client Secret - leave blank normally.",
        }, {
            Name: "project_number",
            Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
            Help: "Project number optional - needed only for list/create/delete buckets - see your developer console.",
        }, {
            Name: "service_account_file",
            Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
        }, {
            Name: "service_account_credentials",
            Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
            Hide: fs.OptionHideBoth,
            Help: "Service Account Credentials JSON file path - needed only if you want use SA instead of interactive login.",
        }, {
            Name: "object_acl",
            Help: "Access Control List for new objects.",
@@ -216,29 +207,22 @@ func init() {
    })
}

// Options defines the configuration for this backend
type Options struct {
    ProjectNumber             string `config:"project_number"`
    ServiceAccountFile        string `config:"service_account_file"`
    ServiceAccountCredentials string `config:"service_account_credentials"`
    ObjectACL                 string `config:"object_acl"`
    BucketACL                 string `config:"bucket_acl"`
    Location                  string `config:"location"`
    StorageClass              string `config:"storage_class"`
}

// Fs represents a remote storage server
type Fs struct {
    name       string           // name of this remote
    root       string           // the path we are working on if any
    opt        Options          // parsed options
    features   *fs.Features     // optional features
    svc        *storage.Service // the connection to the storage server
    client     *http.Client     // authorized client
    bucket     string           // the bucket we are working on
    bucketOKMu sync.Mutex       // mutex to protect bucket OK
    bucketOK   bool             // true if we have created the bucket
    pacer      *pacer.Pacer     // To pace the API calls
    name          string           // name of this remote
    root          string           // the path we are working on if any
    features      *fs.Features     // optional features
    svc           *storage.Service // the connection to the storage server
    client        *http.Client     // authorized client
    bucket        string           // the bucket we are working on
    bucketOKMu    sync.Mutex       // mutex to protect bucket OK
    bucketOK      bool             // true if we have created the bucket
    projectNumber string           // used for finding buckets
    objectACL     string           // used when creating new objects
    bucketACL     string           // used when creating new buckets
    location      string           // location of new buckets
    storageClass  string           // storage class of new buckets
    pacer         *pacer.Pacer     // To pace the API calls
}

// Object describes a storage object
@@ -331,37 +315,27 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(name, root string) (fs.Fs, error) {
    var oAuthClient *http.Client

    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    }
    if opt.ObjectACL == "" {
        opt.ObjectACL = "private"
    }
    if opt.BucketACL == "" {
        opt.BucketACL = "private"
    }
    var err error

    // try loading service account credentials from env variable, then from a file
    if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
        loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
    serviceAccountCreds := []byte(config.FileGet(name, "service_account_credentials"))
    serviceAccountPath := config.FileGet(name, "service_account_file")
    if len(serviceAccountCreds) == 0 && serviceAccountPath != "" {
        loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(serviceAccountPath))
        if err != nil {
            return nil, errors.Wrap(err, "error opening service account credentials file")
        }
        opt.ServiceAccountCredentials = string(loadedCreds)
        serviceAccountCreds = loadedCreds
    }
    if opt.ServiceAccountCredentials != "" {
        oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
    if len(serviceAccountCreds) > 0 {
        oAuthClient, err = getServiceAccountClient(serviceAccountCreds)
        if err != nil {
            return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
        }
    } else {
        oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
        oAuthClient, _, err = oauthutil.NewClient(name, storageConfig)
        if err != nil {
            return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
        }
@@ -373,17 +347,33 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    }

    f := &Fs{
        name:   name,
        bucket: bucket,
        root:   directory,
        opt:    *opt,
        pacer:  pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
        name:          name,
        bucket:        bucket,
        root:          directory,
        projectNumber: config.FileGet(name, "project_number"),
        objectACL:     config.FileGet(name, "object_acl"),
        bucketACL:     config.FileGet(name, "bucket_acl"),
        location:      config.FileGet(name, "location"),
        storageClass:  config.FileGet(name, "storage_class"),
        pacer:         pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
    }
    f.features = (&fs.Features{
        ReadMimeType:  true,
        WriteMimeType: true,
        BucketBased:   true,
    }).Fill(f)
    if f.objectACL == "" {
        f.objectACL = "private"
    }
    if f.bucketACL == "" {
        f.bucketACL = "private"
    }
    if *gcsLocation != "" {
        f.location = *gcsLocation
    }
    if *gcsStorageClass != "" {
        f.storageClass = *gcsStorageClass
    }

    // Create a new authorized Drive client.
    f.client = oAuthClient
@@ -490,7 +480,7 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
        remote := object.Name[rootLength:]
        // is this a directory marker?
        if (strings.HasSuffix(remote, "/") || remote == "") && object.Size == 0 {
            if recurse && remote != "" {
            if recurse {
                // add a directory in if --fast-list since will have no prefixes
                err = fn(remote[:len(remote)-1], object, true)
                if err != nil {
@@ -560,10 +550,10 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
    if dir != "" {
        return nil, fs.ErrorListBucketRequired
    }
    if f.opt.ProjectNumber == "" {
    if f.projectNumber == "" {
        return nil, errors.New("can't list buckets without project number")
    }
    listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
    listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)
    for {
        var buckets *storage.Buckets
        err = f.pacer.Call(func() (bool, error) {
@@ -682,17 +672,17 @@ func (f *Fs) Mkdir(dir string) (err error) {
        return errors.Wrap(err, "failed to get bucket")
    }

    if f.opt.ProjectNumber == "" {
    if f.projectNumber == "" {
        return errors.New("can't make bucket without project number")
    }

    bucket := storage.Bucket{
        Name:         f.bucket,
        Location:     f.opt.Location,
        StorageClass: f.opt.StorageClass,
        Location:     f.location,
        StorageClass: f.storageClass,
    }
    err = f.pacer.Call(func() (bool, error) {
        _, err = f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket).PredefinedAcl(f.opt.BucketACL).Do()
        _, err = f.svc.Buckets.Insert(f.projectNumber, &bucket).PredefinedAcl(f.bucketACL).Do()
        return shouldRetry(err)
    })
    if err == nil {
@@ -958,7 +948,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
    }
    var newObject *storage.Object
    err = o.fs.pacer.CallNoRetry(func() (bool, error) {
        newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.opt.ObjectACL).Do()
        newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.objectACL).Do()
        return shouldRetry(err)
    })
    if err != nil {
@@ -1,7 +1,4 @@
// Test GoogleCloudStorage filesystem interface

// +build go1.9

package googlecloudstorage_test

import (

@@ -1,6 +0,0 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build !go1.9

package googlecloudstorage
@@ -14,8 +14,7 @@ import (
    "time"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config/configmap"
    "github.com/ncw/rclone/fs/config/configstruct"
    "github.com/ncw/rclone/fs/config"
    "github.com/ncw/rclone/fs/fshttp"
    "github.com/ncw/rclone/fs/hash"
    "github.com/ncw/rclone/lib/rest"
@@ -36,7 +35,7 @@ func init() {
        Options: []fs.Option{{
            Name:     "url",
            Help:     "URL of http host to connect to",
            Required: true,
            Optional: false,
            Examples: []fs.OptionExample{{
                Value: "https://example.com",
                Help:  "Connect to example.com",
@@ -46,17 +45,11 @@ func init() {
    fs.Register(fsi)
}

// Options defines the configuration for this backend
type Options struct {
    Endpoint string `config:"url"`
}

// Fs stores the interface to the remote HTTP files
type Fs struct {
    name        string
    root        string
    features    *fs.Features // optional features
    opt         Options      // options for this backend
    endpoint    *url.URL
    endpointURL string // endpoint as a string
    httpClient  *http.Client
@@ -85,20 +78,14 @@ func statusError(res *http.Response, err error) error {

// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    }

    if !strings.HasSuffix(opt.Endpoint, "/") {
        opt.Endpoint += "/"
func NewFs(name, root string) (fs.Fs, error) {
    endpoint := config.FileGet(name, "url")
    if !strings.HasSuffix(endpoint, "/") {
        endpoint += "/"
    }

    // Parse the endpoint and stick the root onto it
    base, err := url.Parse(opt.Endpoint)
    base, err := url.Parse(endpoint)
    if err != nil {
        return nil, err
    }
@@ -143,7 +130,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    f := &Fs{
        name:        name,
        root:        root,
        opt:         *opt,
        httpClient:  client,
        endpoint:    u,
        endpointURL: u.String(),
@@ -193,7 +179,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
    }
    err := o.stat()
    if err != nil {
        return nil, err
        return nil, errors.Wrap(err, "Stat failed")
    }
    return o, nil
}
@@ -416,9 +402,6 @@ func (o *Object) url() string {
func (o *Object) stat() error {
    url := o.url()
    res, err := o.fs.httpClient.Head(url)
    if err == nil && res.StatusCode == http.StatusNotFound {
        return fs.ErrorObjectNotFound
    }
    err = statusError(res, err)
    if err != nil {
        return errors.Wrap(err, "failed to stat")

@@ -16,7 +16,6 @@ import (

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config"
    "github.com/ncw/rclone/fs/config/configmap"
    "github.com/ncw/rclone/fstest"
    "github.com/ncw/rclone/lib/rest"
    "github.com/stretchr/testify/assert"
@@ -30,7 +29,7 @@ var (
)

// prepareServer the test server and return a function to tidy it up afterwards
func prepareServer(t *testing.T) (configmap.Simple, func()) {
func prepareServer(t *testing.T) func() {
    // file server for test/files
    fileServer := http.FileServer(http.Dir(filesPath))

@@ -42,24 +41,19 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
    // fs.Config.LogLevel = fs.LogLevelDebug
    // fs.Config.DumpHeaders = true
    // fs.Config.DumpBodies = true
    // config.FileSet(remoteName, "type", "http")
    // config.FileSet(remoteName, "url", ts.URL)

    m := configmap.Simple{
        "type": "http",
        "url":  ts.URL,
    }
    config.FileSet(remoteName, "type", "http")
    config.FileSet(remoteName, "url", ts.URL)

    // return a function to tidy up
    return m, ts.Close
    return ts.Close
}

// prepare the test server and return a function to tidy it up afterwards
func prepare(t *testing.T) (fs.Fs, func()) {
    m, tidy := prepareServer(t)
    tidy := prepareServer(t)

    // Instantiate it
    f, err := NewFs(remoteName, "", m)
    f, err := NewFs(remoteName, "")
    require.NoError(t, err)

    return f, tidy
@@ -144,11 +138,6 @@ func TestNewObject(t *testing.T) {

    dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
    assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))

    // check object not found
    o, err = f.NewObject("not found.txt")
    assert.Nil(t, o)
    assert.Equal(t, fs.ErrorObjectNotFound, err)
}

func TestOpen(t *testing.T) {
@@ -188,20 +177,20 @@ func TestMimeType(t *testing.T) {
}

func TestIsAFileRoot(t *testing.T) {
    m, tidy := prepareServer(t)
    tidy := prepareServer(t)
    defer tidy()

    f, err := NewFs(remoteName, "one%.txt", m)
    f, err := NewFs(remoteName, "one%.txt")
    assert.Equal(t, err, fs.ErrorIsFile)

    testListRoot(t, f)
}

func TestIsAFileSubDir(t *testing.T) {
    m, tidy := prepareServer(t)
    tidy := prepareServer(t)
    defer tidy()

    f, err := NewFs(remoteName, "three/underthree.txt", m)
    f, err := NewFs(remoteName, "three/underthree.txt")
    assert.Equal(t, err, fs.ErrorIsFile)

    entries, err := f.List("")
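The http backend's stat() above works by issuing a HEAD request and mapping 404 to a distinct not-found error. A standalone sketch of that pattern, not part of the diff, with an illustrative URL and error name:

package main

import (
    "errors"
    "fmt"
    "net/http"
)

var errObjectNotFound = errors.New("object not found")

// stat issues a HEAD request and classifies the response the way the
// backend's stat method does: 404 becomes a sentinel error, other
// non-2xx statuses become generic errors.
func stat(url string) error {
    res, err := http.Head(url)
    if err != nil {
        return err
    }
    defer res.Body.Close()
    if res.StatusCode == http.StatusNotFound {
        return errObjectNotFound
    }
    if res.StatusCode < 200 || res.StatusCode > 299 {
        return fmt.Errorf("HTTP error %d: %s", res.StatusCode, res.Status)
    }
    return nil
}

func main() {
    fmt.Println(stat("https://example.com/"))
}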
@@ -2,9 +2,7 @@ package hubic

import (
    "net/http"
    "time"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/swift"
)

@@ -23,17 +21,12 @@ func newAuth(f *Fs) *auth {
// Request constructs a http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
    const retries = 10
    for try := 1; try <= retries; try++ {
        err = a.f.getCredentials()
        if err == nil {
            break
        }
        time.Sleep(100 * time.Millisecond)
        fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
func (a *auth) Request(*swift.Connection) (*http.Request, error) {
    err := a.f.getCredentials()
    if err != nil {
        return nil, err
    }
    return nil, err
    return nil, nil
}

// Response parses the result of an http request

@@ -16,8 +16,6 @@ import (
    "github.com/ncw/rclone/backend/swift"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config"
    "github.com/ncw/rclone/fs/config/configmap"
    "github.com/ncw/rclone/fs/config/configstruct"
    "github.com/ncw/rclone/fs/config/obscure"
    "github.com/ncw/rclone/fs/fshttp"
    "github.com/ncw/rclone/lib/oauthutil"
@@ -54,19 +52,19 @@ func init() {
        Name:        "hubic",
        Description: "Hubic",
        NewFs:       NewFs,
        Config: func(name string, m configmap.Mapper) {
            err := oauthutil.Config("hubic", name, m, oauthConfig)
        Config: func(name string) {
            err := oauthutil.Config("hubic", name, oauthConfig)
            if err != nil {
                log.Fatalf("Failed to configure token: %v", err)
            }
        },
        Options: append([]fs.Option{{
        Options: []fs.Option{{
            Name: config.ConfigClientID,
            Help: "Hubic Client Id\nLeave blank normally.",
            Help: "Hubic Client Id - leave blank normally.",
        }, {
            Name: config.ConfigClientSecret,
            Help: "Hubic Client Secret\nLeave blank normally.",
        }}, swift.SharedOptions...),
            Help: "Hubic Client Secret - leave blank normally.",
        }},
    })
}

@@ -147,8 +145,8 @@ func (f *Fs) getCredentials() (err error) {
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    client, _, err := oauthutil.NewClient(name, m, oauthConfig)
func NewFs(name, root string) (fs.Fs, error) {
    client, _, err := oauthutil.NewClient(name, oauthConfig)
    if err != nil {
        return nil, errors.Wrap(err, "failed to configure Hubic")
    }
@@ -169,15 +167,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        return nil, errors.Wrap(err, "error authenticating swift connection")
    }

    // Parse config into swift.Options struct
    opt := new(swift.Options)
    err = configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    }

    // Make inner swift Fs from the connection
    swiftFs, err := swift.NewFsWithConnection(opt, name, root, c, true)
    swiftFs, err := swift.NewFsWithConnection(name, root, c, true)
    if err != nil && err != fs.ErrorIsFile {
        return nil, err
    }
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
// default time format for almost all request and responses
|
||||
timeFormat = "2006-01-02-T15:04:05Z0700"
|
||||
// the API server seems to use a different format
|
||||
apiTimeFormat = "2006-01-02T15:04:05Z07:00"
|
||||
)
|
||||
|
||||
// Time represents time values in the Jottacloud API. It uses a custom RFC3339 like format.
|
||||
type Time time.Time
|
||||
|
||||
// UnmarshalXML turns XML into a Time
|
||||
func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
var v string
|
||||
if err := d.DecodeElement(&v, &start); err != nil {
|
||||
return err
|
||||
}
|
||||
if v == "" {
|
||||
*t = Time(time.Time{})
|
||||
return nil
|
||||
}
|
||||
newTime, err := time.Parse(timeFormat, v)
|
||||
if err == nil {
|
||||
*t = Time(newTime)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// MarshalXML turns a Time into XML
|
||||
func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
return e.EncodeElement(t.String(), start)
|
||||
}
|
||||
|
||||
// Return Time string in Jottacloud format
|
||||
func (t Time) String() string { return time.Time(t).Format(timeFormat) }
|
||||
|
||||
// APIString returns Time string in Jottacloud API format
|
||||
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
|
||||
|
||||
// Flag is a hacky type for checking if an attribute is present
|
||||
type Flag bool
|
||||
|
||||
// UnmarshalXMLAttr sets Flag to true if the attribute is present
|
||||
func (f *Flag) UnmarshalXMLAttr(attr xml.Attr) error {
|
||||
*f = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalXMLAttr : Do not use
|
||||
func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
|
||||
attr := xml.Attr{
|
||||
Name: name,
|
||||
Value: "false",
|
||||
}
|
||||
return attr, errors.New("unimplemented")
|
||||
}
|
||||
|
||||
// TokenJSON is the struct representing the HTTP response from OAuth2
|
||||
// providers returning a token in JSON form.
|
||||
type TokenJSON struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
TokenType string `json:"token_type"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
|
||||
}
|
||||
|
||||
/*
|
||||
GET http://www.jottacloud.com/JFS/<account>
|
||||
|
||||
<user time="2018-07-18-T21:39:10Z" host="dn-132">
|
||||
<username>12qh1wsht8cssxdtwl15rqh9</username>
|
||||
<account-type>free</account-type>
|
||||
<locked>false</locked>
|
||||
<capacity>5368709120</capacity>
|
||||
<max-devices>-1</max-devices>
|
||||
<max-mobile-devices>-1</max-mobile-devices>
|
||||
<usage>0</usage>
|
||||
<read-locked>false</read-locked>
|
||||
<write-locked>false</write-locked>
|
||||
<quota-write-locked>false</quota-write-locked>
|
||||
<enable-sync>true</enable-sync>
|
||||
<enable-foldershare>true</enable-foldershare>
|
||||
<devices>
|
||||
<device>
|
||||
<name xml:space="preserve">Jotta</name>
|
||||
<display_name xml:space="preserve">Jotta</display_name>
|
||||
<type>JOTTA</type>
|
||||
<sid>5c458d01-9eaf-4f23-8d3c-2486fd9704d8</sid>
|
||||
<size>0</size>
|
||||
<modified>2018-07-15-T22:04:59Z</modified>
|
||||
</device>
|
||||
</devices>
|
||||
</user>
|
||||
*/
|
||||
|
||||
// AccountInfo represents a Jottacloud account
|
||||
type AccountInfo struct {
|
||||
Username string `xml:"username"`
|
||||
AccountType string `xml:"account-type"`
|
||||
Locked bool `xml:"locked"`
|
||||
Capacity int64 `xml:"capacity"`
|
||||
MaxDevices int `xml:"max-devices"`
|
||||
MaxMobileDevices int `xml:"max-mobile-devices"`
|
||||
Usage int64 `xml:"usage"`
|
||||
ReadLocked bool `xml:"read-locked"`
|
||||
WriteLocked bool `xml:"write-locked"`
|
||||
QuotaWriteLocked bool `xml:"quota-write-locked"`
|
||||
EnableSync bool `xml:"enable-sync"`
|
||||
EnableFolderShare bool `xml:"enable-foldershare"`
|
||||
Devices []JottaDevice `xml:"devices>device"`
|
||||
}
|
||||
|
||||
/*
|
||||
GET http://www.jottacloud.com/JFS/<account>/<device>
|
||||
|
||||
<device time="2018-07-23-T20:21:50Z" host="dn-158">
|
||||
<name xml:space="preserve">Jotta</name>
|
||||
<display_name xml:space="preserve">Jotta</display_name>
|
||||
<type>JOTTA</type>
|
||||
<sid>5c458d01-9eaf-4f23-8d3c-2486fd9704d8</sid>
|
||||
<size>0</size>
|
||||
<modified>2018-07-15-T22:04:59Z</modified>
|
||||
<user>12qh1wsht8cssxdtwl15rqh9</user>
|
||||
<mountPoints>
|
||||
<mountPoint>
|
||||
<name xml:space="preserve">Archive</name>
|
||||
<size>0</size>
|
||||
<modified>2018-07-15-T22:04:59Z</modified>
|
||||
</mountPoint>
|
||||
<mountPoint>
|
||||
<name xml:space="preserve">Shared</name>
|
||||
<size>0</size>
|
||||
<modified></modified>
|
||||
</mountPoint>
|
||||
<mountPoint>
|
||||
<name xml:space="preserve">Sync</name>
|
||||
<size>0</size>
|
||||
<modified></modified>
|
||||
</mountPoint>
|
||||
</mountPoints>
|
||||
<metadata first="" max="" total="3" num_mountpoints="3"/>
|
||||
</device>
|
||||
*/
|
||||
|
||||
// JottaDevice represents a Jottacloud Device
|
||||
type JottaDevice struct {
|
||||
Name string `xml:"name"`
|
||||
DisplayName string `xml:"display_name"`
|
||||
Type string `xml:"type"`
|
||||
Sid string `xml:"sid"`
|
||||
Size int64 `xml:"size"`
|
||||
User string `xml:"user"`
|
||||
MountPoints []JottaMountPoint `xml:"mountPoints>mountPoint"`
|
||||
}
|
||||
|
||||
/*
|
||||
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>
|
||||
|
||||
<mountPoint time="2018-07-24-T20:35:02Z" host="dn-157">
|
||||
<name xml:space="preserve">Sync</name>
|
||||
<path xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta</path>
|
||||
<abspath xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta</abspath>
|
||||
<size>0</size>
|
||||
<modified></modified>
|
||||
<device>Jotta</device>
|
||||
<user>12qh1wsht8cssxdtwl15rqh9</user>
|
||||
<folders>
|
||||
<folder name="test"/>
|
||||
</folders>
|
||||
<metadata first="" max="" total="1" num_folders="1" num_files="0"/>
|
||||
</mountPoint>
|
||||
*/
|
||||
|
||||
// JottaMountPoint represents a Jottacloud mountpoint
|
||||
type JottaMountPoint struct {
|
||||
Name string `xml:"name"`
|
||||
Size int64 `xml:"size"`
|
||||
Device string `xml:"device"`
|
||||
Folders []JottaFolder `xml:"folders>folder"`
|
||||
Files []JottaFile `xml:"files>file"`
|
||||
}

/*
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/<folder>

<folder name="test" time="2018-07-24-T20:41:37Z" host="dn-158">
  <path xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta/Sync</path>
  <abspath xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta/Sync</abspath>
  <folders>
    <folder name="t2"/>
  </folders>
  <files>
    <file name="block.csv" uuid="f6553cd4-1135-48fe-8e6a-bb9565c50ef2">
      <currentRevision>
        <number>1</number>
        <state>COMPLETED</state>
        <created>2018-07-05-T15:08:02Z</created>
        <modified>2018-07-05-T15:08:02Z</modified>
        <mime>application/octet-stream</mime>
        <size>30827730</size>
        <md5>1e8a7b728ab678048df00075c9507158</md5>
        <updated>2018-07-24-T20:41:10Z</updated>
      </currentRevision>
    </file>
  </files>
  <metadata first="" max="" total="2" num_folders="1" num_files="1"/>
</folder>
*/

// JottaFolder represents a Jottacloud folder
type JottaFolder struct {
    XMLName    xml.Name
    Name       string        `xml:"name,attr"`
    Deleted    Flag          `xml:"deleted,attr"`
    Path       string        `xml:"path"`
    CreatedAt  Time          `xml:"created"`
    ModifiedAt Time          `xml:"modified"`
    Updated    Time          `xml:"updated"`
    Folders    []JottaFolder `xml:"folders>folder"`
    Files      []JottaFile   `xml:"files>file"`
}

/*
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/.../<file>

<file name="block.csv" uuid="f6553cd4-1135-48fe-8e6a-bb9565c50ef2">
  <currentRevision>
    <number>1</number>
    <state>COMPLETED</state>
    <created>2018-07-05-T15:08:02Z</created>
    <modified>2018-07-05-T15:08:02Z</modified>
    <mime>application/octet-stream</mime>
    <size>30827730</size>
    <md5>1e8a7b728ab678048df00075c9507158</md5>
    <updated>2018-07-24-T20:41:10Z</updated>
  </currentRevision>
</file>
*/

// JottaFile represents a Jottacloud file
type JottaFile struct {
    XMLName         xml.Name
    Name            string `xml:"name,attr"`
    Deleted         Flag   `xml:"deleted,attr"`
    PublicSharePath string `xml:"publicSharePath"`
    State           string `xml:"currentRevision>state"`
    CreatedAt       Time   `xml:"currentRevision>created"`
    ModifiedAt      Time   `xml:"currentRevision>modified"`
    Updated         Time   `xml:"currentRevision>updated"`
    Size            int64  `xml:"currentRevision>size"`
    MimeType        string `xml:"currentRevision>mime"`
    MD5             string `xml:"currentRevision>md5"`
}

// Error is a custom Error for wrapping Jottacloud error responses
type Error struct {
    StatusCode int    `xml:"code"`
    Message    string `xml:"message"`
    Reason     string `xml:"reason"`
    Cause      string `xml:"cause"`
}

// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
    out := fmt.Sprintf("error %d", e.StatusCode)
    if e.Message != "" {
        out += ": " + e.Message
    }
    if e.Reason != "" {
        out += fmt.Sprintf(" (%+v)", e.Reason)
    }
    return out
}

// AllocateFileRequest to prepare an upload to Jottacloud
type AllocateFileRequest struct {
    Bytes    int64  `json:"bytes"`
    Created  string `json:"created"`
    Md5      string `json:"md5"`
    Modified string `json:"modified"`
    Path     string `json:"path"`
}

// AllocateFileResponse for upload requests
type AllocateFileResponse struct {
    Name      string `json:"name"`
    Path      string `json:"path"`
    State     string `json:"state"`
    UploadID  string `json:"upload_id"`
    UploadURL string `json:"upload_url"`
    Bytes     int64  `json:"bytes"`
    ResumePos int64  `json:"resume_pos"`
}

// UploadResponse after an upload
type UploadResponse struct {
    Name      string      `json:"name"`
    Path      string      `json:"path"`
    Kind      string      `json:"kind"`
    ContentID string      `json:"content_id"`
    Bytes     int64       `json:"bytes"`
    Md5       string      `json:"md5"`
    Created   int64       `json:"created"`
    Modified  int64       `json:"modified"`
    Deleted   interface{} `json:"deleted"`
    Mime      string      `json:"mime"`
}
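
// Rough sketch, not part of the change, of how these three types fit
// together in the allocate-then-upload flow. The endpoint path and the
// doJSON/putRange helpers below are hypothetical; only the types are
// from this file:
//
//	alloc := AllocateFileRequest{Bytes: size, Md5: md5sum, Path: p}
//	var resp AllocateFileResponse
//	err := doJSON("POST", "/files/v1/allocate", &alloc, &resp) // hypothetical helper
//	// resp.UploadURL says where to send the bytes; resp.ResumePos says
//	// from which offset to resume an interrupted upload
//	var result UploadResponse
//	err = putRange(resp.UploadURL, resp.ResumePos, body, &result) // hypothetical helper
//	// result.Md5 and result.Bytes confirm what the server stored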
@@ -1,29 +0,0 @@
package api

import (
    "encoding/xml"
    "testing"
    "time"
)

func TestMountpointEmptyModificationTime(t *testing.T) {
    mountpoint := `
<mountPoint time="2018-08-12-T09:58:24Z" host="dn-157">
<name xml:space="preserve">Sync</name>
<path xml:space="preserve">/foo/Jotta</path>
<abspath xml:space="preserve">/foo/Jotta</abspath>
<size>0</size>
<modified></modified>
<device>Jotta</device>
<user>foo</user>
<metadata first="" max="" total="0" num_folders="0" num_files="0"/>
</mountPoint>
`
    var jf JottaFolder
    if err := xml.Unmarshal([]byte(mountpoint), &jf); err != nil {
        t.Fatal(err)
    }
    if !time.Time(jf.ModifiedAt).IsZero() {
        t.Errorf("got non-zero time, want zero")
    }
}
File diff suppressed because it is too large
@@ -1,42 +0,0 @@
package jottacloud

import (
    "crypto/md5"
    "fmt"
    "io"
    "testing"

    "github.com/ncw/rclone/lib/readers"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestReadMD5(t *testing.T) {
    // Check readMD5 for different size and threshold
    for _, size := range []int64{0, 1024, 10 * 1024, 100 * 1024} {
        t.Run(fmt.Sprintf("%d", size), func(t *testing.T) {
            hasher := md5.New()
            n, err := io.Copy(hasher, readers.NewPatternReader(size))
            require.NoError(t, err)
            assert.Equal(t, n, size)
            wantMD5 := fmt.Sprintf("%x", hasher.Sum(nil))
            for _, threshold := range []int64{512, 1024, 10 * 1024, 20 * 1024} {
                t.Run(fmt.Sprintf("%d", threshold), func(t *testing.T) {
                    in := readers.NewPatternReader(size)
                    gotMD5, out, cleanup, err := readMD5(in, size, threshold)
                    defer cleanup()
                    require.NoError(t, err)
                    assert.Equal(t, wantMD5, gotMD5)

                    // check md5hash of out
                    hasher := md5.New()
                    n, err := io.Copy(hasher, out)
                    require.NoError(t, err)
                    assert.Equal(t, n, size)
                    outMD5 := fmt.Sprintf("%x", hasher.Sum(nil))
                    assert.Equal(t, wantMD5, outMD5)
                })
            }
        })
    }
}
@@ -1,17 +0,0 @@
// Test Jottacloud filesystem interface
package jottacloud_test

import (
    "testing"

    "github.com/ncw/rclone/backend/jottacloud"
    "github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestJottacloud:",
        NilObject:  (*jottacloud.Object)(nil),
    })
}
@@ -1,77 +0,0 @@
/*
Translate file names for JottaCloud adapted from OneDrive


The following characters are JottaCloud reserved characters, and can't
be used in JottaCloud folder and file names.

jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"


*/

package jottacloud

import (
    "regexp"
    "strings"
)

// charMap holds replacements for characters
//
// Jottacloud has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
    charMap = map[rune]rune{
        '\\': '＼', // FULLWIDTH REVERSE SOLIDUS
        '*':  '＊', // FULLWIDTH ASTERISK
        '<':  '＜', // FULLWIDTH LESS-THAN SIGN
        '>':  '＞', // FULLWIDTH GREATER-THAN SIGN
        '?':  '？', // FULLWIDTH QUESTION MARK
        ':':  '：', // FULLWIDTH COLON
        ';':  '；', // FULLWIDTH SEMICOLON
        '|':  '｜', // FULLWIDTH VERTICAL LINE
        '"':  '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
        ' ':  '␠', // SYMBOL FOR SPACE
    }
    invCharMap           map[rune]rune
    fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
    fixEndingWithSpace   = regexp.MustCompile(` (/|$)`)
)

func init() {
    // Create inverse charMap
    invCharMap = make(map[rune]rune, len(charMap))
    for k, v := range charMap {
        invCharMap[v] = k
    }
}

// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
    // Filenames can't start with space
    in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
    // Filenames can't end with space
    in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
    return strings.Map(func(c rune) rune {
        if replacement, ok := charMap[c]; ok && c != ' ' {
            return replacement
        }
        return c
    }, in)
}

// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
    return strings.Map(func(c rune) rune {
        if replacement, ok := invCharMap[c]; ok {
            return replacement
        }
        return c
    }, in)
}
@@ -1,28 +0,0 @@
package jottacloud

import "testing"

func TestReplace(t *testing.T) {
    for _, test := range []struct {
        in  string
        out string
    }{
        {"", ""},
        {"abc 123", "abc 123"},
        {`\*<>?:;|"`, `＼＊＜＞？：；｜＂`},
        {`\*<>?:;|"\*<>?:;|"`, `＼＊＜＞？：；｜＂＼＊＜＞？：；｜＂`},
        {" leading space", "␠leading space"},
        {"trailing space ", "trailing space␠"},
        {" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
        {"trailing space /trailing space /trailing space ", "trailing space␠/trailing space␠/trailing space␠"},
    } {
        got := replaceReservedChars(test.in)
        if got != test.out {
            t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
        }
        got2 := restoreReservedChars(got)
        if got2 != test.in {
            t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
        }
    }
}
@@ -1,20 +0,0 @@
// +build windows plan9

package local

import (
    "time"
)

const haveLChtimes = false

// lChtimes changes the access and modification times of the named
// link, similar to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a
// less precise time unit.
// If there is an error, it will be of type *PathError.
func lChtimes(name string, atime time.Time, mtime time.Time) error {
    // Does nothing
    return nil
}
@@ -1,28 +0,0 @@
// +build !windows,!plan9

package local

import (
    "os"
    "time"

    "golang.org/x/sys/unix"
)

const haveLChtimes = true

// lChtimes changes the access and modification times of the named
// link, similar to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a
// less precise time unit.
// If there is an error, it will be of type *PathError.
func lChtimes(name string, atime time.Time, mtime time.Time) error {
    var utimes [2]unix.Timespec
    utimes[0] = unix.NsecToTimespec(atime.UnixNano())
    utimes[1] = unix.NsecToTimespec(mtime.UnixNano())
    if e := unix.UtimesNanoAt(unix.AT_FDCWD, name, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); e != nil {
        return &os.PathError{Op: "lchtimes", Path: name, Err: e}
    }
    return nil
}
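
// Usage sketch, not part of the change: stamping a symlink itself (not
// its target) with a given time, guarded by the haveLChtimes constant so
// the Windows/plan9 stub above is a silent no-op. The function name is
// made up for illustration.
func exampleTouchSymlink(name string, t time.Time) error {
    if !haveLChtimes {
        return nil // platform stub: nothing we can do
    }
    return lChtimes(name, t, t)
}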
@@ -2,7 +2,6 @@
package local

import (
    "bytes"
    "fmt"
    "io"
    "io/ioutil"
@@ -17,19 +16,23 @@ import (
    "unicode/utf8"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/accounting"
    "github.com/ncw/rclone/fs/config/configmap"
    "github.com/ncw/rclone/fs/config/configstruct"
    "github.com/ncw/rclone/fs/fserrors"
    "github.com/ncw/rclone/fs/config"
    "github.com/ncw/rclone/fs/config/flags"
    "github.com/ncw/rclone/fs/hash"
    "github.com/ncw/rclone/lib/file"
    "github.com/ncw/rclone/lib/readers"
    "github.com/pkg/errors"
    "google.golang.org/appengine/log"
)

var (
    followSymlinks = flags.BoolP("copy-links", "L", false, "Follow symlinks and copy the pointed to item.")
    skipSymlinks   = flags.BoolP("skip-links", "", false, "Don't warn about skipped symlinks.")
    noUTFNorm      = flags.BoolP("local-no-unicode-normalization", "", false, "Don't apply unicode normalization to paths and filenames")
    noCheckUpdated = flags.BoolP("local-no-check-updated", "", false, "Don't check to see if the files change during upload")
)

// Constants
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
const linkSuffix = ".rclonelink"    // The suffix added to a translated symbolic link

// Register with Fs
func init() {
@@ -38,90 +41,29 @@ func init() {
        Description: "Local Disk",
        NewFs:       NewFs,
        Options: []fs.Option{{
            Name: "nounc",
            Help: "Disable UNC (long path names) conversion on Windows",
            Name:     "nounc",
            Help:     "Disable UNC (long path names) conversion on Windows",
            Optional: true,
            Examples: []fs.OptionExample{{
                Value: "true",
                Help:  "Disables long file names",
            }},
        }, {
            Name:     "copy_links",
            Help:     "Follow symlinks and copy the pointed to item.",
            Default:  false,
            NoPrefix: true,
            ShortOpt: "L",
            Advanced: true,
        }, {
            Name:     "links",
            Help:     "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension",
            Default:  false,
            NoPrefix: true,
            ShortOpt: "l",
            Advanced: true,
        }, {
            Name: "skip_links",
            Help: `Don't warn about skipped symlinks.
This flag disables warning messages on skipped symlinks or junction
points, as you explicitly acknowledge that they should be skipped.`,
            Default:  false,
            NoPrefix: true,
            Advanced: true,
        }, {
            Name: "no_unicode_normalization",
            Help: `Don't apply unicode normalization to paths and filenames (Deprecated)

This flag is deprecated now. Rclone no longer normalizes unicode file
names, but it compares them with unicode normalization in the sync
routine instead.`,
            Default:  false,
            Advanced: true,
        }, {
            Name: "no_check_updated",
            Help: `Don't check to see if the files change during upload

Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy
- source file is being updated" if the file changes during upload.

However on some file systems this modification time check may fail (eg
[Glusterfs #2206](https://github.com/ncw/rclone/issues/2206)) so this
check can be disabled with this flag.`,
            Default:  false,
            Advanced: true,
        }, {
            Name:     "one_file_system",
            Help:     "Don't cross filesystem boundaries (unix/macOS only).",
            Default:  false,
            NoPrefix: true,
            ShortOpt: "x",
            Advanced: true,
        }},
    }
    fs.Register(fsi)
}

// Options defines the configuration for this backend
type Options struct {
    FollowSymlinks    bool `config:"copy_links"`
    TranslateSymlinks bool `config:"links"`
    SkipSymlinks      bool `config:"skip_links"`
    NoUTFNorm         bool `config:"no_unicode_normalization"`
    NoCheckUpdated    bool `config:"no_check_updated"`
    NoUNC             bool `config:"nounc"`
    OneFileSystem     bool `config:"one_file_system"`
}

// Fs represents a local filesystem rooted at root
type Fs struct {
    name        string              // the name of the remote
    root        string              // The root directory (OS path)
    opt         Options             // parsed config options
    features    *fs.Features        // optional features
    dev         uint64              // device number of root node
    precisionOk sync.Once           // Whether we need to read the precision
    precision   time.Duration       // precision of local filesystem
    wmu         sync.Mutex          // used for locking access to 'warned'.
    warned      map[string]struct{} // whether we have warned about this string
    nounc       bool                // Skip UNC conversion on Windows
    // do os.Lstat or os.Stat
    lstat    func(name string) (os.FileInfo, error)
    dirNames *mapper // directory name mapping
@@ -130,40 +72,30 @@ type Fs struct {

// Object represents a local filesystem object
type Object struct {
    fs             *Fs    // The Fs this object is part of
    remote         string // The remote path - properly UTF-8 encoded - for rclone
    path           string // The local path - may not be properly UTF-8 encoded - for OS
    size           int64  // file metadata - always present
    mode           os.FileMode
    modTime        time.Time
    hashes         map[hash.Type]string // Hashes
    translatedLink bool                 // Is this object a translated link
    fs      *Fs    // The Fs this object is part of
    remote  string // The remote path - properly UTF-8 encoded - for rclone
    path    string // The local path - may not be properly UTF-8 encoded - for OS
    size    int64  // file metadata - always present
    mode    os.FileMode
    modTime time.Time
    hashes  map[hash.Type]string // Hashes
}

// ------------------------------------------------------------

var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")

// NewFs constructs an Fs from the path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    }
    if opt.TranslateSymlinks && opt.FollowSymlinks {
        return nil, errLinksAndCopyLinks
    }

    if opt.NoUTFNorm {
        fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
func NewFs(name, root string) (fs.Fs, error) {
    var err error

    if *noUTFNorm {
        log.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
    }

    nounc := config.FileGet(name, "nounc")
    f := &Fs{
        name:     name,
        opt:      *opt,
        warned:   make(map[string]struct{}),
        nounc:    nounc == "true",
        dev:      devUnset,
        lstat:    os.Lstat,
        dirNames: newMapper(),
@@ -173,38 +105,24 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        CaseInsensitive:         f.caseInsensitive(),
        CanHaveEmptyDirectories: true,
    }).Fill(f)
    if opt.FollowSymlinks {
    if *followSymlinks {
        f.lstat = os.Stat
    }

    // Check to see if this points to a file
    fi, err := f.lstat(f.root)
    if err == nil {
        f.dev = readDevice(fi, f.opt.OneFileSystem)
        f.dev = readDevice(fi)
    }
    if err == nil && f.isRegular(fi.Mode()) {
    if err == nil && fi.Mode().IsRegular() {
        // It is a file, so use the parent as the root
        f.root = filepath.Dir(f.root)
        f.root, _ = getDirFile(f.root)
        // return an error with an fs which points to the parent
        return f, fs.ErrorIsFile
    }
    return f, nil
}

// Determine whether a file is a 'regular' file.
// Symlinks are regular files only if the TranslateSymlinks
// option is in effect.
func (f *Fs) isRegular(mode os.FileMode) bool {
    if !f.opt.TranslateSymlinks {
        return mode.IsRegular()
    }

    // fi.Mode().IsRegular() tests that all mode bits are zero
    // Since symlinks are accepted, test that all other bits are zero,
    // except the symlink bit
    return mode&os.ModeType&^os.ModeSymlink == 0
}
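
// Worked example, not part of the change, of what the mask above lets
// through: clearing os.ModeSymlink out of os.ModeType before comparing
// with zero accepts plain files and symlinks while still rejecting
// directories, pipes, sockets and devices.
//
//	f.opt.TranslateSymlinks = true
//	f.isRegular(0)                // true  - plain file
//	f.isRegular(os.ModeSymlink)   // true  - symlink accepted
//	f.isRegular(os.ModeDir)       // false - directory
//	f.isRegular(os.ModeNamedPipe) // false - other special file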

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
    return f.name
@@ -235,38 +153,18 @@ func (f *Fs) caseInsensitive() bool {
    return runtime.GOOS == "windows" || runtime.GOOS == "darwin"
}

// translateLink checks whether the remote is a translated link
// and returns a new path, removing the suffix as needed.
// It also returns whether this is a translated link at all.
//
// For regular files, dstPath is returned unchanged.
func translateLink(remote, dstPath string) (newDstPath string, isTranslatedLink bool) {
    isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
    newDstPath = strings.TrimSuffix(dstPath, linkSuffix)
    return newDstPath, isTranslatedLink
}
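
// Example, not part of the change: translateLink only inspects the remote
// name for the linkSuffix and trims it from the OS path, so a regular
// file comes back unchanged. The paths are made up for illustration.
//
//	translateLink("a/b.txt", "/root/a/b.txt")
//	// -> "/root/a/b.txt", false
//	translateLink("a/b.txt"+linkSuffix, "/root/a/b.txt"+linkSuffix)
//	// -> "/root/a/b.txt", true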

// newObject makes a half completed Object
//
// if dstPath is empty then it is made from remote
func (f *Fs) newObject(remote, dstPath string) *Object {
    translatedLink := false

    if dstPath == "" {
        dstPath = f.cleanPath(filepath.Join(f.root, remote))
    }
    remote = f.cleanRemote(remote)

    if f.opt.TranslateSymlinks {
        // Possibly receive a new name for dstPath
        dstPath, translatedLink = translateLink(remote, dstPath)
    }

    return &Object{
        fs:             f,
        remote:         remote,
        path:           dstPath,
        translatedLink: translatedLink,
        fs:     f,
        remote: remote,
        path:   dstPath,
    }
}

@@ -288,11 +186,6 @@ func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Obj
        }
        return nil, err
    }
    // Handle the odd case, that a symlink was specified by name without the link suffix
    if o.fs.opt.TranslateSymlinks && o.mode&os.ModeSymlink != 0 && !o.translatedLink {
        return nil, fs.ErrorObjectNotFound
    }

    }
    if o.mode.IsDir() {
        return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
@@ -316,7 +209,6 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {

    dir = f.dirNames.Load(dir)
    fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
    remote := f.cleanRemote(dir)
@@ -351,15 +243,8 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
        newRemote := path.Join(remote, name)
        newPath := filepath.Join(fsDirPath, name)
        // Follow symlinks if required
        if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
        if *followSymlinks && (mode&os.ModeSymlink) != 0 {
            fi, err = os.Stat(newPath)
            if os.IsNotExist(err) {
                // Skip bad symlinks
                err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
                fs.Errorf(newRemote, "Listing error: %v", err)
                accounting.Stats.Error(err)
                continue
            }
            if err != nil {
                return nil, err
            }
@@ -368,15 +253,11 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
        if fi.IsDir() {
            // Ignore directories which are symlinks. These are junction points under windows which
            // are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
            if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
            if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi) {
                d := fs.NewDir(f.dirNames.Save(newRemote, f.cleanRemote(newRemote)), fi.ModTime())
                entries = append(entries, d)
            }
        } else {
            // Check whether this link should be translated
            if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
                newRemote += linkSuffix
            }
            fso, err := f.newObjectWithInfo(newRemote, newPath, fi)
            if err != nil {
                return nil, err
@@ -476,7 +357,7 @@ func (f *Fs) Mkdir(dir string) error {
        if err != nil {
            return err
        }
        f.dev = readDevice(fi, f.opt.OneFileSystem)
        f.dev = readDevice(fi)
    }
    return nil
}
@@ -590,7 +471,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
        // OK
    } else if err != nil {
        return nil, err
    } else if !dstObj.fs.isRegular(dstObj.mode) {
    } else if !dstObj.mode.IsRegular() {
        // It isn't a file
        return nil, errors.New("can't move file onto non-file")
    }
@@ -649,7 +530,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
    }

    // Create parent of destination
    dstParentPath := filepath.Dir(dstPath)
    dstParentPath, _ := getDirFile(dstPath)
    err = os.MkdirAll(dstParentPath, 0777)
    if err != nil {
        return err
@@ -712,13 +593,7 @@ func (o *Object) Hash(r hash.Type) (string, error) {
    o.fs.objectHashesMu.Unlock()

    if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
        var in io.ReadCloser

        if !o.translatedLink {
            in, err = file.Open(o.path)
        } else {
            in, err = o.openTranslatedLink(0, -1)
        }
        in, err := os.Open(o.path)
        if err != nil {
            return "", errors.Wrap(err, "hash: failed to open")
        }
@@ -749,12 +624,7 @@ func (o *Object) ModTime() time.Time {

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
    var err error
    if o.translatedLink {
        err = lChtimes(o.path, modTime, modTime)
    } else {
        err = os.Chtimes(o.path, modTime, modTime)
    }
    err := os.Chtimes(o.path, modTime, modTime)
    if err != nil {
        return err
    }
@@ -772,8 +642,8 @@ func (o *Object) Storable() bool {
        }
    }
    mode := o.mode
    if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {
        if !o.fs.opt.SkipSymlinks {
    if mode&os.ModeSymlink != 0 {
        if !*skipSymlinks {
            fs.Logf(o, "Can't follow symlink without -L/--copy-links")
        }
        return false
@@ -798,7 +668,7 @@ type localOpenFile struct {

// Read bytes from the object - see io.Reader
func (file *localOpenFile) Read(p []byte) (n int, err error) {
    if !file.o.fs.opt.NoCheckUpdated {
    if !*noCheckUpdated {
        // Check if file has the same size and modTime
        fi, err := file.fd.Stat()
        if err != nil {
@@ -833,16 +703,6 @@ func (file *localOpenFile) Close() (err error) {
    return err
}

// openTranslatedLink returns a ReadCloser containing the contents of a symbolic link
func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err error) {
    // Read the link and return the destination as the contents of the object
    linkdst, err := os.Readlink(o.path)
    if err != nil {
        return nil, err
    }
    return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
}

// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
    var offset, limit int64 = 0, -1
@@ -862,12 +722,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
        }
    }

    // Handle a translated link
    if o.translatedLink {
        return o.openTranslatedLink(offset, limit)
    }

    fd, err := file.Open(o.path)
    fd, err := os.Open(o.path)
    if err != nil {
        return
    }
@@ -894,23 +749,12 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {

// mkdirAll makes all the directories needed to store the object
func (o *Object) mkdirAll() error {
    dir := filepath.Dir(o.path)
    dir, _ := getDirFile(o.path)
    return os.MkdirAll(dir, 0777)
}

type nopWriterCloser struct {
    *bytes.Buffer
}

func (nwc nopWriterCloser) Close() error {
    // noop
    return nil
}

// Update the object from in with modTime and size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
    var out io.WriteCloser

    hashes := hash.Supported
    for _, option := range options {
        switch x := option.(type) {
@@ -924,23 +768,9 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
        return err
    }

    var symlinkData bytes.Buffer
    // If the object is a regular file, create it.
    // If it is a translated link, just read in the contents, and
    // then create a symlink
    if !o.translatedLink {
        f, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
        if err != nil {
            return err
        }
        // Pre-allocate the file for performance reasons
        err = preAllocate(src.Size(), f)
        if err != nil {
            fs.Debugf(o, "Failed to pre-allocate: %v", err)
        }
        out = f
    } else {
        out = nopWriterCloser{&symlinkData}
    out, err := os.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
    if err != nil {
        return err
    }

    // Calculate the hash of the object we are reading as we go along
@@ -955,26 +785,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
    if err == nil {
        err = closeErr
    }

    if o.translatedLink {
        if err == nil {
            // Remove any current symlink or file, if one exists
            if _, err := os.Lstat(o.path); err == nil {
                if removeErr := os.Remove(o.path); removeErr != nil {
                    fs.Errorf(o, "Failed to remove previous file: %v", removeErr)
                    return removeErr
                }
            }
            // Use the contents of the copied object to create a symlink
            err = os.Symlink(symlinkData.String(), o.path)
        }

        // only continue if symlink creation succeeded
        if err != nil {
            return err
        }
    }

    if err != nil {
        fs.Logf(o, "Removing partially written file on error: %v", err)
        if removeErr := os.Remove(o.path); removeErr != nil {
@@ -1027,6 +837,17 @@ func (o *Object) Remove() error {
    return remove(o.path)
}

// getDirFile returns the directory and file from an OS path. Assumes
// os.PathSeparator is used.
func getDirFile(s string) (string, string) {
    i := strings.LastIndex(s, string(os.PathSeparator))
    dir, file := s[:i], s[i+1:]
    if dir == "" {
        dir = string(os.PathSeparator)
    }
    return dir, file
}

// cleanPathFragment cleans an OS path fragment which is part of a
// bigger path and not necessarily absolute
func cleanPathFragment(s string) string {
@@ -1057,7 +878,7 @@ func (f *Fs) cleanPath(s string) string {
            s = s2
        }
    }
    if !f.opt.NoUNC {
    if !f.nounc {
        // Convert to UNC
        s = uncPath(s)
    }

@@ -1,19 +1,13 @@
package local

import (
    "io/ioutil"
    "os"
    "path"
    "path/filepath"
    "runtime"
    "testing"
    "time"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config/configmap"
    "github.com/ncw/rclone/fs/hash"
    "github.com/ncw/rclone/fstest"
    "github.com/ncw/rclone/lib/file"
    "github.com/ncw/rclone/lib/readers"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
@@ -44,17 +38,14 @@ func TestUpdatingCheck(t *testing.T) {
    filePath := "sub dir/local test"
    r.WriteFile(filePath, "content", time.Now())

    fd, err := file.Open(path.Join(r.LocalName, filePath))
    fd, err := os.Open(path.Join(r.LocalName, filePath))
    if err != nil {
        t.Fatalf("failed opening file %q: %v", filePath, err)
    }
    defer func() {
        require.NoError(t, fd.Close())
    }()

    fi, err := fd.Stat()
    require.NoError(t, err)
    o := &Object{size: fi.Size(), modTime: fi.ModTime(), fs: &Fs{}}
    o := &Object{size: fi.Size(), modTime: fi.ModTime()}
    wrappedFd := readers.NewLimitedReadCloser(fd, -1)
    hash, err := hash.NewMultiHasherTypes(hash.Supported)
    require.NoError(t, err)
@@ -74,115 +65,14 @@ func TestUpdatingCheck(t *testing.T) {
    require.Errorf(t, err, "can't copy - source file is being updated")

    // turn the checking off and try again
    in.o.fs.opt.NoCheckUpdated = true

    *noCheckUpdated = true
    defer func() {
        *noCheckUpdated = false
    }()

    r.WriteFile(filePath, "content updated", time.Now())
    _, err = in.Read(buf)
    require.NoError(t, err)

}

func TestSymlink(t *testing.T) {
    r := fstest.NewRun(t)
    defer r.Finalise()
    f := r.Flocal.(*Fs)
    dir := f.root

    // Write a file
    modTime1 := fstest.Time("2001-02-03T04:05:10.123123123Z")
    file1 := r.WriteFile("file.txt", "hello", modTime1)

    // Write a symlink
    modTime2 := fstest.Time("2002-02-03T04:05:10.123123123Z")
    symlinkPath := filepath.Join(dir, "symlink.txt")
    require.NoError(t, os.Symlink("file.txt", symlinkPath))
    require.NoError(t, lChtimes(symlinkPath, modTime2, modTime2))

    // Object viewed as symlink
    file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
    if runtime.GOOS == "windows" {
        file2.Size = 0 // symlinks are 0 length under Windows
    }

    // Object viewed as destination
    file2d := fstest.NewItem("symlink.txt", "hello", modTime1)

    // Check with no symlink flags
    fstest.CheckItems(t, r.Flocal, file1)
    fstest.CheckItems(t, r.Fremote)

    // Set fs into "-L" mode
    f.opt.FollowSymlinks = true
    f.opt.TranslateSymlinks = false
    f.lstat = os.Stat

    fstest.CheckItems(t, r.Flocal, file1, file2d)
    fstest.CheckItems(t, r.Fremote)

    // Set fs into "-l" mode
    f.opt.FollowSymlinks = false
    f.opt.TranslateSymlinks = true
    f.lstat = os.Lstat

    fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2}, nil, fs.ModTimeNotSupported)
    if haveLChtimes {
        fstest.CheckItems(t, r.Flocal, file1, file2)
    }

    // Create a symlink
    modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
    file3 := r.WriteObjectTo(r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
    if runtime.GOOS == "windows" {
        file3.Size = 0 // symlinks are 0 length under Windows
    }
    fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
    if haveLChtimes {
        fstest.CheckItems(t, r.Flocal, file1, file2, file3)
    }

    // Check it got the correct contents
    symlinkPath = filepath.Join(dir, "symlink2.txt")
    fi, err := os.Lstat(symlinkPath)
    require.NoError(t, err)
    assert.False(t, fi.Mode().IsRegular())
    linkText, err := os.Readlink(symlinkPath)
    require.NoError(t, err)
    assert.Equal(t, "file.txt", linkText)

    // Check that NewObject gets the correct object
    o, err := r.Flocal.NewObject("symlink2.txt" + linkSuffix)
    require.NoError(t, err)
    assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
    if runtime.GOOS != "windows" {
        assert.Equal(t, int64(8), o.Size())
    }

    // Check that NewObject doesn't see the non suffixed version
    _, err = r.Flocal.NewObject("symlink2.txt")
    require.Equal(t, fs.ErrorObjectNotFound, err)

    // Check reading the object
    in, err := o.Open()
    require.NoError(t, err)
    contents, err := ioutil.ReadAll(in)
    require.NoError(t, err)
    require.Equal(t, "file.txt", string(contents))
    require.NoError(t, in.Close())

    // Check reading the object with range
    in, err = o.Open(&fs.RangeOption{Start: 2, End: 5})
    require.NoError(t, err)
    contents, err = ioutil.ReadAll(in)
    require.NoError(t, err)
    require.Equal(t, "file.txt"[2:5+1], string(contents))
    require.NoError(t, in.Close())
}

func TestSymlinkError(t *testing.T) {
    m := configmap.Simple{
        "links":      "true",
        "copy_links": "true",
    }
    _, err := NewFs("local", "/", m)
    assert.Equal(t, errLinksAndCopyLinks, err)
}

@@ -1,10 +0,0 @@
//+build !windows,!linux

package local

import "os"

// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
    return nil
}
@@ -1,22 +0,0 @@
//+build linux

package local

import (
    "os"

    "golang.org/x/sys/unix"
)

// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
    if size <= 0 {
        return nil
    }
    err := unix.Fallocate(int(out.Fd()), unix.FALLOC_FL_KEEP_SIZE, 0, size)
    // FIXME could be doing something here
    // if err == unix.ENOSPC {
    //     log.Printf("No space")
    // }
    return err
}
@@ -1,79 +0,0 @@
//+build windows

package local

import (
    "os"
    "syscall"
    "unsafe"

    "github.com/pkg/errors"
    "golang.org/x/sys/windows"
)

var (
    ntdll                        = windows.NewLazySystemDLL("ntdll.dll")
    ntQueryVolumeInformationFile = ntdll.NewProc("NtQueryVolumeInformationFile")
    ntSetInformationFile         = ntdll.NewProc("NtSetInformationFile")
)

type fileAllocationInformation struct {
    AllocationSize uint64
}

type fileFsSizeInformation struct {
    TotalAllocationUnits     uint64
    AvailableAllocationUnits uint64
    SectorsPerAllocationUnit uint32
    BytesPerSector           uint32
}

type ioStatusBlock struct {
    Status, Information uintptr
}

// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
    if size <= 0 {
        return nil
    }

    var (
        iosb       ioStatusBlock
        fsSizeInfo fileFsSizeInformation
        allocInfo  fileAllocationInformation
    )

    // Query info about the block sizes on the file system
    _, _, e1 := ntQueryVolumeInformationFile.Call(
        uintptr(out.Fd()),
        uintptr(unsafe.Pointer(&iosb)),
        uintptr(unsafe.Pointer(&fsSizeInfo)),
        uintptr(unsafe.Sizeof(fsSizeInfo)),
        uintptr(3), // FileFsSizeInformation
    )
    if e1 != nil && e1 != syscall.Errno(0) {
        return errors.Wrap(e1, "preAllocate NtQueryVolumeInformationFile failed")
    }

    // Calculate the allocation size
    clusterSize := uint64(fsSizeInfo.BytesPerSector) * uint64(fsSizeInfo.SectorsPerAllocationUnit)
    if clusterSize <= 0 {
        return errors.Errorf("preAllocate clusterSize %d <= 0", clusterSize)
    }
    allocInfo.AllocationSize = (1 + uint64(size-1)/clusterSize) * clusterSize

    // Ask for the allocation
    _, _, e1 = ntSetInformationFile.Call(
        uintptr(out.Fd()),
        uintptr(unsafe.Pointer(&iosb)),
        uintptr(unsafe.Pointer(&allocInfo)),
        uintptr(unsafe.Sizeof(allocInfo)),
        uintptr(19), // FileAllocationInformation
    )
    if e1 != nil && e1 != syscall.Errno(0) {
        return errors.Wrap(e1, "preAllocate NtSetInformationFile failed")
    }

    return nil
}
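
// Worked example, not part of the change, of the cluster rounding above:
// with 512-byte sectors and 8 sectors per allocation unit the cluster
// size is 512*8 = 4096 bytes, so preallocating a 10000 byte file asks
// for (1 + (10000-1)/4096) * 4096 = 3 * 4096 = 12288 bytes - the
// smallest whole number of clusters that covers the requested size.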
@@ -8,6 +8,6 @@ import "os"

// readDevice turns a valid os.FileInfo into a device number,
// returning devUnset if it fails.
func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
func readDevice(fi os.FileInfo) uint64 {
    return devUnset
}

@@ -9,12 +9,17 @@ import (
    "syscall"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config/flags"
)

var (
    oneFileSystem = flags.BoolP("one-file-system", "x", false, "Don't cross filesystem boundaries.")
)

// readDevice turns a valid os.FileInfo into a device number,
// returning devUnset if it fails.
func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
    if !oneFileSystem {
func readDevice(fi os.FileInfo) uint64 {
    if !*oneFileSystem {
        return devUnset
    }
    statT, ok := fi.Sys().(*syscall.Stat_t)

@@ -24,8 +24,8 @@ import (
    "time"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config/configmap"
    "github.com/ncw/rclone/fs/config/configstruct"
    "github.com/ncw/rclone/fs/config"
    "github.com/ncw/rclone/fs/config/flags"
    "github.com/ncw/rclone/fs/config/obscure"
    "github.com/ncw/rclone/fs/fshttp"
    "github.com/ncw/rclone/fs/hash"
@@ -39,10 +39,12 @@ const (
    minSleep      = 10 * time.Millisecond
    maxSleep      = 2 * time.Second
    eventWaitTime = 500 * time.Millisecond
    decayConstant = 2 // bigger for slower decay, exponential
    decayConstant = 2    // bigger for slower decay, exponential
    useTrash      = true // FIXME make configurable - rclone global
)

var (
    megaDebug   = flags.BoolP("mega-debug", "", false, "If set then output more debug from mega.")
    megaCacheMu sync.Mutex                // mutex for the below
    megaCache   = map[string]*mega.Mega{} // cache logged in Mega's by user
)
@@ -56,46 +58,20 @@ func init() {
        Options: []fs.Option{{
            Name:     "user",
            Help:     "User name",
            Required: true,
            Optional: true,
        }, {
            Name:       "pass",
            Help:       "Password.",
            Required:   true,
            Optional:   true,
            IsPassword: true,
        }, {
            Name: "debug",
            Help: `Output more debug from Mega.

If this flag is set (along with -vv) it will print further debugging
information from the mega backend.`,
            Default:  false,
            Advanced: true,
        }, {
            Name: "hard_delete",
            Help: `Delete files permanently rather than putting them into the trash.

Normally the mega backend will put all deletions into the trash rather
than permanently deleting them. If you specify this then rclone will
permanently delete objects instead.`,
            Default:  false,
            Advanced: true,
        }},
    })
}

// Options defines the configuration for this backend
type Options struct {
    User       string `config:"user"`
    Pass       string `config:"pass"`
    Debug      bool   `config:"debug"`
    HardDelete bool   `config:"hard_delete"`
}

// Fs represents a remote mega
type Fs struct {
    name     string       // name of this remote
    root     string       // the path we are working on
    opt      Options      // parsed config options
    features *fs.Features // optional features
    srv      *mega.Mega   // the connection to the server
    pacer    *pacer.Pacer // pacer for API calls
@@ -169,16 +145,12 @@ func (f *Fs) readMetaDataForPath(remote string) (info *mega.Node, err error) {
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    }
    if opt.Pass != "" {
func NewFs(name, root string) (fs.Fs, error) {
    user := config.FileGet(name, "user")
    pass := config.FileGet(name, "pass")
    if pass != "" {
        var err error
        opt.Pass, err = obscure.Reveal(opt.Pass)
        pass, err = obscure.Reveal(pass)
        if err != nil {
            return nil, errors.Wrap(err, "couldn't decrypt password")
        }
@@ -191,31 +163,30 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    // them up between different remotes.
    megaCacheMu.Lock()
    defer megaCacheMu.Unlock()
    srv := megaCache[opt.User]
    srv := megaCache[user]
    if srv == nil {
        srv = mega.New().SetClient(fshttp.NewClient(fs.Config))
        srv.SetRetries(fs.Config.LowLevelRetries) // let mega do the low level retries
        srv.SetLogger(func(format string, v ...interface{}) {
            fs.Infof("*go-mega*", format, v...)
        })
        if opt.Debug {
        if *megaDebug {
            srv.SetDebugger(func(format string, v ...interface{}) {
                fs.Debugf("*go-mega*", format, v...)
            })
        }

        err := srv.Login(opt.User, opt.Pass)
        err := srv.Login(user, pass)
        if err != nil {
            return nil, errors.Wrap(err, "couldn't login")
        }
        megaCache[opt.User] = srv
        megaCache[user] = srv
    }

    root = parsePath(root)
    f := &Fs{
        name:  name,
        root:  root,
        opt:   *opt,
        srv:   srv,
        pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
    }
@@ -225,7 +196,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    }).Fill(f)

    // Find the root node and check if it is a file or not
    _, err = f.findRoot(false)
    _, err := f.findRoot(false)
    switch err {
    case nil:
        // root node found and is a directory
@@ -569,7 +540,7 @@ func (f *Fs) Mkdir(dir string) error {
// deleteNode removes a file or directory, observing useTrash
func (f *Fs) deleteNode(node *mega.Node) (err error) {
    err = f.pacer.Call(func() (bool, error) {
        err = f.srv.Delete(node, f.opt.HardDelete)
        err = f.srv.Delete(node, !useTrash)
        return shouldRetry(err)
    })
    return err

@@ -2,16 +2,10 @@

package api

import (
    "strings"
    "time"
)
import "time"

const (
    timeFormat = `"` + time.RFC3339 + `"`

    // PackageTypeOneNote is the package type value for OneNote files
    PackageTypeOneNote = "oneNote"
)

// Error is returned from one drive when things go wrong
@@ -94,27 +88,9 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
// ItemReference groups data needed to reference a OneDrive item
// across the service into a single structure.
type ItemReference struct {
    DriveID   string `json:"driveId"`   // Unique identifier for the Drive that contains the item. Read-only.
    ID        string `json:"id"`        // Unique identifier for the item. Read/Write.
    Path      string `json:"path"`      // Path that can be used to navigate to the item. Read/Write.
    DriveType string `json:"driveType"` // Type of the drive. Read-only.
}

// RemoteItemFacet groups data needed to reference a OneDrive remote item
type RemoteItemFacet struct {
    ID string `json:"id"` // The unique identifier of the item within the remote Drive. Read-only.
    Name string `json:"name"` // The name of the item (filename and extension). Read-write.
    CreatedBy IdentitySet `json:"createdBy"` // Identity of the user, device, and application which created the item. Read-only.
    LastModifiedBy IdentitySet `json:"lastModifiedBy"` // Identity of the user, device, and application which last modified the item. Read-only.
    CreatedDateTime Timestamp `json:"createdDateTime"` // Date and time of item creation. Read-only.
    LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // Date and time the item was last modified. Read-only.
    Folder *FolderFacet `json:"folder"` // Folder metadata, if the item is a folder. Read-only.
    File *FileFacet `json:"file"` // File metadata, if the item is a file. Read-only.
    Package *PackageFacet `json:"package"` // If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others. Read-only.
    FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
    ParentReference *ItemReference `json:"parentReference"` // Parent information, if the item has a parent. Read-write.
    Size int64 `json:"size"` // Size of the item in bytes. Read-only.
    WebURL string `json:"webUrl"` // URL that displays the resource in the browser. Read-only.
    DriveID string `json:"driveId"` // Unique identifier for the Drive that contains the item. Read-only.
    ID string `json:"id"` // Unique identifier for the item. Read/Write.
    Path string `json:"path"` // Path that can be used to navigate to the item. Read/Write.
}

// FolderFacet groups folder-related data on OneDrive into a single structure
@@ -151,13 +127,6 @@ type FileSystemInfoFacet struct {
type DeletedFacet struct {
}

// PackageFacet indicates that a DriveItem is the top level item
// in a "package" or a collection of items that should be treated as a collection instead of individual items.
// `oneNote` is the only currently defined value.
type PackageFacet struct {
    Type string `json:"type"`
}

// Item represents metadata for an item in OneDrive
type Item struct {
    ID string `json:"id"` // The unique identifier of the item within the Drive. Read-only.
@@ -174,14 +143,12 @@ type Item struct {
    Description string `json:"description"` // Provide a user-visible description of the item. Read-write.
    Folder *FolderFacet `json:"folder"` // Folder metadata, if the item is a folder. Read-only.
    File *FileFacet `json:"file"` // File metadata, if the item is a file. Read-only.
    RemoteItem *RemoteItemFacet `json:"remoteItem"` // Remote item metadata, if the item is a remote shared item. Read-only.
    FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
    // Image *ImageFacet `json:"image"` // Image metadata, if the item is an image. Read-only.
    // Photo *PhotoFacet `json:"photo"` // Photo metadata, if the item is a photo. Read-only.
    // Audio *AudioFacet `json:"audio"` // Audio metadata, if the item is an audio file. Read-only.
    // Video *VideoFacet `json:"video"` // Video metadata, if the item is a video. Read-only.
    // Location *LocationFacet `json:"location"` // Location metadata, if the item has location data. Read-only.
    Package *PackageFacet `json:"package"` // If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others. Read-only.
    Deleted *DeletedFacet `json:"deleted"` // Information about the deleted state of the item. Read-only.
}

@@ -250,28 +217,6 @@ type MoveItemRequest struct {
    FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo,omitempty"` // File system information on client. Read-write.
}

// CreateShareLinkRequest is the request to create a sharing link
// Always Type:view and Scope:anonymous for public sharing
type CreateShareLinkRequest struct {
    Type  string `json:"type"`            // Link type in View, Edit or Embed
    Scope string `json:"scope,omitempty"` // Optional. Scope in anonymous, organization
}

// CreateShareLinkResponse is the response from CreateShareLinkRequest
type CreateShareLinkResponse struct {
    ID    string   `json:"id"`
    Roles []string `json:"roles"`
    Link  struct {
        Type        string `json:"type"`
        Scope       string `json:"scope"`
        WebURL      string `json:"webUrl"`
        Application struct {
            ID          string `json:"id"`
            DisplayName string `json:"displayName"`
        } `json:"application"`
    } `json:"link"`
}

// AsyncOperationStatus provides information on the status of an asynchronous job's progress.
//
// The following API calls return AsyncOperationStatus resources:
@@ -279,134 +224,7 @@ type CreateShareLinkResponse struct {
// Copy Item
// Upload From URL
type AsyncOperationStatus struct {
    Operation          string  `json:"operation"`          // The type of job being run.
    PercentageComplete float64 `json:"percentageComplete"` // A float value between 0 and 100 that indicates the percentage complete.
    Status             string  `json:"status"`             // A string value that maps to an enumeration of possible values about the status of the job. "notStarted | inProgress | completed | updating | failed | deletePending | deleteFailed | waiting"
}

// GetID returns a normalized ID of the item.
// If DriveID is known it will be prefixed to the ID with a # separator.
// It can be parsed using onedrive.parseNormalizedID(normalizedID).
func (i *Item) GetID() string {
    if i.IsRemote() && i.RemoteItem.ID != "" {
        return i.RemoteItem.ParentReference.DriveID + "#" + i.RemoteItem.ID
    } else if i.ParentReference != nil && strings.Index(i.ID, "#") == -1 {
        return i.ParentReference.DriveID + "#" + i.ID
    }
    return i.ID
}
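
// Sketch, not part of the change: splitting a normalized ID back into
// its parts. The real parser is parseNormalizedID in the onedrive
// package; this stand-in only illustrates the "driveID#itemID" shape
// produced by GetID above and assumes the strings import.
func exampleSplitNormalizedID(id string) (driveID, itemID string) {
    if i := strings.Index(id, "#"); i >= 0 {
        return id[:i], id[i+1:]
    }
    return "", id
}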

// GetDriveID returns the normalized DriveID of the item
func (i *Item) GetDriveID() string {
    return i.GetParentReferance().DriveID
}

// GetName returns a normalized Name of the item
func (i *Item) GetName() string {
    if i.IsRemote() && i.RemoteItem.Name != "" {
        return i.RemoteItem.Name
    }
    return i.Name
}

// GetFolder returns a normalized Folder of the item
func (i *Item) GetFolder() *FolderFacet {
    if i.IsRemote() && i.RemoteItem.Folder != nil {
        return i.RemoteItem.Folder
    }
    return i.Folder
}

// GetPackage returns a normalized Package of the item
func (i *Item) GetPackage() *PackageFacet {
    if i.IsRemote() && i.RemoteItem.Package != nil {
        return i.RemoteItem.Package
    }
    return i.Package
}

// GetPackageType returns the package type of the item if available,
// otherwise ""
func (i *Item) GetPackageType() string {
    pack := i.GetPackage()
    if pack == nil {
        return ""
    }
    return pack.Type
}

// GetFile returns a normalized File of the item
func (i *Item) GetFile() *FileFacet {
    if i.IsRemote() && i.RemoteItem.File != nil {
        return i.RemoteItem.File
    }
    return i.File
}

// GetFileSystemInfo returns a normalized FileSystemInfo of the item
func (i *Item) GetFileSystemInfo() *FileSystemInfoFacet {
    if i.IsRemote() && i.RemoteItem.FileSystemInfo != nil {
        return i.RemoteItem.FileSystemInfo
    }
    return i.FileSystemInfo
}

// GetSize returns a normalized Size of the item
func (i *Item) GetSize() int64 {
    if i.IsRemote() && i.RemoteItem.Size != 0 {
        return i.RemoteItem.Size
    }
    return i.Size
}

// GetWebURL returns a normalized WebURL of the item
func (i *Item) GetWebURL() string {
    if i.IsRemote() && i.RemoteItem.WebURL != "" {
        return i.RemoteItem.WebURL
    }
    return i.WebURL
}

// GetCreatedBy returns a normalized CreatedBy of the item
func (i *Item) GetCreatedBy() IdentitySet {
    if i.IsRemote() && i.RemoteItem.CreatedBy != (IdentitySet{}) {
        return i.RemoteItem.CreatedBy
    }
    return i.CreatedBy
}

// GetLastModifiedBy returns a normalized LastModifiedBy of the item
func (i *Item) GetLastModifiedBy() IdentitySet {
    if i.IsRemote() && i.RemoteItem.LastModifiedBy != (IdentitySet{}) {
        return i.RemoteItem.LastModifiedBy
    }
    return i.LastModifiedBy
}

// GetCreatedDateTime returns a normalized CreatedDateTime of the item
func (i *Item) GetCreatedDateTime() Timestamp {
    if i.IsRemote() && i.RemoteItem.CreatedDateTime != (Timestamp{}) {
        return i.RemoteItem.CreatedDateTime
    }
    return i.CreatedDateTime
}

// GetLastModifiedDateTime returns a normalized LastModifiedDateTime of the item
func (i *Item) GetLastModifiedDateTime() Timestamp {
    if i.IsRemote() && i.RemoteItem.LastModifiedDateTime != (Timestamp{}) {
        return i.RemoteItem.LastModifiedDateTime
    }
    return i.LastModifiedDateTime
}

// GetParentReferance returns a normalized ParentReference of the item
func (i *Item) GetParentReferance() *ItemReference {
    if i.IsRemote() && i.ParentReference == nil {
        return i.RemoteItem.ParentReference
    }
    return i.ParentReference
}

// IsRemote checks if the item is a remote item
func (i *Item) IsRemote() bool {
    return i.RemoteItem != nil
}

File diff suppressed because it is too large
@@ -1,10 +1,10 @@
// Test OneDrive filesystem interface
package onedrive
package onedrive_test

import (
"testing"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/backend/onedrive"
"github.com/ncw/rclone/fstest/fstests"
)

@@ -12,15 +12,6 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestOneDrive:",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
CeilChunkSize: fstests.NextMultipleOf(chunkSizeMultiple),
},
NilObject: (*onedrive.Object)(nil),
})
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}

var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
@@ -140,7 +140,7 @@ func TestQuickXorHashByBlock(t *testing.T) {
got := h.Sum(nil)
want, err := base64.StdEncoding.DecodeString(test.out)
require.NoError(t, err, what)
assert.Equal(t, want, got, test.size, what)
assert.Equal(t, want, got[:], test.size, what)
}
}
}
@@ -6,22 +6,19 @@ import (
"io"
"mime/multipart"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/readers"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
)
@@ -40,30 +37,23 @@ func init() {
Description: "OpenDrive",
NewFs: NewFs,
Options: []fs.Option{{
Name: "username",
Help: "Username",
Required: true,
Name: "username",
Help: "Username",
}, {
Name: "password",
Help: "Password.",
IsPassword: true,
Required: true,
}},
})
}

// Options defines the configuration for this backend
type Options struct {
UserName string `config:"username"`
Password string `config:"password"`
}

// Fs represents a remote server
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
username string // account name
password string // auth key0
srv *rest.Client // the connection to the server
pacer *pacer.Pacer // To pace and retry the API calls
session UserSessionInfo // contains the session data
@@ -120,31 +110,27 @@ func (f *Fs) DirCacheFlush() {
}

// NewFs contstructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
func NewFs(name, root string) (fs.Fs, error) {
root = parsePath(root)
if opt.UserName == "" {
username := config.FileGet(name, "username")
if username == "" {
return nil, errors.New("username not found")
}
opt.Password, err = obscure.Reveal(opt.Password)
password, err := obscure.Reveal(config.FileGet(name, "password"))
if err != nil {
return nil, errors.New("password could not revealed")
return nil, errors.New("password coudl not revealed")
}
if opt.Password == "" {
if password == "" {
return nil, errors.New("password not found")
}

f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
name: name,
username: username,
password: password,
root: root,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}

f.dirCache = dircache.New(root, "0", f)
@@ -155,7 +141,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// get sessionID
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
account := Account{Username: opt.UserName, Password: opt.Password}
account := Account{Username: username, Password: password}

opts := rest.Opts{
Method: "POST",
@@ -179,17 +165,17 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, "0", &tempF)
tempF.root = newRoot
newF := *f
newF.dirCache = dircache.New(newRoot, "0", &newF)
newF.root = newRoot

// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(false)
err = newF.dirCache.FindRoot(false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.newObjectWithInfo(remote, nil)
_, err := newF.newObjectWithInfo(remote, nil)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
@@ -197,13 +183,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
return nil, err
}
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/ncw/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
return &newF, fs.ErrorIsFile
}
return f, nil
}
@@ -932,9 +913,8 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
// resp.Body.Close()
// fs.Debugf(nil, "PostOpen: %#v", openResponse)

// 10 MB chunks size
// 1 MB chunks size
chunkSize := int64(1024 * 1024 * 10)
buf := make([]byte, int(chunkSize))
chunkOffset := int64(0)
remainingBytes := size
chunkCounter := 0
@@ -947,19 +927,14 @@
remainingBytes -= currentChunkSize
fs.Debugf(o, "Uploading chunk %d, size=%d, remain=%d", chunkCounter, currentChunkSize, remainingBytes)

chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, currentChunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, err
}
var formBody bytes.Buffer
w := multipart.NewWriter(&formBody)
fw, err := w.CreateFormFile("file_data", o.remote)
if err != nil {
return false, err
}
if _, err = io.Copy(fw, chunk); err != nil {
if _, err = io.CopyN(fw, in, currentChunkSize); err != nil {
return false, err
}
// Add session_id
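The hunk above swaps a one-shot io.CopyN from the raw input stream for a buffer-backed repeatable reader that is rewound inside the pacer retry, so a failed chunk can be re-sent byte for byte. A self-contained sketch of that idea (this is not rclone's readers package, just the same principle on the standard library):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// repeatableChunk buffers up to n bytes from src so the same chunk can be
// replayed on retry - the property gained by moving away from
// io.CopyN(fw, in, ...), which consumes the stream irrecoverably.
func repeatableChunk(src io.Reader, n int64) (*bytes.Reader, error) {
	buf := make([]byte, n)
	read, err := io.ReadFull(src, buf)
	if err != nil && err != io.ErrUnexpectedEOF {
		return nil, err
	}
	return bytes.NewReader(buf[:read]), nil
}

func main() {
	chunk, _ := repeatableChunk(strings.NewReader("chunk payload"), 5)
	for attempt := 1; attempt <= 2; attempt++ {
		_, _ = chunk.Seek(0, io.SeekStart) // rewind before each (re)try
		data, _ := io.ReadAll(chunk)
		fmt.Printf("attempt %d sent %q\n", attempt, data)
	}
}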
@@ -1090,7 +1065,7 @@ func (o *Object) readMetaData() (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
opts := rest.Opts{
Method: "GET",
Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + url.QueryEscape(replaceReservedChars(leaf)),
Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + rest.URLPathEscape(replaceReservedChars(leaf)),
}
resp, err = o.fs.srv.CallJSON(&opts, nil, &folderList)
return o.fs.shouldRetry(resp, err)
@@ -23,8 +23,6 @@ import (
"github.com/ncw/rclone/backend/pcloud/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
@@ -67,31 +65,26 @@ func init() {
Name: "pcloud",
Description: "Pcloud",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("pcloud", name, m, oauthConfig)
Config: func(name string) {
err := oauthutil.Config("pcloud", name, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Pcloud App Client Id\nLeave blank normally.",
Help: "Pcloud App Client Id - leave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Pcloud App Client Secret\nLeave blank normally.",
Help: "Pcloud App Client Secret - leave blank normally.",
}},
})
}

// Options defines the configuration for this backend
type Options struct {
}

// Fs represents a remote pcloud
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
@@ -236,23 +229,16 @@ func errorHandler(resp *http.Response) error {
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
func NewFs(name, root string) (fs.Fs, error) {
root = parsePath(root)
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
oAuthClient, ts, err := oauthutil.NewClient(name, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Pcloud")
log.Fatalf("Failed to configure Pcloud: %v", err)
}

f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
@@ -276,16 +262,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.root = newRoot
newF := *f
newF.dirCache = dircache.New(newRoot, rootID, &newF)
newF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(false)
err = newF.dirCache.FindRoot(false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.newObjectWithInfo(remote, nil)
_, err := newF.newObjectWithInfo(remote, nil)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
@@ -293,13 +279,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
return nil, err
}
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/ncw/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
return &newF, fs.ErrorIsFile
}
return f, nil
}
@@ -1112,12 +1093,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
return shouldRetry(resp, err)
})
if err != nil {
// sometimes pcloud leaves a half complete file on
// error, so delete it if it exists
delObj, delErr := o.fs.NewObject(o.remote)
if delErr == nil && delObj != nil {
_ = delObj.Remove()
}
return err
}
if len(result.Items) != 1 {
@@ -17,8 +17,7 @@ import (
"time"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
@@ -35,91 +34,57 @@ func init() {
Description: "QingCloud Object Storage",
NewFs: NewFs,
Options: []fs.Option{{
Name: "env_auth",
Help: "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.",
Default: false,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter QingStor credentials in the next step",
}, {
Value: "true",
Help: "Get QingStor credentials from the environment (env vars or IAM)",
}},
Name: "env_auth",
Help: "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.",
Examples: []fs.OptionExample{
{
Value: "false",
Help: "Enter QingStor credentials in the next step",
}, {
Value: "true",
Help: "Get QingStor credentials from the environment (env vars or IAM)",
},
},
}, {
Name: "access_key_id",
Help: "QingStor Access Key ID\nLeave blank for anonymous access or runtime credentials.",
Help: "QingStor Access Key ID - leave blank for anonymous access or runtime credentials.",
}, {
Name: "secret_access_key",
Help: "QingStor Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
Help: "QingStor Secret Access Key (password) - leave blank for anonymous access or runtime credentials.",
}, {
Name: "endpoint",
Help: "Enter a endpoint URL to connection QingStor API.\nLeave blank will use the default value \"https://qingstor.com:443\"",
}, {
Name: "zone",
Help: "Zone to connect to.\nDefault is \"pek3a\".",
Examples: []fs.OptionExample{{
Value: "pek3a",
Help: "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
}, {
Value: "sh1a",
Help: "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
}, {
Value: "gd2a",
Help: "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
}},
Help: "Choose or Enter a zone to connect. Default is \"pek3a\".",
Examples: []fs.OptionExample{
{
Value: "pek3a",

Help: "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
},
{
Value: "sh1a",

Help: "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
},
{
Value: "gd2a",

Help: "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
},
},
}, {
Name: "connection_retries",
Help: "Number of connection retries.",
Default: 3,
Advanced: true,
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload

Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5GB.`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to use for uploading.

When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.

Note that "--qingstor-upload-concurrency" chunks of this size are buffered
in memory per transfer.

If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.

This is the number of chunks of the same file that are uploaded
concurrently.

NB if you set this to > 1 then the checksums of multpart uploads
become corrupted (the uploads themselves are not corrupted though).

If you are uploading small numbers of large file over high speed link
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: 1,
Advanced: true,
Name: "connection_retries",
Help: "Number of connnection retry.\nLeave blank will use the default value \"3\".",
}},
})
}

// Constants
const (
listLimitSize = 1000 // Number of items to read at once
maxSizeForCopy = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
minChunkSize = fs.SizeSuffix(minMultiPartSize)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
listLimitSize = 1000 // Number of items to read at once
maxSizeForCopy = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
)

// Globals
@@ -130,31 +95,17 @@ func timestampToTime(tp int64) time.Time {
return tm.UTC()
}

// Options defines the configuration for this backend
type Options struct {
EnvAuth bool `config:"env_auth"`
AccessKeyID string `config:"access_key_id"`
SecretAccessKey string `config:"secret_access_key"`
Endpoint string `config:"endpoint"`
Zone string `config:"zone"`
ConnectionRetries int `config:"connection_retries"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
}

// Fs represents a remote qingstor server
type Fs struct {
name string // The name of the remote
root string // The root is a subdir, is a special object
opt Options // parsed options
features *fs.Features // optional features
svc *qs.Service // The connection to the qingstor server
zone string // The zone we are working on
bucket string // The bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucketOK and bucketDeleted
bucketOK bool // true if we have created the bucket
bucketDeleted bool // true if we have deleted the bucket
root string // The root is a subdir, is a special object
features *fs.Features // optional features
svc *qs.Service // The connection to the qingstor server
}

// Object describes a qingstor object
@@ -175,12 +126,10 @@ type Object struct {

// ------------------------------------------------------------

// Pattern to match a qingstor path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)

// parseParse parses a qingstor 'url'
func qsParsePath(path string) (bucket, key string, err error) {
// Pattern to match a qingstor path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("Couldn't parse bucket out of qingstor path %q", path)
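The two regexps above differ only in whether leading slashes are tolerated (`^/*...` versus `^...`). A quick standalone check of how each splits a path with a leading slash:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	withSlashes := regexp.MustCompile(`^/*([^/]*)(.*)$`) // tolerates leading "/"
	plain := regexp.MustCompile(`^([^/]*)(.*)$`)         // does not

	for _, m := range []*regexp.Regexp{withSlashes, plain} {
		parts := m.FindStringSubmatch("/bucket/dir/file.txt")
		fmt.Printf("bucket=%q rest=%q\n", parts[1], parts[2])
	}
	// withSlashes: bucket="bucket" rest="/dir/file.txt"
	// plain:       bucket=""       rest="/bucket/dir/file.txt"
}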
@@ -216,12 +165,12 @@ func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
}

// qsConnection makes a connection to qingstor
func qsServiceConnection(opt *Options) (*qs.Service, error) {
accessKeyID := opt.AccessKeyID
secretAccessKey := opt.SecretAccessKey
func qsServiceConnection(name string) (*qs.Service, error) {
accessKeyID := config.FileGet(name, "access_key_id")
secretAccessKey := config.FileGet(name, "secret_access_key")

switch {
case opt.EnvAuth:
case config.FileGetBool(name, "env_auth", false):
// No need for empty checks if "env_auth" is true
case accessKeyID == "" && secretAccessKey == "":
// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
@@ -235,7 +184,7 @@ func qsServiceConnection(opt *Options) (*qs.Service, error) {
host := "qingstor.com"
port := 443

endpoint := opt.Endpoint
endpoint := config.FileGet(name, "endpoint", "")
if endpoint != "" {
_protocol, _host, _port, err := qsParseEndpoint(endpoint)

@@ -255,87 +204,48 @@ func qsServiceConnection(opt *Options) (*qs.Service, error) {

}

cf, err := qsConfig.NewDefault()
if err != nil {
return nil, err
connectionRetries := 3
retries := config.FileGet(name, "connection_retries", "")
if retries != "" {
connectionRetries, _ = strconv.Atoi(retries)
}

cf, err := qsConfig.NewDefault()
cf.AccessKeyID = accessKeyID
cf.SecretAccessKey = secretAccessKey
cf.Protocol = protocol
cf.Host = host
cf.Port = port
cf.ConnectionRetries = opt.ConnectionRetries
cf.ConnectionRetries = connectionRetries
cf.Connection = fshttp.NewClient(fs.Config)

return qs.Init(cf)
}
svc, _ := qs.Init(cf)

func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}

func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}

func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
return nil
}

func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
return svc, err
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "qingstor: chunk size")
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "qingstor: upload cutoff")
}
func NewFs(name, root string) (fs.Fs, error) {
bucket, key, err := qsParsePath(root)
if err != nil {
return nil, err
}
svc, err := qsServiceConnection(opt)
svc, err := qsServiceConnection(name)
if err != nil {
return nil, err
}

if opt.Zone == "" {
opt.Zone = "pek3a"
zone := config.FileGet(name, "zone")
if zone == "" {
zone = "pek3a"
}

f := &Fs{
name: name,
zone: zone,
root: key,
opt: *opt,
svc: svc,
zone: opt.Zone,
bucket: bucket,
svc: svc,
}
f.features = (&fs.Features{
ReadMimeType: true,
@@ -348,7 +258,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.root += "/"
}
//Check to see if the object exists
bucketInit, err := svc.Bucket(bucket, opt.Zone)
bucketInit, err := svc.Bucket(bucket, zone)
if err != nil {
return nil, err
}
@@ -994,24 +904,16 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
mimeType := fs.MimeType(src)

req := uploadInput{
body: in,
qsSvc: o.fs.svc,
bucket: o.fs.bucket,
zone: o.fs.zone,
key: key,
mimeType: mimeType,
partSize: int64(o.fs.opt.ChunkSize),
concurrency: o.fs.opt.UploadConcurrency,
body: in,
qsSvc: o.fs.svc,
bucket: o.fs.bucket,
zone: o.fs.zone,
key: key,
mimeType: mimeType,
}
uploader := newUploader(&req)

size := src.Size()
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
if multipart {
err = uploader.upload()
} else {
err = uploader.singlePartUpload(in, size)
}
err = uploader.upload()
if err != nil {
return err
}

@@ -2,12 +2,12 @@

// +build !plan9

package qingstor
package qingstor_test

import (
"testing"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/backend/qingstor"
"github.com/ncw/rclone/fstest/fstests"
)

@@ -15,19 +15,6 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestQingStor:",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},
NilObject: (*qingstor.Object)(nil),
})
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}

func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}

var _ fstests.SetUploadChunkSizer = (*Fs)(nil)

@@ -152,11 +152,11 @@ func (u *uploader) init() {
}

// singlePartUpload upload a single object that contentLength less than "defaultUploadPartSize"
func (u *uploader) singlePartUpload(buf io.Reader, size int64) error {
func (u *uploader) singlePartUpload(buf io.ReadSeeker) error {
bucketInit, _ := u.bucketInit()

req := qs.PutObjectInput{
ContentLength: &size,
ContentLength: &u.readerPos,
ContentType: &u.cfg.mimeType,
Body: buf,
}
@@ -179,13 +179,13 @@ func (u *uploader) upload() error {
// Do one read to determine if we have more than one part
reader, _, err := u.nextReader()
if err == io.EOF { // single part
fs.Debugf(u, "Uploading as single part object to QingStor")
return u.singlePartUpload(reader, u.readerPos)
fs.Debugf(u, "Tried to upload a singile object to QingStor")
return u.singlePartUpload(reader)
} else if err != nil {
return errors.Errorf("read upload data failed: %s", err)
}

fs.Debugf(u, "Uploading as multi-part object to QingStor")
fs.Debugf(u, "Treied to upload a multi-part object to QingStor")
mu := multiUploader{uploader: u}
return mu.multiPartUpload(reader)
}
@@ -261,7 +261,7 @@ func (mu *multiUploader) initiate() error {
req := qs.InitiateMultipartUploadInput{
ContentType: &mu.cfg.mimeType,
}
fs.Debugf(mu, "Initiating a multi-part upload")
fs.Debugf(mu, "Tried to initiate a multi-part upload")
rsp, err := bucketInit.InitiateMultipartUpload(mu.cfg.key, &req)
if err == nil {
mu.uploadID = rsp.UploadID
@@ -279,12 +279,12 @@ func (mu *multiUploader) send(c chunk) error {
ContentLength: &c.size,
Body: c.buffer,
}
fs.Debugf(mu, "Uploading a part to QingStor with partNumber %d and partSize %d", c.partNumber, c.size)
fs.Debugf(mu, "Tried to upload a part to QingStor that partNumber %d and partSize %d", c.partNumber, c.size)
_, err := bucketInit.UploadMultipart(mu.cfg.key, &req)
if err != nil {
return err
}
fs.Debugf(mu, "Done uploading part partNumber %d and partSize %d", c.partNumber, c.size)
fs.Debugf(mu, "Upload part finished that partNumber %d and partSize %d", c.partNumber, c.size)

mu.mtx.Lock()
defer mu.mtx.Unlock()
@@ -304,7 +304,7 @@ func (mu *multiUploader) list() error {
req := qs.ListMultipartInput{
UploadID: mu.uploadID,
}
fs.Debugf(mu, "Reading multi-part details")
fs.Debugf(mu, "Tried to list a multi-part")
rsp, err := bucketInit.ListMultipart(mu.cfg.key, &req)
if err == nil {
mu.objectParts = rsp.ObjectParts
@@ -331,7 +331,7 @@ func (mu *multiUploader) complete() error {
ObjectParts: mu.objectParts,
ETag: &md5String,
}
fs.Debugf(mu, "Completing multi-part object")
fs.Debugf(mu, "Tried to complete a multi-part")
_, err = bucketInit.CompleteMultipartUpload(mu.cfg.key, &req)
if err == nil {
fs.Debugf(mu, "Complete multi-part finished")
@@ -348,7 +348,7 @@ func (mu *multiUploader) abort() error {
req := qs.AbortMultipartUploadInput{
UploadID: uploadID,
}
fs.Debugf(mu, "Aborting multi-part object %q", *uploadID)
fs.Debugf(mu, "Tried to abort a multi-part")
_, err = bucketInit.AbortMultipartUpload(mu.cfg.key, &req)
}

@@ -392,14 +392,6 @@ func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) error {
var nextChunkLen int
reader, nextChunkLen, err = mu.nextReader()
if err != nil && err != io.EOF {
// empty ch
go func() {
for range ch {
}
}()
// Wait for all goroutines finish
close(ch)
mu.wg.Wait()
return err
}
if nextChunkLen == 0 && partNumber > 0 {
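The lines removed in the hunk above implement a shutdown pattern worth noting: on a read error the producer drains the chunk channel in a goroutine, closes it, and waits for the worker goroutines to exit. A self-contained sketch of the same pattern, simplified to plain ints (my reading of the removed code, not the rclone uploader itself):

package main

import (
	"fmt"
	"sync"
)

// drainAndStop mirrors the removed block: discard any queued work so
// nothing blocks, close the channel so consumers' range loops end, then
// wait for in-flight workers to finish before returning.
func drainAndStop(ch chan int, wg *sync.WaitGroup) {
	go func() {
		for range ch { // discard queued work
		}
	}()
	close(ch)
	wg.Wait()
}

func main() {
	ch := make(chan int, 4)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() { // one worker consuming until the channel closes
		defer wg.Done()
		for n := range ch {
			fmt.Println("processed", n)
		}
	}()
	ch <- 1
	ch <- 2
	drainAndStop(ch, &wg)
}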
backend/s3/s3.go (789 lines changed): file diff suppressed because it is too large
@@ -1,10 +1,10 @@
// Test S3 filesystem interface
package s3
package s3_test

import (
"testing"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/backend/s3"
"github.com/ncw/rclone/fstest/fstests"
)

@@ -12,19 +12,6 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestS3:",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},
NilObject: (*s3.Object)(nil),
})
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}

func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}

var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
@@ -44,7 +44,16 @@ func sign(AccessKey, SecretKey string, req *http.Request) {
req.Header.Set("Date", date)

// Sort out URI
uri := req.URL.EscapedPath()
uri := req.URL.Opaque
if uri != "" {
if strings.HasPrefix(uri, "//") {
// Strip off //host/uri
uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
req.URL.Opaque = uri // reset to plain URI otherwise Ceph gets confused
}
} else {
uri = req.URL.Path
}
if uri == "" {
uri = "/"
}
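The hunk above moves the v2 signature's canonical URI from req.URL.Opaque handling to req.URL.EscapedPath(). A quick standalone illustration of what the standard library fields yield for a typical bucket URL:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, _ := url.Parse("https://bucket.example.com/dir/a%20file.txt?acl")
	fmt.Println(u.Path)          // "/dir/a file.txt" (decoded)
	fmt.Println(u.EscapedPath()) // "/dir/a%20file.txt" (as sent on the wire)
	fmt.Println(u.Opaque == "")  // true: Opaque is only set for opaque URLs
}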
@@ -20,15 +20,14 @@ import (

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"github.com/pkg/sftp"
sshagent "github.com/xanzy/ssh-agent"
"github.com/xanzy/ssh-agent"
"golang.org/x/crypto/ssh"
"golang.org/x/time/rate"
)
@@ -39,6 +38,10 @@ const (

var (
currentUser = readCurrentUser()

// Flags
sftpAskPassword = flags.BoolP("sftp-ask-password", "", false, "Allow asking for SFTP password when needed.")
sshPathOverride = flags.StringP("ssh-path-override", "", "", "Override path used by SSH connection.")
)

func init() {
@@ -49,43 +52,32 @@ func init() {
Options: []fs.Option{{
Name: "host",
Help: "SSH host to connect to",
Required: true,
Optional: false,
Examples: []fs.OptionExample{{
Value: "example.com",
Help: "Connect to example.com",
}},
}, {
Name: "user",
Help: "SSH username, leave blank for current username, " + currentUser,
Name: "user",
Help: "SSH username, leave blank for current username, " + currentUser,
Optional: true,
}, {
Name: "port",
Help: "SSH port, leave blank to use default (22)",
Name: "port",
Help: "SSH port, leave blank to use default (22)",
Optional: true,
}, {
Name: "pass",
Help: "SSH password, leave blank to use ssh-agent.",
Optional: true,
IsPassword: true,
}, {
Name: "key_file",
Help: "Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.",
Name: "key_file",
Help: "Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.",
Optional: true,
}, {
Name: "key_file_pass",
Help: `The passphrase to decrypt the PEM-encoded private key file.

Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
in the new OpenSSH format can't be used.`,
IsPassword: true,
}, {
Name: "key_use_agent",
Help: `When set forces the usage of the ssh-agent.

When key-file is also set, the ".pub" file of the specified key-file is read and only the associated key is
requested from the ssh-agent. This allows to avoid ` + "`Too many authentication failures for *username*`" + ` errors
when the ssh-agent contains many keys.`,
Default: false,
}, {
Name: "use_insecure_cipher",
Help: "Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.",
Default: false,
Name: "use_insecure_cipher",
Help: "Enable the user of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.",
Optional: true,
Examples: []fs.OptionExample{
{
Value: "false",
@@ -96,69 +88,30 @@ when the ssh-agent contains many keys.`,
},
},
}, {
Name: "disable_hashcheck",
Default: false,
Help: "Disable the execution of SSH commands to determine if remote file hashing is available.\nLeave blank or set to false to enable hashing (recommended), set to true to disable hashing.",
}, {
Name: "ask_password",
Default: false,
Help: "Allow asking for SFTP password when needed.",
Advanced: true,
}, {
Name: "path_override",
Default: "",
Help: `Override path used by SSH connection.

This allows checksum calculation when SFTP and SSH paths are
different. This issue affects among others Synology NAS boxes.

Shared folders can be found in directories representing volumes

rclone sync /home/local/directory remote:/directory --ssh-path-override /volume2/directory

Home directory can be found in a shared folder called "home"

rclone sync /home/local/directory remote:/home/directory --ssh-path-override /volume1/homes/USER/directory`,
Advanced: true,
}, {
Name: "set_modtime",
Default: true,
Help: "Set the modified time on the remote if set.",
Advanced: true,
Name: "disable_hashcheck",
Help: "Disable the execution of SSH commands to determine if remote file hashing is available. Leave blank or set to false to enable hashing (recommended), set to true to disable hashing.",
Optional: true,
}},
}
fs.Register(fsi)
}

// Options defines the configuration for this backend
type Options struct {
Host string `config:"host"`
User string `config:"user"`
Port string `config:"port"`
Pass string `config:"pass"`
KeyFile string `config:"key_file"`
KeyFilePass string `config:"key_file_pass"`
KeyUseAgent bool `config:"key_use_agent"`
UseInsecureCipher bool `config:"use_insecure_cipher"`
DisableHashCheck bool `config:"disable_hashcheck"`
AskPassword bool `config:"ask_password"`
PathOverride string `config:"path_override"`
SetModTime bool `config:"set_modtime"`
}

// Fs stores the interface to the remote SFTP files
type Fs struct {
name string
root string
opt Options // parsed options
features *fs.Features // optional features
config *ssh.ClientConfig
url string
mkdirLock *stringLock
cachedHashes *hash.Set
poolMu sync.Mutex
pool []*conn
connLimit *rate.Limiter // for limiting number of connections per second
name string
root string
features *fs.Features // optional features
config *ssh.ClientConfig
host string
port string
url string
mkdirLock *stringLock
cachedHashes *hash.Set
hashcheckDisabled bool
setModtime bool
poolMu sync.Mutex
pool []*conn
connLimit *rate.Limiter // for limiting number of connections per second
}

// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -244,7 +197,7 @@ func (f *Fs) sftpConnection() (c *conn, err error) {
c = &conn{
err: make(chan error, 1),
}
c.sshClient, err = Dial("tcp", f.opt.Host+":"+f.opt.Port, f.config)
c.sshClient, err = Dial("tcp", f.host+":"+f.port, f.config)
if err != nil {
return nil, errors.Wrap(err, "couldn't connect SSH")
}
@@ -315,48 +268,37 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
f.poolMu.Unlock()
}

// shellExpand replaces a leading "~" with "${HOME}" and expands all environment
// variables afterwards.
func shellExpand(s string) string {
if s != "" {
if s[0] == '~' {
s = "${HOME}" + s[1:]
}
s = os.ExpandEnv(s)
}
return s
}
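shellExpand's behavior, illustrated standalone (same logic as the function above, copied verbatim; HOME is pinned so the output is deterministic):

package main

import (
	"fmt"
	"os"
)

// shellExpand is copied from the function above: a leading "~" becomes
// "${HOME}", then all environment variables are expanded.
func shellExpand(s string) string {
	if s != "" {
		if s[0] == '~' {
			s = "${HOME}" + s[1:]
		}
		s = os.ExpandEnv(s)
	}
	return s
}

func main() {
	os.Setenv("HOME", "/home/alice")
	fmt.Println(shellExpand("~/.ssh/id_rsa"))  // /home/alice/.ssh/id_rsa
	fmt.Println(shellExpand("$HOME/keys/k1"))  // /home/alice/keys/k1
	fmt.Println(shellExpand("/absolute/path")) // unchanged
}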
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
func NewFs(name, root string) (fs.Fs, error) {
user := config.FileGet(name, "user")
host := config.FileGet(name, "host")
port := config.FileGet(name, "port")
pass := config.FileGet(name, "pass")
keyFile := config.FileGet(name, "key_file")
insecureCipher := config.FileGetBool(name, "use_insecure_cipher")
hashcheckDisabled := config.FileGetBool(name, "disable_hashcheck")
setModtime := config.FileGetBool(name, "set_modtime", true)
if user == "" {
user = currentUser
}
if opt.User == "" {
opt.User = currentUser
}
if opt.Port == "" {
opt.Port = "22"
if port == "" {
port = "22"
}
sshConfig := &ssh.ClientConfig{
User: opt.User,
User: user,
Auth: []ssh.AuthMethod{},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
Timeout: fs.Config.ConnectTimeout,
}

if opt.UseInsecureCipher {
if insecureCipher {
sshConfig.Config.SetDefaults()
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc")
}

keyFile := shellExpand(opt.KeyFile)
// Add ssh agent-auth if no password or file specified
if (opt.Pass == "" && keyFile == "") || opt.KeyUseAgent {
if pass == "" && keyFile == "" {
sshAgentClient, _, err := sshagent.New()
if err != nil {
return nil, errors.Wrap(err, "couldn't connect to ssh-agent")
@@ -365,30 +307,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "couldn't read ssh agent signers")
}
if keyFile != "" {
pubBytes, err := ioutil.ReadFile(keyFile + ".pub")
if err != nil {
return nil, errors.Wrap(err, "failed to read public key file")
}
pub, _, _, _, err := ssh.ParseAuthorizedKey(pubBytes)
if err != nil {
return nil, errors.Wrap(err, "failed to parse public key file")
}
pubM := pub.Marshal()
found := false
for _, s := range signers {
if bytes.Equal(pubM, s.PublicKey().Marshal()) {
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(s))
found = true
break
}
}
if !found {
return nil, errors.New("private key not found in the ssh-agent")
}
} else {
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signers...))
}
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signers...))
}

// Load key file if specified
@@ -397,14 +316,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "failed to read private key file")
}
clearpass := ""
if opt.KeyFilePass != "" {
clearpass, err = obscure.Reveal(opt.KeyFilePass)
if err != nil {
return nil, err
}
}
signer, err := ssh.ParsePrivateKeyWithPassphrase(key, []byte(clearpass))
signer, err := ssh.ParsePrivateKey(key)
if err != nil {
return nil, errors.Wrap(err, "failed to parse private key file")
}
@@ -412,8 +324,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}

// Auth from password if specified
if opt.Pass != "" {
clearpass, err := obscure.Reveal(opt.Pass)
if pass != "" {
clearpass, err := obscure.Reveal(pass)
if err != nil {
return nil, err
}
@@ -421,20 +333,23 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}

// Ask for password if none was defined and we're allowed to
if opt.Pass == "" && opt.AskPassword {
if pass == "" && *sftpAskPassword {
_, _ = fmt.Fprint(os.Stderr, "Enter SFTP password: ")
clearpass := config.ReadPassword()
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
}

f := &Fs{
name: name,
root: root,
opt: *opt,
config: sshConfig,
url: "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root,
mkdirLock: newStringLock(),
connLimit: rate.NewLimiter(rate.Limit(connectionsPerSecond), 1),
name: name,
root: root,
config: sshConfig,
host: host,
port: port,
url: "sftp://" + user + "@" + host + ":" + port + "/" + root,
hashcheckDisabled: hashcheckDisabled,
setModtime: setModtime,
mkdirLock: newStringLock(),
connLimit: rate.NewLimiter(rate.Limit(connectionsPerSecond), 1),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
@@ -565,13 +480,9 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// If file is a symlink (not a regular file is the best cross platform test we can do), do a stat to
// pick up the size and type of the destination, instead of the size and type of the symlink.
if !info.Mode().IsRegular() {
oldInfo := info
info, err = f.stat(remote)
if err != nil {
if !os.IsNotExist(err) {
fs.Errorf(remote, "stat of non-regular file/dir failed: %v", err)
}
info = oldInfo
return nil, errors.Wrap(err, "stat of non-regular file/dir failed")
}
}
if info.IsDir() {
@@ -658,22 +569,12 @@ func (f *Fs) Mkdir(dir string) error {

// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(dir string) error {
// Check to see if directory is empty as some servers will
// delete recursively with RemoveDirectory
entries, err := f.List(dir)
if err != nil {
return errors.Wrap(err, "Rmdir")
}
if len(entries) != 0 {
return fs.ErrorDirectoryNotEmpty
}
// Remove the directory
root := path.Join(f.root, dir)
c, err := f.getSftpConnection()
if err != nil {
return errors.Wrap(err, "Rmdir")
}
err = c.sftpClient.RemoveDirectory(root)
err = c.sftpClient.Remove(root)
f.putSftpConnection(&c, err)
return err
}
@@ -762,7 +663,7 @@ func (f *Fs) Hashes() hash.Set {
return *f.cachedHashes
}

if f.opt.DisableHashCheck {
if f.hashcheckDisabled {
return hash.Set(hash.None)
}

@@ -843,10 +744,6 @@ func (o *Object) Hash(r hash.Type) (string, error) {
return "", hash.ErrUnsupported
}

if o.fs.opt.DisableHashCheck {
return "", nil
}

c, err := o.fs.getSftpConnection()
if err != nil {
return "", errors.Wrap(err, "Hash get SFTP connection")
@@ -861,8 +758,8 @@ func (o *Object) Hash(r hash.Type) (string, error) {
session.Stdout = &stdout
session.Stderr = &stderr
escapedPath := shellEscape(o.path())
if o.fs.opt.PathOverride != "" {
escapedPath = shellEscape(path.Join(o.fs.opt.PathOverride, o.remote))
if *sshPathOverride != "" {
escapedPath = shellEscape(path.Join(*sshPathOverride, o.remote))
}
err = session.Run(hashCmd + " " + escapedPath)
if err != nil {
@@ -955,7 +852,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
if err != nil {
return errors.Wrap(err, "SetModTime")
}
if o.fs.opt.SetModTime {
if o.fs.setModtime {
err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
o.fs.putSftpConnection(&c, err)
if err != nil {
@@ -14,14 +14,13 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/operations"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/ncw/rclone/lib/pacer"
|
||||
"github.com/ncw/swift"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -30,34 +29,12 @@ import (
|
||||
const (
|
||||
directoryMarkerContentType = "application/directory" // content type of directory marker objects
|
||||
listChunks = 1000 // chunk size to read directory listings
|
||||
defaultChunkSize = 5 * fs.GibiByte
|
||||
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
|
||||
)
|
||||
|
||||
// SharedOptions are shared between swift and hubic
|
||||
var SharedOptions = []fs.Option{{
|
||||
Name: "chunk_size",
|
||||
Help: `Above this size files will be chunked into a _segments container.
|
||||
|
||||
Above this size files will be chunked into a _segments container. The
|
||||
default for this is 5GB which is its maximum value.`,
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_chunk",
|
||||
Help: `Don't chunk files during streaming upload.
|
||||
|
||||
When doing streaming uploads (eg using rcat or mount) setting this
|
||||
flag will cause the swift backend to not upload chunked files.
|
||||
|
||||
This will limit the maximum upload size to 5GB. However non chunked
|
||||
files are easier to deal with and have an MD5SUM.
|
||||
|
||||
Rclone will still chunk files bigger than chunk_size when doing normal
|
||||
copy operations.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}}
|
||||
// Globals
|
||||
var (
|
||||
chunkSize = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
@@ -65,10 +42,9 @@ func init() {
|
||||
Name: "swift",
|
||||
Description: "Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)",
|
||||
NewFs: NewFs,
|
||||
Options: append([]fs.Option{{
|
||||
Name: "env_auth",
|
||||
Help: "Get swift credentials from environment variables in standard OpenStack form.",
|
||||
Default: false,
|
||||
Options: []fs.Option{{
|
||||
Name: "env_auth",
|
||||
Help: "Get swift credentials from environment variables in standard OpenStack form.",
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "false",
|
||||
@@ -131,22 +107,11 @@ func init() {
|
||||
Name: "auth_token",
|
||||
Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)",
|
||||
}, {
|
||||
Name: "application_credential_id",
|
||||
Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)",
|
||||
Name: "auth_version",
|
||||
Help: "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)",
|
||||
}, {
|
||||
Name: "application_credential_name",
|
||||
Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)",
|
||||
}, {
|
||||
Name: "application_credential_secret",
|
||||
Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)",
|
||||
}, {
|
||||
Name: "auth_version",
|
||||
Help: "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)",
|
||||
Default: 0,
|
||||
}, {
|
||||
Name: "endpoint_type",
|
||||
Help: "Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE)",
|
||||
Default: "public",
|
||||
Name: "endpoint_type",
|
||||
Help: "Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE)",
|
||||
Examples: []fs.OptionExample{{
|
||||
Help: "Public (default, choose this if not sure)",
|
||||
Value: "public",
|
||||
@@ -157,51 +122,10 @@ func init() {
|
||||
Help: "Admin",
|
||||
Value: "admin",
|
||||
}},
|
||||
}, {
|
||||
Name: "storage_policy",
|
||||
Help: `The storage policy to use when creating a new container
|
||||
|
||||
This applies the specified storage policy when creating a new
|
||||
container. The policy cannot be changed afterwards. The allowed
|
||||
configuration values and their meaning depend on your Swift storage
|
||||
provider.`,
|
||||
Default: "",
|
||||
Examples: []fs.OptionExample{{
|
||||
Help: "Default",
|
||||
Value: "",
|
||||
}, {
|
||||
Help: "OVH Public Cloud Storage",
|
||||
Value: "pcs",
|
||||
}, {
|
||||
Help: "OVH Public Cloud Archive",
|
||||
Value: "pca",
|
||||
}},
|
||||
}}, SharedOptions...),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
User string `config:"user"`
|
||||
Key string `config:"key"`
|
||||
Auth string `config:"auth"`
|
||||
UserID string `config:"user_id"`
|
||||
Domain string `config:"domain"`
|
||||
Tenant string `config:"tenant"`
|
||||
TenantID string `config:"tenant_id"`
|
||||
TenantDomain string `config:"tenant_domain"`
|
||||
Region string `config:"region"`
|
||||
StorageURL string `config:"storage_url"`
|
||||
AuthToken string `config:"auth_token"`
|
||||
AuthVersion int `config:"auth_version"`
|
||||
ApplicationCredentialId string `config:"application_credential_id"`
|
||||
ApplicationCredentialName string `config:"application_credential_name"`
|
||||
ApplicationCredentialSecret string `config:"application_credential_secret"`
|
||||
StoragePolicy string `config:"storage_policy"`
|
||||
EndpointType string `config:"endpoint_type"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
NoChunk bool `config:"no_chunk"`
|
||||
flags.VarP(&chunkSize, "swift-chunk-size", "", "Above this size files will be chunked into a _segments container.")
|
||||
}

// Fs represents a remote swift server
@@ -209,27 +133,22 @@ type Fs struct {
    name string // name of this remote
    root string // the path we are working on if any
    features *fs.Features // optional features
    opt Options // options for this backend
    c *swift.Connection // the connection to the swift server
    container string // the container we are working on
    containerOKMu sync.Mutex // mutex to protect container OK
    containerOK bool // true if we have created the container
    segmentsContainer string // container to store the segments (if any) in
    noCheckContainer bool // don't check the container before creating it
    pacer *pacer.Pacer // To pace the API calls
}

// Object describes a swift object
//
// Will definitely have info but maybe not meta
type Object struct {
    fs *Fs // what this object is part of
    remote string // The remote path
    size int64
    lastModified time.Time
    contentType string
    md5 string
    headers swift.Headers // The object headers if known
    fs *Fs // what this object is part of
    remote string // The remote path
    info swift.Object // Info from the swift object if known
    headers swift.Headers // The object headers if known
}

// ------------------------------------------------------------
@@ -260,34 +179,8 @@ func (f *Fs) Features() *fs.Features {
    return f.features
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
    401, // Unauthorized (eg "Token has expired")
    408, // Request Timeout
    409, // Conflict - various states that could be resolved on a retry
    429, // Rate exceeded.
    500, // Get occasional 500 Internal Server Error
    503, // Service Unavailable/Slow Down - "Reduce your request rate"
    504, // Gateway Time-out
}

// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(err error) (bool, error) {
    // If this is a swift.Error object extract the HTTP error code
    if swiftError, ok := err.(*swift.Error); ok {
        for _, e := range retryErrorCodes {
            if swiftError.StatusCode == e {
                return true, err
            }
        }
    }
    // Check for generic failure conditions
    return fserrors.ShouldRetry(err), err
}
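
The pacer wrapping that one side of the following hunks removes uses shouldRetry as its retry classifier. The pattern, mirroring the calls appearing throughout this diff (a sketch for orientation, not new code in this commit):

    var containers []swift.Container
    err = f.pacer.Call(func() (bool, error) {
        containers, err = f.c.ContainersAll(nil)
        // retry on 401/408/409/429/5xx from the table above, or on
        // generic retriable errors via fserrors.ShouldRetry
        return shouldRetry(err)
    })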

// Pattern to match a swift path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)

// parsePath parses a swift 'url'
func parsePath(path string) (container, directory string, err error) {
@@ -302,30 +195,27 @@ func parsePath(path string) (container, directory string, err error) {
}
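
To make the difference between the two matcher variants above concrete, a small sketch (inputs are illustrative; behaviour derived from the regexps themselves):

    parts := regexp.MustCompile(`^/*([^/]*)(.*)$`).FindStringSubmatch("container/path/to/file")
    // parts[0] is the whole match; parts[1] == "container", parts[2] == "/path/to/file".
    // The variant without the leading `^/*` behaves the same on this input,
    // but only the `^/*` form also strips leading slashes, so "/container/dir"
    // still yields "container" and "/dir".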

// swiftConnection makes a connection to swift
func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
func swiftConnection(name string) (*swift.Connection, error) {
    c := &swift.Connection{
        // Keep these in the same order as the Config for ease of checking
        UserName: opt.User,
        ApiKey: opt.Key,
        AuthUrl: opt.Auth,
        UserId: opt.UserID,
        Domain: opt.Domain,
        Tenant: opt.Tenant,
        TenantId: opt.TenantID,
        TenantDomain: opt.TenantDomain,
        Region: opt.Region,
        StorageUrl: opt.StorageURL,
        AuthToken: opt.AuthToken,
        AuthVersion: opt.AuthVersion,
        ApplicationCredentialId: opt.ApplicationCredentialId,
        ApplicationCredentialName: opt.ApplicationCredentialName,
        ApplicationCredentialSecret: opt.ApplicationCredentialSecret,
        EndpointType: swift.EndpointType(opt.EndpointType),
        ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
        Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
        Transport: fshttp.NewTransport(fs.Config),
        UserName: config.FileGet(name, "user"),
        ApiKey: config.FileGet(name, "key"),
        AuthUrl: config.FileGet(name, "auth"),
        UserId: config.FileGet(name, "user_id"),
        Domain: config.FileGet(name, "domain"),
        Tenant: config.FileGet(name, "tenant"),
        TenantId: config.FileGet(name, "tenant_id"),
        TenantDomain: config.FileGet(name, "tenant_domain"),
        Region: config.FileGet(name, "region"),
        StorageUrl: config.FileGet(name, "storage_url"),
        AuthToken: config.FileGet(name, "auth_token"),
        AuthVersion: config.FileGetInt(name, "auth_version", 0),
        EndpointType: swift.EndpointType(config.FileGet(name, "endpoint_type", "public")),
        ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
        Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
        Transport: fshttp.NewTransport(fs.Config),
    }
    if opt.EnvAuth {
    if config.FileGetBool(name, "env_auth", false) {
        err := c.ApplyEnvironment()
        if err != nil {
            return nil, errors.Wrap(err, "failed to read environment variables")
@@ -333,13 +223,11 @@ func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
    }
    StorageUrl, AuthToken := c.StorageUrl, c.AuthToken // nolint
    if !c.Authenticated() {
        if (c.ApplicationCredentialId != "" || c.ApplicationCredentialName != "") && c.ApplicationCredentialSecret == "" {
            if c.UserName == "" && c.UserId == "" {
                return nil, errors.New("user name or user id not found for authentication (and no storage_url+auth_token is provided)")
            }
            if c.ApiKey == "" {
                return nil, errors.New("key not found")
            }
        if c.UserName == "" && c.UserId == "" {
            return nil, errors.New("user name or user id not found for authentication (and no storage_url+auth_token is provided)")
        }
        if c.ApiKey == "" {
            return nil, errors.New("key not found")
        }
        if c.AuthUrl == "" {
            return nil, errors.New("auth not found")
@@ -353,55 +241,28 @@ func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
    // provided by wrapping the existing auth, so we can just
    // override one or the other or both.
    if StorageUrl != "" || AuthToken != "" {
        // Re-write StorageURL and AuthToken if they are being
        // overridden as c.Authenticate above will have
        // overwritten them.
        if StorageUrl != "" {
            c.StorageUrl = StorageUrl
        }
        if AuthToken != "" {
            c.AuthToken = AuthToken
        }
        c.Auth = newAuth(c.Auth, StorageUrl, AuthToken)
    }
    return c, nil
}

func checkUploadChunkSize(cs fs.SizeSuffix) error {
    const minChunkSize = fs.Byte
    if cs < minChunkSize {
        return errors.Errorf("%s is less than %s", cs, minChunkSize)
    }
    return nil
}

func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
    err = checkUploadChunkSize(cs)
    if err == nil {
        old, f.opt.ChunkSize = f.opt.ChunkSize, cs
    }
    return
}

// NewFsWithConnection constructs an Fs from the path, container:path
// NewFsWithConnection contstructs an Fs from the path, container:path
// and authenticated connection.
//
// if noCheckContainer is set then the Fs won't check the container
// exists before creating it.
func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
func NewFsWithConnection(name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
    container, directory, err := parsePath(root)
    if err != nil {
        return nil, err
    }
    f := &Fs{
        name: name,
        opt: *opt,
        c: c,
        container: container,
        segmentsContainer: container + "_segments",
        root: directory,
        noCheckContainer: noCheckContainer,
        pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
    }
    f.features = (&fs.Features{
        ReadMimeType: true,
@@ -411,11 +272,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
    if f.root != "" {
        f.root += "/"
        // Check to see if the object exists - ignoring directory markers
        var info swift.Object
        err = f.pacer.Call(func() (bool, error) {
            info, _, err = f.c.Object(container, directory)
            return shouldRetry(err)
        })
        info, _, err := f.c.Object(container, directory)
        if err == nil && info.ContentType != directoryMarkerContentType {
            f.root = path.Dir(directory)
            if f.root == "." {
@@ -431,23 +288,12 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
func NewFs(name, root string) (fs.Fs, error) {
    c, err := swiftConnection(name)
    if err != nil {
        return nil, err
    }
    err = checkUploadChunkSize(opt.ChunkSize)
    if err != nil {
        return nil, errors.Wrap(err, "swift: chunk size")
    }

    c, err := swiftConnection(opt, name)
    if err != nil {
        return nil, err
    }
    return NewFsWithConnection(opt, name, root, c, false)
    return NewFsWithConnection(name, root, c, false)
}

// Return an Object from a path
@@ -467,10 +313,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er
    }
    if info != nil {
        // Set info but not headers
        err := o.decodeMetaData(info)
        if err != nil {
            return nil, err
        }
        o.info = *info
    } else {
        err := o.readMetaData() // reads info and headers, returning an error
        if err != nil {
@@ -508,12 +351,7 @@ func (f *Fs) listContainerRoot(container, root string, dir string, recurse bool,
    }
    rootLength := len(root)
    return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
        var objects []swift.Object
        var err error
        err = f.pacer.Call(func() (bool, error) {
            objects, err = f.c.Objects(container, opts)
            return shouldRetry(err)
        })
        objects, err := f.c.Objects(container, opts)
        if err == nil {
            for i := range objects {
                object := &objects[i]
@@ -602,11 +440,7 @@ func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
    if dir != "" {
        return nil, fs.ErrorListBucketRequired
    }
    var containers []swift.Container
    err = f.pacer.Call(func() (bool, error) {
        containers, err = f.c.ContainersAll(nil)
        return shouldRetry(err)
    })
    containers, err := f.c.ContainersAll(nil)
    if err != nil {
        return nil, errors.Wrap(err, "container listing failed")
    }
@@ -667,12 +501,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {

// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
    var containers []swift.Container
    var err error
    err = f.pacer.Call(func() (bool, error) {
        containers, err = f.c.ContainersAll(nil)
        return shouldRetry(err)
    })
    containers, err := f.c.ContainersAll(nil)
    if err != nil {
        return nil, errors.Wrap(err, "container listing failed")
    }
@@ -722,20 +551,10 @@ func (f *Fs) Mkdir(dir string) error {
    // Check to see if container exists first
    var err error = swift.ContainerNotFound
    if !f.noCheckContainer {
        err = f.pacer.Call(func() (bool, error) {
            _, _, err = f.c.Container(f.container)
            return shouldRetry(err)
        })
        _, _, err = f.c.Container(f.container)
    }
    if err == swift.ContainerNotFound {
        headers := swift.Headers{}
        if f.opt.StoragePolicy != "" {
            headers["X-Storage-Policy"] = f.opt.StoragePolicy
        }
        err = f.pacer.Call(func() (bool, error) {
            err = f.c.ContainerCreate(f.container, headers)
            return shouldRetry(err)
        })
        err = f.c.ContainerCreate(f.container, nil)
    }
    if err == nil {
        f.containerOK = true
@@ -752,11 +571,7 @@ func (f *Fs) Rmdir(dir string) error {
    if f.root != "" || dir != "" {
        return nil
    }
    var err error
    err = f.pacer.Call(func() (bool, error) {
        err = f.c.ContainerDelete(f.container)
        return shouldRetry(err)
    })
    err := f.c.ContainerDelete(f.container)
    if err == nil {
        f.containerOK = false
    }
@@ -815,10 +630,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
        return nil, fs.ErrorCantCopy
    }
    srcFs := srcObj.fs
    err = f.pacer.Call(func() (bool, error) {
        _, err = f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
        return shouldRetry(err)
    })
    _, err = f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
    if err != nil {
        return nil, err
    }
@@ -867,7 +679,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {
        fs.Debugf(o, "Returning empty Md5sum for swift large object")
        return "", nil
    }
    return strings.ToLower(o.md5), nil
    return strings.ToLower(o.info.Hash), nil
}

// hasHeader checks for the header passed in returning false if the
@@ -896,22 +708,7 @@ func (o *Object) isStaticLargeObject() (bool, error) {

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
    return o.size
}

// decodeMetaData sets the metadata in the object from a swift.Object
//
// Sets
//  o.lastModified
//  o.size
//  o.md5
//  o.contentType
func (o *Object) decodeMetaData(info *swift.Object) (err error) {
    o.lastModified = info.LastModified
    o.size = info.Bytes
    o.md5 = info.Hash
    o.contentType = info.ContentType
    return nil
    return o.info.Bytes
}

// readMetaData gets the metadata if it hasn't already been fetched
@@ -923,23 +720,15 @@ func (o *Object) readMetaData() (err error) {
    if o.headers != nil {
        return nil
    }
    var info swift.Object
    var h swift.Headers
    err = o.fs.pacer.Call(func() (bool, error) {
        info, h, err = o.fs.c.Object(o.fs.container, o.fs.root+o.remote)
        return shouldRetry(err)
    })
    info, h, err := o.fs.c.Object(o.fs.container, o.fs.root+o.remote)
    if err != nil {
        if err == swift.ObjectNotFound {
            return fs.ErrorObjectNotFound
        }
        return err
    }
    o.info = info
    o.headers = h
    err = o.decodeMetaData(&info)
    if err != nil {
        return err
    }
    return nil
}

@@ -950,17 +739,17 @@ func (o *Object) readMetaData() (err error) {
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
    if fs.Config.UseServerModTime {
        return o.lastModified
        return o.info.LastModified
    }
    err := o.readMetaData()
    if err != nil {
        fs.Debugf(o, "Failed to read metadata: %s", err)
        return o.lastModified
        return o.info.LastModified
    }
    modTime, err := o.headers.ObjectMetadata().GetModTime()
    if err != nil {
        // fs.Logf(o, "Failed to read mtime from object: %v", err)
        return o.lastModified
        return o.info.LastModified
    }
    return modTime
}
@@ -983,10 +772,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
            newHeaders[k] = v
        }
    }
    return o.fs.pacer.Call(func() (bool, error) {
        err = o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
        return shouldRetry(err)
    })
    return o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
}

// Storable returns if this object is storable
@@ -994,17 +780,14 @@ func (o *Object) SetModTime(modTime time.Time) error {
// It compares the Content-Type to directoryMarkerContentType - that
// makes it a directory marker which is not storable.
func (o *Object) Storable() bool {
    return o.contentType != directoryMarkerContentType
    return o.info.ContentType != directoryMarkerContentType
}

// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
    headers := fs.OpenOptionHeaders(options)
    _, isRanging := headers["Range"]
    err = o.fs.pacer.Call(func() (bool, error) {
        in, _, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, !isRanging, headers)
        return shouldRetry(err)
    })
    in, _, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, !isRanging, headers)
    return
}

@@ -1031,20 +814,13 @@ func (o *Object) removeSegments(except string) error {
        }
        segmentPath := segmentsRoot + remote
        fs.Debugf(o, "Removing segment file %q in container %q", segmentPath, o.fs.segmentsContainer)
        var err error
        return o.fs.pacer.Call(func() (bool, error) {
            err = o.fs.c.ObjectDelete(o.fs.segmentsContainer, segmentPath)
            return shouldRetry(err)
        })
        return o.fs.c.ObjectDelete(o.fs.segmentsContainer, segmentPath)
    })
    if err != nil {
        return err
    }
    // remove the segments container if empty, ignore errors
    err = o.fs.pacer.Call(func() (bool, error) {
        err = o.fs.c.ContainerDelete(o.fs.segmentsContainer)
        return shouldRetry(err)
    })
    err = o.fs.c.ContainerDelete(o.fs.segmentsContainer)
    if err == nil {
        fs.Debugf(o, "Removed empty container %q", o.fs.segmentsContainer)
    }
@@ -1073,19 +849,9 @@ func urlEncode(str string) string {
func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
    // Create the segmentsContainer if it doesn't exist
    var err error
    err = o.fs.pacer.Call(func() (bool, error) {
        _, _, err = o.fs.c.Container(o.fs.segmentsContainer)
        return shouldRetry(err)
    })
    _, _, err = o.fs.c.Container(o.fs.segmentsContainer)
    if err == swift.ContainerNotFound {
        headers := swift.Headers{}
        if o.fs.opt.StoragePolicy != "" {
            headers["X-Storage-Policy"] = o.fs.opt.StoragePolicy
        }
        err = o.fs.pacer.Call(func() (bool, error) {
            err = o.fs.c.ContainerCreate(o.fs.segmentsContainer, headers)
            return shouldRetry(err)
        })
        err = o.fs.c.ContainerCreate(o.fs.segmentsContainer, nil)
    }
    if err != nil {
        return "", err
@@ -1105,7 +871,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
        fs.Debugf(o, "Uploading segments into %q seems done (%v)", o.fs.segmentsContainer, err)
        break
    }
    n := int64(o.fs.opt.ChunkSize)
    n := int64(chunkSize)
    if size != -1 {
        n = min(left, n)
        headers["Content-Length"] = strconv.FormatInt(n, 10) // set Content-Length as we know it
@@ -1114,10 +880,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
    segmentReader := io.LimitReader(in, n)
    segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
    fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, o.fs.segmentsContainer)
    err = o.fs.pacer.CallNoRetry(func() (bool, error) {
        _, err = o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
        return shouldRetry(err)
    })
    _, err := o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
    if err != nil {
        return "", err
    }
@@ -1128,10 +891,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
    headers["Content-Length"] = "0" // set Content-Length as we know it
    emptyReader := bytes.NewReader(nil)
    manifestName := o.fs.root + o.remote
    err = o.fs.pacer.Call(func() (bool, error) {
        _, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers)
        return shouldRetry(err)
    })
    _, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers)
    return uniquePrefix + "/", err
}
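
How the chunker above sizes the segments it writes to the "_segments" container can be seen in isolation with a runnable sketch (chunk and object sizes are illustrative, not taken from this diff):

    package main

    import "fmt"

    func main() {
        const chunkSize = 5 << 20 // 5 MiB, illustrative only
        total := int64(12 << 20)  // a 12 MiB object
        for i, left := 0, total; left > 0; i++ {
            n := int64(chunkSize)
            if left < n {
                n = left // final short segment
            }
            fmt.Printf("segment %08d: %d bytes\n", i, n)
            left -= n
        }
        // Two 5 MiB segments and one 2 MiB segment are uploaded, then a
        // zero-byte manifest object is written to the real container;
        // reading the manifest streams the segments back in order.
    }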

@@ -1161,31 +921,17 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
    contentType := fs.MimeType(src)
    headers := m.ObjectHeaders()
    uniquePrefix := ""
    if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
    if size > int64(chunkSize) || size == -1 {
        uniquePrefix, err = o.updateChunks(in, headers, size, contentType)
        if err != nil {
            return err
        }
        o.headers = nil // wipe old metadata
    } else {
        if size >= 0 {
            headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length if we know it
        }
        var rxHeaders swift.Headers
        err = o.fs.pacer.CallNoRetry(func() (bool, error) {
            rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
            return shouldRetry(err)
        })
        headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length as we know it
        _, err := o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
        if err != nil {
            return err
        }
        // set Metadata since ObjectPut checked the hash and length so we know the
        // object has been safely uploaded
        o.lastModified = modTime
        o.size = size
        o.md5 = rxHeaders["ETag"]
        o.contentType = contentType
        o.headers = headers
    }

    // If file was a dynamic large object then remove old/all segments
@@ -1196,7 +942,8 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
        }
    }

    // Read the metadata from the newly created object if necessary
    // Read the metadata from the newly created object
    o.headers = nil // wipe old metadata
    return o.readMetaData()
}

@@ -1207,10 +954,7 @@ func (o *Object) Remove() error {
        return err
    }
    // Remove file/manifest first
    err = o.fs.pacer.Call(func() (bool, error) {
        err = o.fs.c.ObjectDelete(o.fs.container, o.fs.root+o.remote)
        return shouldRetry(err)
    })
    err = o.fs.c.ObjectDelete(o.fs.container, o.fs.root+o.remote)
    if err != nil {
        return err
    }
@@ -1226,7 +970,7 @@ func (o *Object) Remove() error {

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
    return o.contentType
    return o.info.ContentType
}

// Check the interfaces are satisfied

@@ -1,10 +1,10 @@
// Test Swift filesystem interface
package swift
package swift_test

import (
    "testing"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/backend/swift"
    "github.com/ncw/rclone/fstest/fstests"
)

@@ -12,12 +12,6 @@ import (
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestSwift:",
        NilObject: (*Object)(nil),
        NilObject: (*swift.Object)(nil),
    })
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    return f.setUploadChunkSize(cs)
}

var _ fstests.SetUploadChunkSizer = (*Fs)(nil)

@@ -1,428 +0,0 @@
package union

import (
    "fmt"
    "io"
    "path"
    "path/filepath"
    "strings"
    "time"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config/configmap"
    "github.com/ncw/rclone/fs/config/configstruct"
    "github.com/ncw/rclone/fs/hash"
    "github.com/pkg/errors"
)

// Register with Fs
func init() {
    fsi := &fs.RegInfo{
        Name: "union",
        Description: "A stackable unification remote, which can appear to merge the contents of several remotes",
        NewFs: NewFs,
        Options: []fs.Option{{
            Name: "remotes",
            Help: "List of space separated remotes.\nCan be 'remotea:test/dir remoteb:', '\"remotea:test/space dir\" remoteb:', etc.\nThe last remote is used to write to.",
            Required: true,
        }},
    }
    fs.Register(fsi)
}

// Options defines the configuration for this backend
type Options struct {
    Remotes fs.SpaceSepList `config:"remotes"`
}

// Fs represents a union of remotes
type Fs struct {
    name string // name of this remote
    features *fs.Features // optional features
    opt Options // options for this Fs
    root string // the path we are working on
    remotes []fs.Fs // slice of remotes
    wr fs.Fs // writable remote
    hashSet hash.Set // intersection of hash types
}

// Object describes a union Object
//
// This is a wrapped object which returns the Union Fs as its parent
type Object struct {
    fs.Object
    fs *Fs // what this object is part of
}

// Wrap an existing object in the union Object
func (f *Fs) wrapObject(o fs.Object) *Object {
    return &Object{
        Object: o,
        fs: f,
    }
}

// Fs returns the union Fs as the parent
func (o *Object) Fs() fs.Info {
    return o.fs
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
    return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
    return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
    return fmt.Sprintf("union root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
    return f.features
}

// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(dir string) error {
    return f.wr.Rmdir(dir)
}

// Hashes returns hash.HashNone to indicate remote hashing is unavailable
func (f *Fs) Hashes() hash.Set {
    return f.hashSet
}

// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(dir string) error {
    return f.wr.Mkdir(dir)
}

// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge() error {
    return f.wr.Features().Purge()
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
    if src.Fs() != f.wr {
        fs.Debugf(src, "Can't copy - not same remote type")
        return nil, fs.ErrorCantCopy
    }
    o, err := f.wr.Features().Copy(src, remote)
    if err != nil {
        return nil, err
    }
    return f.wrapObject(o), nil
}

// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
    if src.Fs() != f.wr {
        fs.Debugf(src, "Can't move - not same remote type")
        return nil, fs.ErrorCantMove
    }
    o, err := f.wr.Features().Move(src, remote)
    if err != nil {
        return nil, err
    }
    return f.wrapObject(o), err
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
    srcFs, ok := src.(*Fs)
    if !ok {
        fs.Debugf(srcFs, "Can't move directory - not same remote type")
        return fs.ErrorCantDirMove
    }
    return f.wr.Features().DirMove(srcFs.wr, srcRemote, dstRemote)
}

// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(fn func(string, fs.EntryType), ch <-chan time.Duration) {
    var remoteChans []chan time.Duration

    for _, remote := range f.remotes {
        if ChangeNotify := remote.Features().ChangeNotify; ChangeNotify != nil {
            ch := make(chan time.Duration)
            remoteChans = append(remoteChans, ch)
            ChangeNotify(fn, ch)
        }
    }

    go func() {
        for i := range ch {
            for _, c := range remoteChans {
                c <- i
            }
        }
        for _, c := range remoteChans {
            close(c)
        }
    }()
}
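
The goroutine above fans one inbound interval channel out to a channel per remote and closes them all when the inbound channel closes. Reduced to a runnable sketch (channel names and counts are illustrative):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        in := make(chan time.Duration)
        outs := []chan time.Duration{make(chan time.Duration), make(chan time.Duration)}
        for n, c := range outs {
            go func(n int, c <-chan time.Duration) {
                for d := range c { // drains until close(c)
                    fmt.Printf("remote %d: poll interval %v\n", n, d)
                }
            }(n, c)
        }
        go func() {
            for d := range in {
                for _, c := range outs {
                    c <- d // mirror each value to every remote
                }
            }
            for _, c := range outs {
                close(c)
            }
        }()
        in <- time.Second
        close(in)
        time.Sleep(100 * time.Millisecond) // demo only: let receivers print
    }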

// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
    for _, remote := range f.remotes {
        if DirCacheFlush := remote.Features().DirCacheFlush; DirCacheFlush != nil {
            DirCacheFlush()
        }
    }
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    o, err := f.wr.Features().PutStream(in, src, options...)
    if err != nil {
        return nil, err
    }
    return f.wrapObject(o), err
}

// About gets quota information from the Fs
func (f *Fs) About() (*fs.Usage, error) {
    return f.wr.Features().About()
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    o, err := f.wr.Put(in, src, options...)
    if err != nil {
        return nil, err
    }
    return f.wrapObject(o), err
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
    set := make(map[string]fs.DirEntry)
    found := false
    for _, remote := range f.remotes {
        var remoteEntries, err = remote.List(dir)
        if err == fs.ErrorDirNotFound {
            continue
        }
        if err != nil {
            return nil, errors.Wrapf(err, "List failed on %v", remote)
        }
        found = true
        for _, remoteEntry := range remoteEntries {
            set[remoteEntry.Remote()] = remoteEntry
        }
    }
    if !found {
        return nil, fs.ErrorDirNotFound
    }
    for _, entry := range set {
        if o, ok := entry.(fs.Object); ok {
            entry = f.wrapObject(o)
        }
        entries = append(entries, entry)
    }
    return entries, nil
}

// NewObject creates a new remote union file object based on the first Object it finds (reverse remote order)
func (f *Fs) NewObject(path string) (fs.Object, error) {
    for i := range f.remotes {
        var remote = f.remotes[len(f.remotes)-i-1]
        var obj, err = remote.NewObject(path)
        if err == fs.ErrorObjectNotFound {
            continue
        }
        if err != nil {
            return nil, errors.Wrapf(err, "NewObject failed on %v", remote)
        }
        return f.wrapObject(obj), nil
    }
    return nil, fs.ErrorObjectNotFound
}

// Precision is the greatest Precision of all remotes
func (f *Fs) Precision() time.Duration {
    var greatestPrecision time.Duration
    for _, remote := range f.remotes {
        if remote.Precision() > greatestPrecision {
            greatestPrecision = remote.Precision()
        }
    }
    return greatestPrecision
}

// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    }
    if len(opt.Remotes) == 0 {
        return nil, errors.New("union can't point to an empty remote - check the value of the remotes setting")
    }
    if len(opt.Remotes) == 1 {
        return nil, errors.New("union can't point to a single remote - check the value of the remotes setting")
    }
    for _, remote := range opt.Remotes {
        if strings.HasPrefix(remote, name+":") {
            return nil, errors.New("can't point union remote at itself - check the value of the remote setting")
        }
    }

    var remotes []fs.Fs
    for i := range opt.Remotes {
        // Last remote first so we return the correct (last) matching fs in case of fs.ErrorIsFile
        var remote = opt.Remotes[len(opt.Remotes)-i-1]
        _, configName, fsPath, err := fs.ParseRemote(remote)
        if err != nil {
            return nil, err
        }
        var rootString = path.Join(fsPath, filepath.ToSlash(root))
        if configName != "local" {
            rootString = configName + ":" + rootString
        }
        myFs, err := fs.NewFs(rootString)
        if err != nil {
            if err == fs.ErrorIsFile {
                return myFs, err
            }
            return nil, err
        }
        remotes = append(remotes, myFs)
    }

    // Reverse the remotes again so they are in the order as before
    for i, j := 0, len(remotes)-1; i < j; i, j = i+1, j-1 {
        remotes[i], remotes[j] = remotes[j], remotes[i]
    }

    f := &Fs{
        name: name,
        root: root,
        opt: *opt,
        remotes: remotes,
        wr: remotes[len(remotes)-1],
    }
    var features = (&fs.Features{
        CaseInsensitive: true,
        DuplicateFiles: false,
        ReadMimeType: true,
        WriteMimeType: true,
        CanHaveEmptyDirectories: true,
        BucketBased: true,
        SetTier: true,
        GetTier: true,
    }).Fill(f)
    features = features.Mask(f.wr) // mask the features just on the writable fs

    // Really need the union of all remotes for these, so
    // re-instate and calculate separately.
    features.ChangeNotify = f.ChangeNotify
    features.DirCacheFlush = f.DirCacheFlush

    // FIXME maybe should be masking the bools here?

    // Clear ChangeNotify and DirCacheFlush if all are nil
    clearChangeNotify := true
    clearDirCacheFlush := true
    for _, remote := range f.remotes {
        remoteFeatures := remote.Features()
        if remoteFeatures.ChangeNotify != nil {
            clearChangeNotify = false
        }
        if remoteFeatures.DirCacheFlush != nil {
            clearDirCacheFlush = false
        }
    }
    if clearChangeNotify {
        features.ChangeNotify = nil
    }
    if clearDirCacheFlush {
        features.DirCacheFlush = nil
    }

    f.features = features

    // Get common intersection of hashes
    hashSet := f.remotes[0].Hashes()
    for _, remote := range f.remotes[1:] {
        hashSet = hashSet.Overlap(remote.Hashes())
    }
    f.hashSet = hashSet

    return f, nil
}

// Check the interfaces are satisfied
var (
    _ fs.Fs = (*Fs)(nil)
    _ fs.Purger = (*Fs)(nil)
    _ fs.PutStreamer = (*Fs)(nil)
    _ fs.Copier = (*Fs)(nil)
    _ fs.Mover = (*Fs)(nil)
    _ fs.DirMover = (*Fs)(nil)
    _ fs.DirCacheFlusher = (*Fs)(nil)
    _ fs.ChangeNotifier = (*Fs)(nil)
    _ fs.Abouter = (*Fs)(nil)
)
@@ -1,18 +0,0 @@
// Test Union filesystem interface
package union_test

import (
    "testing"

    _ "github.com/ncw/rclone/backend/local"
    "github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestUnion:",
        NilObject: nil,
        SkipFsMatch: true,
    })
}
@@ -6,19 +6,12 @@ import (
    "regexp"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/hash"
)

const (
    // Wed, 27 Sep 2017 14:28:34 GMT
    timeFormat = time.RFC1123
    // The same as time.RFC1123 with optional leading zeros on the date
    // see https://github.com/ncw/rclone/issues/2574
    noZerosRFC1123 = "Mon, _2 Jan 2006 15:04:05 MST"
)

// Multistatus contains responses returned from an HTTP 207 return code
@@ -66,12 +59,11 @@ type Response struct {
// Note that status collects all the status values for which we just
// check the first is OK.
type Prop struct {
    Status []string `xml:"DAV: status"`
    Name string `xml:"DAV: prop>displayname,omitempty"`
    Type *xml.Name `xml:"DAV: prop>resourcetype>collection,omitempty"`
    Size int64 `xml:"DAV: prop>getcontentlength,omitempty"`
    Modified Time `xml:"DAV: prop>getlastmodified,omitempty"`
    Checksums []string `xml:"prop>checksums>checksum,omitempty"`
    Status []string `xml:"DAV: status"`
    Name string `xml:"DAV: prop>displayname,omitempty"`
    Type *xml.Name `xml:"DAV: prop>resourcetype>collection,omitempty"`
    Size int64 `xml:"DAV: prop>getcontentlength,omitempty"`
    Modified Time `xml:"DAV: prop>getlastmodified,omitempty"`
}

// Parse a status of the form "HTTP/1.1 200 OK" or "HTTP/1.1 200"
@@ -97,26 +89,6 @@ func (p *Prop) StatusOK() bool {
    return false
}

// Hashes returns a map of all checksums - may be nil
func (p *Prop) Hashes() (hashes map[hash.Type]string) {
    if len(p.Checksums) == 0 {
        return nil
    }
    hashes = make(map[hash.Type]string)
    for _, checksums := range p.Checksums {
        checksums = strings.ToLower(checksums)
        for _, checksum := range strings.Split(checksums, " ") {
            switch {
            case strings.HasPrefix(checksum, "sha1:"):
                hashes[hash.SHA1] = checksum[5:]
            case strings.HasPrefix(checksum, "md5:"):
                hashes[hash.MD5] = checksum[4:]
            }
        }
    }
    return hashes
}
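
The checksums property that Hashes parses is a space separated list like "sha1:<hex> md5:<hex>", lower-cased first. A self-contained sketch of the same parsing with an illustrative input (the hex values are the well-known empty-input digests, used here only as sample data):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        prop := "SHA1:da39a3ee5e6b4b0d3255bfef95601890afd80709 MD5:d41d8cd98f00b204e9800998ecf8427e"
        hashes := map[string]string{}
        for _, checksum := range strings.Split(strings.ToLower(prop), " ") {
            switch {
            case strings.HasPrefix(checksum, "sha1:"):
                hashes["sha1"] = checksum[5:]
            case strings.HasPrefix(checksum, "md5:"):
                hashes["md5"] = checksum[4:]
            }
        }
        fmt.Println(hashes) // map[md5:d41d8... sha1:da39a...]
    }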

// PropValue is a tagged name and value
type PropValue struct {
    XMLName xml.Name `xml:""`
@@ -166,15 +138,11 @@ func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {

// Possible time formats to parse the time with
var timeFormats = []string{
    timeFormat, // Wed, 27 Sep 2017 14:28:34 GMT (as per RFC)
    time.RFC1123Z, // Fri, 05 Jan 2018 14:14:38 +0000 (as used by mydrive.ch)
    time.UnixDate, // Wed May 17 15:31:58 UTC 2017 (as used in an internal server)
    noZerosRFC1123, // Fri, 7 Sep 2018 08:49:58 GMT (as used by server in #2574)
    time.RFC3339, // Wed, 31 Oct 2018 13:57:11 CET (as used by komfortcloud.de)
    timeFormat, // Wed, 27 Sep 2017 14:28:34 GMT (as per RFC)
    time.RFC1123Z, // Fri, 05 Jan 2018 14:14:38 +0000 (as used by mydrive.ch)
    time.UnixDate, // Wed May 17 15:31:58 UTC 2017 (as used in an internal server)
}
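
UnmarshalXML below tries these layouts in order and keeps the first that parses. A runnable sketch showing why the noZerosRFC1123 entry matters for the #2574-style dates (the input value is taken from the comment above):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        formats := []string{
            time.RFC1123,                    // needs a zero-padded day, so it rejects "7"
            time.RFC1123Z,
            time.UnixDate,
            "Mon, _2 Jan 2006 15:04:05 MST", // noZerosRFC1123
        }
        v := "Fri, 7 Sep 2018 08:49:58 GMT" // single-digit day
        for _, layout := range formats {
            if parsed, err := time.Parse(layout, v); err == nil {
                fmt.Println(layout, "->", parsed)
                break // first successful layout wins
            }
        }
    }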

var oneTimeError sync.Once

// UnmarshalXML turns XML into a Time
func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
    var v string
@@ -183,12 +151,6 @@ func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
        return err
    }

    // If time is missing then return the epoch
    if v == "" {
        *t = Time(time.Unix(0, 0))
        return nil
    }

    // Parse the time format in multiple possible ways
    var newT time.Time
    for _, timeFormat := range timeFormats {
@@ -198,33 +160,5 @@ func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
            break
        }
    }
    if err != nil {
        oneTimeError.Do(func() {
            fs.Errorf(nil, "Failed to parse time %q - using the epoch", v)
        })
        // Return the epoch instead
        *t = Time(time.Unix(0, 0))
        // ignore error
        err = nil
    }
    return err
}

// Quota is used to read the bytes used and available
//
// <d:multistatus xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns" xmlns:oc="http://owncloud.org/ns" xmlns:nc="http://nextcloud.org/ns">
//   <d:response>
//     <d:href>/remote.php/webdav/</d:href>
//     <d:propstat>
//       <d:prop>
//         <d:quota-available-bytes>-3</d:quota-available-bytes>
//         <d:quota-used-bytes>376461895</d:quota-used-bytes>
//       </d:prop>
//       <d:status>HTTP/1.1 200 OK</d:status>
//     </d:propstat>
//   </d:response>
// </d:multistatus>
type Quota struct {
    Available int64 `xml:"DAV: response>propstat>prop>quota-available-bytes"`
    Used int64 `xml:"DAV: response>propstat>prop>quota-used-bytes"`
}
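
Decoding the 207 multistatus body from the comment into Quota is plain encoding/xml work. A runnable sketch with the body abbreviated from the example above (note a negative quota-available-bytes is a server sentinel meaning something like unknown or unlimited; the exact semantics depend on the server):

    package main

    import (
        "encoding/xml"
        "fmt"
    )

    type Quota struct {
        Available int64 `xml:"DAV: response>propstat>prop>quota-available-bytes"`
        Used      int64 `xml:"DAV: response>propstat>prop>quota-used-bytes"`
    }

    func main() {
        body := `<d:multistatus xmlns:d="DAV:"><d:response><d:propstat><d:prop>
    <d:quota-available-bytes>-3</d:quota-available-bytes>
    <d:quota-used-bytes>376461895</d:quota-used-bytes>
    </d:prop><d:status>HTTP/1.1 200 OK</d:status></d:propstat></d:response></d:multistatus>`
        var q Quota
        if err := xml.Unmarshal([]byte(body), &q); err != nil {
            panic(err)
        }
        fmt.Println(q.Used, q.Available) // 376461895 -3
    }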

@@ -1,32 +0,0 @@
package odrvcookie

import (
    "time"

    "github.com/ncw/rclone/lib/rest"
)

// CookieRenew holds information for the renew
type CookieRenew struct {
    srv *rest.Client
    timer *time.Ticker
    renewFn func()
}

// NewRenew returns and starts a CookieRenew
func NewRenew(interval time.Duration, renewFn func()) *CookieRenew {
    renew := CookieRenew{
        timer: time.NewTicker(interval),
        renewFn: renewFn,
    }
    go renew.Renew()
    return &renew
}

// Renew calls the renewFn for every tick
func (c *CookieRenew) Renew() {
    for {
        <-c.timer.C // wait for tick
        c.renewFn()
    }
}
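
The helper deleted above simply runs renewFn on every ticker fire until the process exits. A self-contained mirror of its shape, runnable stand-alone (the interval is shortened for demonstration and the rest.Client field is omitted):

    package main

    import (
        "fmt"
        "time"
    )

    // CookieRenew mirrors the struct above, minus the rest.Client.
    type CookieRenew struct {
        timer   *time.Ticker
        renewFn func()
    }

    func NewRenew(interval time.Duration, renewFn func()) *CookieRenew {
        renew := CookieRenew{timer: time.NewTicker(interval), renewFn: renewFn}
        go func() {
            for range renew.timer.C { // wait for each tick
                renew.renewFn()
            }
        }()
        return &renew
    }

    func main() {
        NewRenew(50*time.Millisecond, func() { fmt.Println("renewing cookies") })
        time.Sleep(175 * time.Millisecond) // demo: fires about 3 times, then exits
    }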

Some files were not shown because too many files have changed in this diff.