Mirror of https://github.com/rclone/rclone.git (synced 2026-01-13 05:53:16 +00:00)
Compare commits (348 commits)
.gitignore (vendored): 4 changes
@@ -4,7 +4,3 @@ rclone
rclonetest/rclonetest
build
docs/public
MANUAL.md
MANUAL.html
MANUAL.txt
rclone.1
.travis.yml: 19 changes
@@ -1,12 +1,19 @@
language: go
sudo: false
osx_image: xcode7.3

os:
- linux
- osx

go:
- 1.3.3
- 1.4.2
- 1.5
- 1.5.4
- 1.6.2
- tip

install:
- make build_dep

script:
- go get ./...
- go test -v ./...
- go test -cpu=2 -race -v ./...
- make check
- make quicktest
CONTRIBUTING.md (new file): 161 lines
@@ -0,0 +1,161 @@
# Contributing to rclone #

This is a short guide on how to contribute things to rclone.

## Reporting a bug ##

Bug reports are welcome. When submitting please add:

* Rclone version (eg output from `rclone -V`)
* Which OS you are using and how many bits (eg Windows 7, 64 bit)
* The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
* A log of the command with the `-v` flag (eg output from `rclone -v copy /tmp remote:tmp`)
* if the log contains secrets then edit the file with a text editor first to obscure them

## Submitting a pull request ##

If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via Github.

If it is a big feature then make an issue first so it can be discussed.

You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info.

First in your web browser press the fork button on [rclone's Github
page](https://github.com/ncw/rclone).

Now in your terminal

    go get github.com/ncw/rclone
    cd $GOPATH/src/github.com/ncw/rclone
    git remote rename origin upstream
    git remote add origin git@github.com:YOURUSER/rclone.git

Make a branch to add your new feature

    git checkout -b my-new-feature

And get hacking.

When ready - run the unit tests for the code you changed

    go test -v

Note that you may need to make a test remote, eg `TestSwift`, for some
of the unit tests.

Note the top level Makefile targets

* make check
* make test

Both of these will be run by Travis when you make a pull request but
you can do this yourself locally too.

Make sure you

* Add documentation for a new feature
* Add unit tests for a new feature
* squash commits down to one per feature
* rebase to master `git rebase master`

When you are done with that

    git push origin my-new-feature

Go to the Github website and click [Create pull
request](https://help.github.com/articles/creating-a-pull-request/).

Your patch will get reviewed and you might get asked to fix some stuff.

If so, then make the changes in the same branch, squash the commits,
rebase it to master then push it to Github with `--force`.

## Testing ##

rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.

    go test -v ./...

rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud
storage systems by mocking all their interfaces, rclone unit tests can
run against any of the backends. This is done by making specially
named remotes in the default config file.

If you wanted to test changes in the `drive` backend, then you would
need to make a remote called `TestDrive`.
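A `TestDrive` remote lives in rclone's normal config file and is usually created with `rclone config`. As an illustration only (the exact keys depend on the backend and are filled in for you by the config wizard), a minimal stanza might look like this:

    [TestDrive]
    type = drive
    client_id =
    client_secret =
    token = {"access_token":"...","token_type":"Bearer","expiry":"..."}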
You can then run the unit tests in the drive directory. These tests
are skipped if `TestDrive:` isn't defined.

    cd drive
    go test -v

You can then run the integration tests which test all of rclone's
operations. Normally these get run against the local filing system,
but they can be run against any of the remotes.

    cd ../fs
    go test -v -remote TestDrive:
    go test -v -remote TestDrive: -subdir

If you want to run all the integration tests against all the remotes,
then run in that directory

    go run test_all.go

## Making a release ##

There are separate instructions for making a release in the RELEASE.md
file - doing the first few steps is useful before making a
contribution.

* go get -u -f -v ./...
* make check
* make test
* make tag

## Writing a new backend ##

Choose a name. The docs here will use `remote` as an example.

Note that in rclone terminology a file system backend is called a
remote or an fs.

Research

* Look at the interfaces defined in `fs/fs.go`
* Study one or more of the existing remotes

Getting going

* Create `remote/remote.go` (copy this from a similar fs)
* Add your fs to the imports in `fs/all/all.go`

Unit tests

* Create a config entry called `TestRemote` for the unit tests to use
* Add your fs to the end of `fstest/fstests/gen_tests.go`
* generate `remote/remote_test.go` unit tests `cd fstest/fstests; go generate`
* Make sure all tests pass with `go test -v`

Integration tests

* Add your fs to `fs/test_all.go`
* Make sure integration tests pass with
    * `cd fs`
    * `go test -v -remote TestRemote:` and
    * `go test -v -remote TestRemote: -subdir`

Add your fs to the docs

* `README.md` - main Github page
* `docs/content/remote.md` - main docs page
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/about.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `make_manual.py` - add the page to the `docs` constant
ISSUE_TEMPLATE.md (new file): 13 lines
@@ -0,0 +1,13 @@
When filing an issue, please include the following information if
possible as well as a description of the problem.

> What is your rclone version (eg output from `rclone -V`)

> Which OS you are using and how many bits (eg Windows 7, 64 bit)

> Which cloud storage system are you using? (eg Google Drive)

> The command you were trying to run (eg `rclone copy /tmp remote:tmp`)

> A log from the command with the `-v` flag (eg output from `rclone -v copy /tmp remote:tmp`)
MANUAL.html (new file): 2777 lines (diff suppressed because it is too large)
MANUAL.txt (new file): 3957 lines (diff suppressed because it is too large)
Makefile: 41 changes
@@ -1,3 +1,4 @@
SHELL = /bin/bash
TAG := $(shell git describe --tags)
LAST_TAG := $(shell git describe --tags --abbrev=0)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
@@ -6,14 +7,33 @@ rclone:
	@go version
	go install -v ./...

# Full suite of integration tests
test: rclone
	go test ./...
	cd fs && ./test_all.sh
	cd fs && go run test_all.go

# Quick test
quicktest:
	go test ./...
	go test -cpu=2 -race ./...

# Do source code quality checks
check: rclone
	go vet ./...
	errcheck ./...
	golint ./...
	goimports -d . | grep . ; test $$? -eq 1
	golint ./... | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1

# Get the build dependencies
build_dep:
	go get -t ./...
	go get -u github.com/kisielk/errcheck
	go get -u golang.org/x/tools/cmd/goimports
	go get -u github.com/golang/lint/golint

# Update dependencies
update:
	go get -t -u -f -v ./...

doc: rclone.1 MANUAL.html MANUAL.txt

@@ -37,7 +57,7 @@ clean:
	go clean ./...
	find . -name \*~ | xargs -r rm -f
	rm -rf build docs/public
	rm -f rclone rclonetest/rclonetest rclone.1 MANUAL.md MANUAL.html MANUAL.txt
	rm -f rclone rclonetest/rclonetest

website:
	cd docs && hugo
@@ -48,16 +68,25 @@ upload_website: website
upload:
	rclone -v copy build/ memstore:downloads-rclone-org

upload_github:
	./upload-github $(TAG)

cross: doc
	./cross-compile $(TAG)

serve:
beta:
	./cross-compile $(TAG)β
	rm build/*-current-*
	rclone -v copy build/ memstore:pub-rclone-org/$(TAG)β
	@echo Beta release ready at http://pub.rclone.org/$(TAG)%CE%B2/

serve: website
	cd docs && hugo server -v -w

tag:
tag: doc
	@echo "Old tag is $(LAST_TAG)"
	@echo "New tag is $(NEW_TAG)"
	echo -e "package fs\n\n// Version of rclone\nconst Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
	perl -lpe 's/VERSION/${NEW_TAG}/g; s/DATE/'`date -I`'/g;' docs/content/downloads.md.in > docs/content/downloads.md
	git tag $(NEW_TAG)
	@echo "Add this to changelog in docs/content/changelog.md"
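The tag target above writes fs/version.go by piping a small Go file through gofmt (this diff switches the version from a const to a var). Unfolding the echo gives roughly the file below; the version string is illustrative only, since NEW_TAG is derived from the last tag by stripping the leading v and adding 0.01.

    package fs

    // Version of rclone
    var Version = "v1.33"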
README.md: 18 changes
@@ -2,11 +2,13 @@
[Website](http://rclone.org) |
[Documentation](http://rclone.org/docs/) |
[Contributing](CONTRIBUTING.md) |
[Changelog](http://rclone.org/changelog/) |
[Installation](http://rclone.org/install/) |
[G+](https://google.com/+RcloneOrg)

[Build Status](https://travis-ci.org/ncw/rclone) [GoDoc](https://godoc.org/github.com/ncw/rclone)

[Build Status](https://travis-ci.org/ncw/rclone) [Windows Build Status](https://ci.appveyor.com/project/ncw/rclone) [GoDoc](https://godoc.org/github.com/ncw/rclone)

Rclone is a command line program to sync files and directories to and from

@@ -15,18 +17,22 @@ Rclone is a command line program to sync files and directories to and from
* Openstack Swift / Rackspace cloud files / Memset Memstore
* Dropbox
* Google Cloud Storage
* Amazon Cloud Drive
* Amazon Drive
* Microsoft One Drive
* Hubic
* Backblaze B2
* Yandex Disk
* The local filesystem

Features

* MD5SUMs checked at all times for file integrity
* MD5/SHA1 hashes checked at all times for file integrity
* Timestamps preserved on files
* Partial syncs supported on a whole file basis
* Copy mode to just copy new/changed files
* Sync mode to make a directory identical
* Check mode to check all MD5SUMs
* Can sync to and from network, eg two different Drive accounts
* Sync (one way) mode to make a directory identical
* Check mode to check for file hash equality
* Can sync to and from network, eg two different cloud accounts

See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.
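The copy, sync and check modes listed above correspond to rclone subcommands. The invocations below are illustrative only; the remote name and paths are placeholders, and the syntax follows the `rclone copy /tmp remote:tmp` example used elsewhere in this document.

    rclone copy /home/me/photos remote:photos    # copy new/changed files only
    rclone sync /home/me/photos remote:photos    # make remote:photos identical to the source (one way)
    rclone check /home/me/photos remote:photos   # compare file hashes without transferring anything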
RELEASE.md:
@@ -8,11 +8,12 @@ Required software for making a release
* golint - go get github.com/golang/lint

Making a release
* go get -u -f -v ./...
* make update
* make check
* make test
* make tag
* edit docs/content/changelog.md
* make doc
* git commit -a -v
* make retag
* # Set the GOPATH for a gox enabled compiler - . ~/bin/go-cross - not required for go >= 1.5
@@ -20,3 +21,4 @@ Making a release
* make upload
* make upload_website
* git push --tags origin master
* make upload_github
amazonclouddrive/amazonclouddrive.go:
@@ -16,12 +16,9 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/go-acd"
|
||||
@@ -29,22 +26,27 @@ import (
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/oauthutil"
|
||||
"github.com/ncw/rclone/pacer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/pflag"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
const (
|
||||
rcloneClientID = "amzn1.application-oa2-client.6bf18d2d1f5b485c94c8988bb03ad0e7"
|
||||
rcloneClientSecret = "k8/NyszKm5vEkZXAwsbGkd6C3NrbjIqMg4qEhIeF14Szub2wur+/teS3ubXgsLe9//+tr/qoqK+lq6mg8vWkoA=="
|
||||
folderKind = "FOLDER"
|
||||
fileKind = "FILE"
|
||||
assetKind = "ASSET"
|
||||
statusAvailable = "AVAILABLE"
|
||||
timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
|
||||
minSleep = 20 * time.Millisecond
|
||||
rcloneClientID = "amzn1.application-oa2-client.6bf18d2d1f5b485c94c8988bb03ad0e7"
|
||||
rcloneEncryptedClientSecret = "k8/NyszKm5vEkZXAwsbGkd6C3NrbjIqMg4qEhIeF14Szub2wur+/teS3ubXgsLe9//+tr/qoqK+lq6mg8vWkoA=="
|
||||
folderKind = "FOLDER"
|
||||
fileKind = "FILE"
|
||||
assetKind = "ASSET"
|
||||
statusAvailable = "AVAILABLE"
|
||||
timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
|
||||
minSleep = 20 * time.Millisecond
|
||||
warnFileSize = 50 << 30 // Display warning for files larger than this size
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
// Flags
|
||||
tempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
|
||||
// Description of how to auth for this app
|
||||
acdConfig = &oauth2.Config{
|
||||
Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
|
||||
@@ -53,46 +55,50 @@ var (
|
||||
TokenURL: "https://api.amazon.com/auth/o2/token",
|
||||
},
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: fs.Reveal(rcloneClientSecret),
|
||||
ClientSecret: fs.Reveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
}
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.Info{
|
||||
Name: "amazon cloud drive",
|
||||
NewFs: NewFs,
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "amazon cloud drive",
|
||||
Description: "Amazon Drive",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string) {
|
||||
err := oauthutil.Config(name, acdConfig)
|
||||
err := oauthutil.Config("amazon cloud drive", name, acdConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: oauthutil.ConfigClientID,
|
||||
Name: fs.ConfigClientID,
|
||||
Help: "Amazon Application Client Id - leave blank normally.",
|
||||
}, {
|
||||
Name: oauthutil.ConfigClientSecret,
|
||||
Name: fs.ConfigClientSecret,
|
||||
Help: "Amazon Application Client Secret - leave blank normally.",
|
||||
}},
|
||||
})
|
||||
pflag.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
|
||||
}
|
||||
|
||||
// FsAcd represents a remote acd server
|
||||
type FsAcd struct {
|
||||
name string // name of this remote
|
||||
c *acd.Client // the connection to the acd server
|
||||
root string // the path we are working on
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
pacer *pacer.Pacer // pacer for API calls
|
||||
// Fs represents a remote acd server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
c *acd.Client // the connection to the acd server
|
||||
noAuthClient *http.Client // unauthenticated http client
|
||||
root string // the path we are working on
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
pacer *pacer.Pacer // pacer for API calls
|
||||
ts *oauthutil.TokenSource // token source for oauth
|
||||
}
|
||||
|
||||
// FsObjectAcd describes a acd object
|
||||
// Object describes a acd object
|
||||
//
|
||||
// Will definitely have info but maybe not meta
|
||||
type FsObjectAcd struct {
|
||||
acd *FsAcd // what this object is part of
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
info *acd.Node // Info from the acd object if known
|
||||
}
|
||||
@@ -100,18 +106,18 @@ type FsObjectAcd struct {
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *FsAcd) Name() string {
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *FsAcd) Root() string {
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String converts this FsAcd to a string
|
||||
func (f *FsAcd) String() string {
|
||||
return fmt.Sprintf("Amazon cloud drive root '%s'", f.root)
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("amazon drive root '%s'", f.root)
|
||||
}
|
||||
|
||||
// Pattern to match a acd path
|
||||
@@ -125,74 +131,72 @@ func parsePath(path string) (root string) {
|
||||
|
||||
// retryErrorCodes is a slice of error codes that we will retry
|
||||
var retryErrorCodes = []int{
|
||||
400, // Bad request (seen in "Next token is expired")
|
||||
401, // Unauthorized (seen in "Token has expired")
|
||||
408, // Request Timeout
|
||||
429, // Rate exceeded.
|
||||
500, // Get occasional 500 Internal Server Error
|
||||
409, // Conflict - happens in the unit tests a lot
|
||||
503, // Service Unavailable
|
||||
504, // Gateway Time-out
|
||||
}
|
||||
|
||||
// shouldRetry returns a boolean as to whether this resp and err
|
||||
// deserve to be retried. It returns the err as a convenience
|
||||
func shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
// See if HTTP error code is to be retried
|
||||
if err != nil && resp != nil {
|
||||
for _, e := range retryErrorCodes {
|
||||
if resp.StatusCode == e {
|
||||
return true, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Allow retry if request times out. Adapted from
|
||||
// http://stackoverflow.com/questions/23494950/specifically-check-for-timeout-error
|
||||
switch err := err.(type) {
|
||||
case *url.Error:
|
||||
if err, ok := err.Err.(net.Error); ok && err.Timeout() {
|
||||
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
if resp != nil {
|
||||
if resp.StatusCode == 401 {
|
||||
f.ts.Invalidate()
|
||||
fs.Log(f, "401 error received - invalidating token")
|
||||
return true, err
|
||||
}
|
||||
case net.Error:
|
||||
if err.Timeout() {
|
||||
// Work around receiving this error sporadically on authentication
|
||||
//
|
||||
// HTTP code 403: "403 Forbidden", response body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
|
||||
if resp.StatusCode == 403 && strings.Contains(err.Error(), "Authorization header requires") {
|
||||
fs.Log(f, "403 \"Authorization header requires...\" error received - retry")
|
||||
return true, err
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
// NewFs constructs an FsAcd from the path, container:path
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
root = parsePath(root)
|
||||
oAuthClient, err := oauthutil.NewClient(name, acdConfig)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(name, acdConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure amazon cloud drive: %v", err)
|
||||
log.Fatalf("Failed to configure Amazon Drive: %v", err)
|
||||
}
|
||||
|
||||
c := acd.NewClient(oAuthClient)
|
||||
c.UserAgent = fs.UserAgent
|
||||
f := &FsAcd{
|
||||
name: name,
|
||||
root: root,
|
||||
c: c,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
c: c,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
|
||||
noAuthClient: fs.Config.Client(),
|
||||
ts: ts,
|
||||
}
|
||||
|
||||
// Update endpoints
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, resp, err = f.c.Account.GetEndpoints()
|
||||
return shouldRetry(resp, err)
|
||||
return f.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to get endpoints: %v", err)
|
||||
return nil, errors.Wrap(err, "failed to get endpoints")
|
||||
}
|
||||
|
||||
// Get rootID
|
||||
var rootInfo *acd.Folder
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
rootInfo, resp, err = f.c.Nodes.GetRoot()
|
||||
return shouldRetry(resp, err)
|
||||
return f.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil || rootInfo.Id == nil {
|
||||
return nil, fmt.Errorf("Failed to get root: %v", err)
|
||||
return nil, errors.Wrap(err, "failed to get root")
|
||||
}
|
||||
|
||||
f.dirCache = dircache.New(root, *rootInfo.Id, f)
|
||||
@@ -211,23 +215,26 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
// No root so return old f
|
||||
return f, nil
|
||||
}
|
||||
obj := newF.newFsObjectWithInfo(remote, nil)
|
||||
if obj == nil {
|
||||
// File doesn't exist so return old f
|
||||
return f, nil
|
||||
_, err := newF.newObjectWithInfo(remote, nil)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// File doesn't exist so return old f
|
||||
return f, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// return a Fs Limited to this object
|
||||
return fs.NewLimited(&newF, obj), nil
|
||||
// return an error with an fs which points to the parent
|
||||
return &newF, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Return an FsObject from a path
|
||||
// Return an Object from a path
|
||||
//
|
||||
// May return nil if an error occurred
|
||||
func (f *FsAcd) newFsObjectWithInfo(remote string, info *acd.Node) fs.Object {
|
||||
o := &FsObjectAcd{
|
||||
acd: f,
|
||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
if info != nil {
|
||||
@@ -236,29 +243,27 @@ func (f *FsAcd) newFsObjectWithInfo(remote string, info *acd.Node) fs.Object {
|
||||
} else {
|
||||
err := o.readMetaData() // reads info and meta, returning an error
|
||||
if err != nil {
|
||||
// logged already FsDebug("Failed to read info: %s", err)
|
||||
return nil
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return o
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// NewFsObject returns an FsObject from a path
|
||||
//
|
||||
// May return nil if an error occurred
|
||||
func (f *FsAcd) NewFsObject(remote string) fs.Object {
|
||||
return f.newFsObjectWithInfo(remote, nil)
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
return f.newObjectWithInfo(remote, nil)
|
||||
}
|
||||
|
||||
// FindLeaf finds a directory of name leaf in the folder with ID pathID
|
||||
func (f *FsAcd) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
|
||||
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
|
||||
//fs.Debug(f, "FindLeaf(%q, %q)", pathID, leaf)
|
||||
folder := acd.FolderFromId(pathID, f.c.Nodes)
|
||||
var resp *http.Response
|
||||
var subFolder *acd.Folder
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
subFolder, resp, err = folder.GetFolder(leaf)
|
||||
return shouldRetry(resp, err)
|
||||
return f.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if err == acd.ErrorNodeNotFound {
|
||||
@@ -269,7 +274,7 @@ func (f *FsAcd) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err
|
||||
return "", false, err
|
||||
}
|
||||
if subFolder.Status != nil && *subFolder.Status != statusAvailable {
|
||||
fs.Debug(f, "Ignoring folder %q in state %q", *subFolder.Status)
|
||||
fs.Debug(f, "Ignoring folder %q in state %q", leaf, *subFolder.Status)
|
||||
time.Sleep(1 * time.Second) // FIXME wait for problem to go away!
|
||||
return "", false, nil
|
||||
}
|
||||
@@ -278,14 +283,14 @@ func (f *FsAcd) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err
|
||||
}
|
||||
|
||||
// CreateDir makes a directory with pathID as parent and name leaf
|
||||
func (f *FsAcd) CreateDir(pathID, leaf string) (newID string, err error) {
|
||||
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
|
||||
//fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
|
||||
folder := acd.FolderFromId(pathID, f.c.Nodes)
|
||||
var resp *http.Response
|
||||
var info *acd.Folder
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
info, resp, err = folder.CreateFolder(leaf)
|
||||
return shouldRetry(resp, err)
|
||||
return f.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
//fmt.Printf("...Error %v\n", err)
|
||||
@@ -306,7 +311,7 @@ type listAllFn func(*acd.Node) bool
|
||||
// Lists the directory required calling the user function on each item found
|
||||
//
|
||||
// If the user fn ever returns true then it early exits with found = true
|
||||
func (f *FsAcd) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
|
||||
func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
|
||||
query := "parents:" + dirID
|
||||
if directoriesOnly {
|
||||
query += " AND kind:" + folderKind
|
||||
@@ -321,18 +326,16 @@ func (f *FsAcd) listAll(dirID string, title string, directoriesOnly bool, filesO
|
||||
Filters: query,
|
||||
}
|
||||
var nodes []*acd.Node
|
||||
var out []*acd.Node
|
||||
//var resp *http.Response
|
||||
OUTER:
|
||||
for {
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
err = f.pacer.CallNoRetry(func() (bool, error) {
|
||||
nodes, resp, err = f.c.Nodes.GetNodes(&opts)
|
||||
return shouldRetry(resp, err)
|
||||
return f.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(f, "Couldn't list files: %v", err)
|
||||
break
|
||||
return false, err
|
||||
}
|
||||
if nodes == nil {
|
||||
break
|
||||
@@ -343,109 +346,74 @@ OUTER:
|
||||
if *node.Status != statusAvailable {
|
||||
continue
|
||||
}
|
||||
if fn(node) {
|
||||
found = true
|
||||
break OUTER
|
||||
}
|
||||
// Store the nodes up in case we have to retry the listing
|
||||
out = append(out, node)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Send the nodes now
|
||||
for _, node := range out {
|
||||
if fn(node) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Path should be directory path either "" or "path/"
|
||||
//
|
||||
// List the directory using a recursive list from the root
|
||||
//
|
||||
// This fetches the minimum amount of stuff but does more API calls
|
||||
// which makes it slow
|
||||
func (f *FsAcd) listDirRecursive(dirID string, path string, out fs.ObjectsChan) error {
|
||||
var subError error
|
||||
// Make the API request
|
||||
var wg sync.WaitGroup
|
||||
_, err := f.listAll(dirID, "", false, false, func(node *acd.Node) bool {
|
||||
// Recurse on directories
|
||||
switch *node.Kind {
|
||||
case folderKind:
|
||||
wg.Add(1)
|
||||
folder := path + *node.Name + "/"
|
||||
fs.Debug(f, "Reading %s", folder)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
err := f.listDirRecursive(*node.Id, folder, out)
|
||||
// ListDir reads the directory specified by the job into out, returning any more jobs
|
||||
func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.ListDirJob, err error) {
|
||||
fs.Debug(f, "Reading %q", job.Path)
|
||||
maxTries := fs.Config.LowLevelRetries
|
||||
for tries := 1; tries <= maxTries; tries++ {
|
||||
_, err = f.listAll(job.DirID, "", false, false, func(node *acd.Node) bool {
|
||||
remote := job.Path + *node.Name
|
||||
switch *node.Kind {
|
||||
case folderKind:
|
||||
if out.IncludeDirectory(remote) {
|
||||
dir := &fs.Dir{
|
||||
Name: remote,
|
||||
Bytes: -1,
|
||||
Count: -1,
|
||||
}
|
||||
dir.When, _ = time.Parse(timeFormat, *node.ModifiedDate) // FIXME
|
||||
if out.AddDir(dir) {
|
||||
return true
|
||||
}
|
||||
if job.Depth > 0 {
|
||||
jobs = append(jobs, dircache.ListDirJob{DirID: *node.Id, Path: remote + "/", Depth: job.Depth - 1})
|
||||
}
|
||||
}
|
||||
case fileKind:
|
||||
o, err := f.newObjectWithInfo(remote, node)
|
||||
if err != nil {
|
||||
subError = err
|
||||
fs.ErrorLog(f, "Error reading %s:%s", folder, err)
|
||||
out.SetError(err)
|
||||
return true
|
||||
}
|
||||
}()
|
||||
if out.Add(o) {
|
||||
return true
|
||||
}
|
||||
default:
|
||||
// ignore ASSET etc
|
||||
}
|
||||
return false
|
||||
case fileKind:
|
||||
if fs := f.newFsObjectWithInfo(path+*node.Name, node); fs != nil {
|
||||
out <- fs
|
||||
}
|
||||
default:
|
||||
// ignore ASSET etc
|
||||
})
|
||||
if fs.IsRetryError(err) {
|
||||
fs.Debug(f, "Directory listing error for %q: %v - low level retry %d/%d", job.Path, err, tries, maxTries)
|
||||
continue
|
||||
}
|
||||
return false
|
||||
})
|
||||
wg.Wait()
|
||||
fs.Debug(f, "Finished reading %s", path)
|
||||
if err != nil {
|
||||
return err
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
break
|
||||
}
|
||||
if subError != nil {
|
||||
return subError
|
||||
}
|
||||
return nil
|
||||
fs.Debug(f, "Finished reading %q", job.Path)
|
||||
return jobs, err
|
||||
}
|
||||
|
||||
// List walks the path returning a channel of FsObjects
|
||||
func (f *FsAcd) List() fs.ObjectsChan {
|
||||
out := make(fs.ObjectsChan, fs.Config.Checkers)
|
||||
go func() {
|
||||
defer close(out)
|
||||
err := f.dirCache.FindRoot(false)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(f, "Couldn't find root: %s", err)
|
||||
} else {
|
||||
err = f.listDirRecursive(f.dirCache.RootID(), "", out)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(f, "List failed: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
return out
|
||||
}
|
||||
|
||||
// ListDir lists the directories
|
||||
func (f *FsAcd) ListDir() fs.DirChan {
|
||||
out := make(fs.DirChan, fs.Config.Checkers)
|
||||
go func() {
|
||||
defer close(out)
|
||||
err := f.dirCache.FindRoot(false)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(f, "Couldn't find root: %s", err)
|
||||
} else {
|
||||
_, err := f.listAll(f.dirCache.RootID(), "", true, false, func(item *acd.Node) bool {
|
||||
dir := &fs.Dir{
|
||||
Name: *item.Name,
|
||||
Bytes: -1,
|
||||
Count: -1,
|
||||
}
|
||||
dir.When, _ = time.Parse(timeFormat, *item.ModifiedDate)
|
||||
out <- dir
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(f, "ListDir failed: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
return out
|
||||
// List walks the path returning files and directories into out
|
||||
func (f *Fs) List(out fs.ListOpts, dir string) {
|
||||
f.dirCache.List(f, out, dir)
|
||||
}
|
||||
|
||||
// Put the object into the container
|
||||
@@ -453,26 +421,42 @@ func (f *FsAcd) ListDir() fs.DirChan {
|
||||
// Copy the reader in to the new object which is returned
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *FsAcd) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
|
||||
// Temporary FsObject under construction
|
||||
o := &FsObjectAcd{
|
||||
acd: f,
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
|
||||
remote := src.Remote()
|
||||
size := src.Size()
|
||||
// Temporary Object under construction
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
// Check if object already exists
|
||||
err := o.readMetaData()
|
||||
switch err {
|
||||
case nil:
|
||||
return o, o.Update(in, src)
|
||||
case fs.ErrorObjectNotFound:
|
||||
// Not found so create it
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
// If not create it
|
||||
leaf, directoryID, err := f.dirCache.FindPath(remote, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
folder := acd.FolderFromId(directoryID, o.acd.c.Nodes)
|
||||
if size > warnFileSize {
|
||||
fs.Debug(f, "Warning: file %q may fail because it is too big. Use --max-size=%dGB to skip large files.", remote, warnFileSize>>30)
|
||||
}
|
||||
folder := acd.FolderFromId(directoryID, o.fs.c.Nodes)
|
||||
var info *acd.File
|
||||
var resp *http.Response
|
||||
err = f.pacer.CallNoRetry(func() (bool, error) {
|
||||
if size != 0 {
|
||||
if src.Size() != 0 {
|
||||
info, resp, err = folder.Put(in, leaf)
|
||||
} else {
|
||||
info, resp, err = folder.PutSized(in, size, leaf)
|
||||
}
|
||||
return shouldRetry(resp, err)
|
||||
return f.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -482,15 +466,15 @@ func (f *FsAcd) Put(in io.Reader, remote string, modTime time.Time, size int64)
|
||||
}
|
||||
|
||||
// Mkdir creates the container if it doesn't exist
|
||||
func (f *FsAcd) Mkdir() error {
|
||||
func (f *Fs) Mkdir() error {
|
||||
return f.dirCache.FindRoot(true)
|
||||
}
|
||||
|
||||
// purgeCheck removes the root directory, if check is set then it
|
||||
// refuses to do so if it has anything in
|
||||
func (f *FsAcd) purgeCheck(check bool) error {
|
||||
func (f *Fs) purgeCheck(check bool) error {
|
||||
if f.root == "" {
|
||||
return fmt.Errorf("Can't purge root directory")
|
||||
return errors.New("can't purge root directory")
|
||||
}
|
||||
dc := f.dirCache
|
||||
err := dc.FindRoot(false)
|
||||
@@ -502,7 +486,7 @@ func (f *FsAcd) purgeCheck(check bool) error {
|
||||
if check {
|
||||
// check directory is empty
|
||||
empty := true
|
||||
_, err := f.listAll(rootID, "", false, false, func(node *acd.Node) bool {
|
||||
_, err = f.listAll(rootID, "", false, false, func(node *acd.Node) bool {
|
||||
switch *node.Kind {
|
||||
case folderKind:
|
||||
empty = false
|
||||
@@ -519,7 +503,7 @@ func (f *FsAcd) purgeCheck(check bool) error {
|
||||
return err
|
||||
}
|
||||
if !empty {
|
||||
return fmt.Errorf("Directory not empty")
|
||||
return errors.New("directory not empty")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -527,7 +511,7 @@ func (f *FsAcd) purgeCheck(check bool) error {
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = node.Trash()
|
||||
return shouldRetry(resp, err)
|
||||
return f.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -543,15 +527,20 @@ func (f *FsAcd) purgeCheck(check bool) error {
|
||||
// Rmdir deletes the root folder
|
||||
//
|
||||
// Returns an error if it isn't empty
|
||||
func (f *FsAcd) Rmdir() error {
|
||||
func (f *Fs) Rmdir() error {
|
||||
return f.purgeCheck(true)
|
||||
}
|
||||
|
||||
// Precision return the precision of this Fs
|
||||
func (f *FsAcd) Precision() time.Duration {
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return fs.ModTimeNotSupported
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() fs.HashSet {
|
||||
return fs.HashSet(fs.HashMD5)
|
||||
}
|
||||
|
||||
// Copy src to this remote using server side copy operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
@@ -561,18 +550,18 @@ func (f *FsAcd) Precision() time.Duration {
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
//func (f *FsAcd) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
// srcObj, ok := src.(*FsObjectAcd)
|
||||
//func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
// srcObj, ok := src.(*Object)
|
||||
// if !ok {
|
||||
// fs.Debug(src, "Can't copy - not same remote type")
|
||||
// return nil, fs.ErrorCantCopy
|
||||
// }
|
||||
// srcFs := srcObj.acd
|
||||
// srcFs := srcObj.fs
|
||||
// _, err := f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// return f.NewFsObject(remote), nil
|
||||
// return f.NewObject(remote), nil
|
||||
//}
|
||||
|
||||
// Purge deletes all the files and the container
|
||||
@@ -580,19 +569,19 @@ func (f *FsAcd) Precision() time.Duration {
|
||||
// Optional interface: Only implement this if you have a way of
|
||||
// deleting all the files quicker than just running Remove() on the
|
||||
// result of List()
|
||||
func (f *FsAcd) Purge() error {
|
||||
func (f *Fs) Purge() error {
|
||||
return f.purgeCheck(false)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *FsObjectAcd) Fs() fs.Fs {
|
||||
return o.acd
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *FsObjectAcd) String() string {
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
@@ -600,12 +589,15 @@ func (o *FsObjectAcd) String() string {
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *FsObjectAcd) Remote() string {
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Md5sum returns the Md5sum of an object returning a lowercase hex string
|
||||
func (o *FsObjectAcd) Md5sum() (string, error) {
|
||||
// Hash returns the Md5sum of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(t fs.HashType) (string, error) {
|
||||
if t != fs.HashMD5 {
|
||||
return "", fs.ErrHashUnsupported
|
||||
}
|
||||
if o.info.ContentProperties.Md5 != nil {
|
||||
return *o.info.ContentProperties.Md5, nil
|
||||
}
|
||||
@@ -613,30 +605,37 @@ func (o *FsObjectAcd) Md5sum() (string, error) {
|
||||
}
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *FsObjectAcd) Size() int64 {
|
||||
func (o *Object) Size() int64 {
|
||||
return int64(*o.info.ContentProperties.Size)
|
||||
}
|
||||
|
||||
// readMetaData gets the metadata if it hasn't already been fetched
|
||||
//
|
||||
// it also sets the info
|
||||
func (o *FsObjectAcd) readMetaData() (err error) {
|
||||
//
|
||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
func (o *Object) readMetaData() (err error) {
|
||||
if o.info != nil {
|
||||
return nil
|
||||
}
|
||||
leaf, directoryID, err := o.acd.dirCache.FindPath(o.remote, false)
|
||||
leaf, directoryID, err := o.fs.dirCache.FindPath(o.remote, false)
|
||||
if err != nil {
|
||||
if err == fs.ErrorDirNotFound {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
folder := acd.FolderFromId(directoryID, o.acd.c.Nodes)
|
||||
folder := acd.FolderFromId(directoryID, o.fs.c.Nodes)
|
||||
var resp *http.Response
|
||||
var info *acd.File
|
||||
err = o.acd.pacer.Call(func() (bool, error) {
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
info, resp, err = folder.GetFile(leaf)
|
||||
return shouldRetry(resp, err)
|
||||
return o.fs.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
fs.Debug(o, "Failed to read info: %s", err)
|
||||
if err == acd.ErrorNodeNotFound {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
o.info = info.Node
|
||||
@@ -648,38 +647,46 @@ func (o *FsObjectAcd) readMetaData() (err error) {
|
||||
//
|
||||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *FsObjectAcd) ModTime() time.Time {
|
||||
func (o *Object) ModTime() time.Time {
|
||||
err := o.readMetaData()
|
||||
if err != nil {
|
||||
fs.Log(o, "Failed to read metadata: %s", err)
|
||||
fs.Log(o, "Failed to read metadata: %v", err)
|
||||
return time.Now()
|
||||
}
|
||||
modTime, err := time.Parse(timeFormat, *o.info.ModifiedDate)
|
||||
if err != nil {
|
||||
fs.Log(o, "Failed to read mtime from object: %s", err)
|
||||
fs.Log(o, "Failed to read mtime from object: %v", err)
|
||||
return time.Now()
|
||||
}
|
||||
return modTime
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *FsObjectAcd) SetModTime(modTime time.Time) {
|
||||
func (o *Object) SetModTime(modTime time.Time) error {
|
||||
// FIXME not implemented
|
||||
return
|
||||
return fs.ErrorCantSetModTime
|
||||
}
|
||||
|
||||
// Storable returns a boolean showing whether this object storable
|
||||
func (o *FsObjectAcd) Storable() bool {
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *FsObjectAcd) Open() (in io.ReadCloser, err error) {
|
||||
func (o *Object) Open() (in io.ReadCloser, err error) {
|
||||
bigObject := o.Size() >= int64(tempLinkThreshold)
|
||||
if bigObject {
|
||||
fs.Debug(o, "Dowloading large object via tempLink")
|
||||
}
|
||||
file := acd.File{Node: o.info}
|
||||
var resp *http.Response
|
||||
err = o.acd.pacer.Call(func() (bool, error) {
|
||||
in, resp, err = file.Open()
|
||||
return shouldRetry(resp, err)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
if !bigObject {
|
||||
in, resp, err = file.Open()
|
||||
} else {
|
||||
in, resp, err = file.OpenTempURL(o.fs.noAuthClient)
|
||||
}
|
||||
return o.fs.shouldRetry(resp, err)
|
||||
})
|
||||
return in, err
|
||||
}
|
||||
@@ -687,18 +694,19 @@ func (o *FsObjectAcd) Open() (in io.ReadCloser, err error) {
|
||||
// Update the object with the contents of the io.Reader, modTime and size
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *FsObjectAcd) Update(in io.Reader, modTime time.Time, size int64) error {
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
|
||||
size := src.Size()
|
||||
file := acd.File{Node: o.info}
|
||||
var info *acd.File
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = o.acd.pacer.CallNoRetry(func() (bool, error) {
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
if size != 0 {
|
||||
info, resp, err = file.OverwriteSized(in, size)
|
||||
} else {
|
||||
info, resp, err = file.Overwrite(in)
|
||||
}
|
||||
return shouldRetry(resp, err)
|
||||
return o.fs.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -708,22 +716,22 @@ func (o *FsObjectAcd) Update(in io.Reader, modTime time.Time, size int64) error
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *FsObjectAcd) Remove() error {
|
||||
func (o *Object) Remove() error {
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = o.acd.pacer.Call(func() (bool, error) {
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.info.Trash()
|
||||
return shouldRetry(resp, err)
|
||||
return o.fs.shouldRetry(resp, err)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*FsAcd)(nil)
|
||||
_ fs.Purger = (*FsAcd)(nil)
|
||||
// _ fs.Copier = (*FsAcd)(nil)
|
||||
// _ fs.Mover = (*FsAcd)(nil)
|
||||
// _ fs.DirMover = (*FsAcd)(nil)
|
||||
_ fs.Object = (*FsObjectAcd)(nil)
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
// _ fs.Copier = (*Fs)(nil)
|
||||
// _ fs.Mover = (*Fs)(nil)
|
||||
// _ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
)
|
||||
|
||||
amazonclouddrive/amazonclouddrive_test.go:
@@ -13,44 +13,46 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
fstests.NilObject = fs.Object((*amazonclouddrive.FsObjectAcd)(nil))
|
||||
fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
|
||||
fstests.RemoteName = "TestAmazonCloudDrive:"
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
|
||||
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
appveyor.yml (new file, 20 additions)
@@ -0,0 +1,20 @@
|
||||
version: "{build}"
|
||||
|
||||
os: Windows Server 2012 R2
|
||||
|
||||
clone_folder: c:\gopath\src\github.com\ncw\rclone
|
||||
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
|
||||
install:
|
||||
- echo %PATH%
|
||||
- echo %GOPATH%
|
||||
- go version
|
||||
- go env
|
||||
- go get -t -d ./...
|
||||
|
||||
build_script:
|
||||
- go vet ./...
|
||||
- go test -cpu=2 ./...
|
||||
- go test -cpu=2 -short -race ./...
|
||||
b2/api/types.go (new file, 299 additions)
@@ -0,0 +1,299 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
// Error describes a B2 error response
|
||||
type Error struct {
|
||||
Status int `json:"status"` // The numeric HTTP status code. Always matches the status in the HTTP response.
|
||||
Code string `json:"code"` // A single-identifier code that identifies the error.
|
||||
Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
|
||||
}
|
||||
|
||||
// Error satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
|
||||
}
|
||||
|
||||
// Fatal satisfies the Fatal interface
|
||||
//
|
||||
// It indicates which errors should be treated as fatal
|
||||
func (e *Error) Fatal() bool {
|
||||
return e.Status == 403 // 403 errors shouldn't be retried
|
||||
}
|
||||
|
||||
var _ fs.Fataler = (*Error)(nil)
|
||||
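// As a sketch of how a caller might honour this marker: any error that
// satisfies an interface with a Fatal() bool method (which is all the
// assertion above requires) can be checked like this, assuming err came
// back from a B2 call:
//
//	if fatal, ok := err.(interface{ Fatal() bool }); ok && fatal.Fatal() {
//		return err // give up rather than retry, e.g. on a 403 response
//	}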
|
||||
// Account describes a B2 account
|
||||
type Account struct {
|
||||
ID string `json:"accountId"` // The identifier for the account.
|
||||
}
|
||||
|
||||
// Bucket describes a B2 bucket
|
||||
type Bucket struct {
|
||||
ID string `json:"bucketId"`
|
||||
AccountID string `json:"accountId"`
|
||||
Name string `json:"bucketName"`
|
||||
Type string `json:"bucketType"`
|
||||
}
|
||||
|
||||
// Timestamp is a UTC time when this file was uploaded. It is a base
|
||||
// 10 number of milliseconds since midnight, January 1, 1970 UTC. This
|
||||
// fits in a 64 bit integer such as the type "long" in the programming
|
||||
// language Java. It is intended to be compatible with Java's time
|
||||
// long. For example, it can be passed directly into the java call
|
||||
// Date.setTime(long time).
|
||||
type Timestamp time.Time
|
||||
|
||||
// MarshalJSON turns a Timestamp into JSON (in UTC)
|
||||
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
|
||||
timestamp := (*time.Time)(t).UTC().UnixNano()
|
||||
return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON turns JSON into a Timestamp
|
||||
func (t *Timestamp) UnmarshalJSON(data []byte) error {
|
||||
timestamp, err := strconv.ParseInt(string(data), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
|
||||
return nil
|
||||
}
|
||||
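// As an illustration of the millisecond encoding, a round trip through the
// two methods above using the same values exercised in types_test.go:
//
//	ts := Timestamp(time.Date(2001, 2, 3, 4, 5, 6, 123000000, time.UTC))
//	data, _ := ts.MarshalJSON()  // []byte("981173106123"), milliseconds since the epoch
//	var back Timestamp
//	_ = back.UnmarshalJSON(data) // back equals ts to millisecond precision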
|
||||
const versionFormat = "-v2006-01-02-150405.000"
|
||||
|
||||
// AddVersion adds the timestamp as a version string into the filename passed in.
|
||||
func (t Timestamp) AddVersion(remote string) string {
|
||||
ext := path.Ext(remote)
|
||||
base := remote[:len(remote)-len(ext)]
|
||||
s := (time.Time)(t).Format(versionFormat)
|
||||
// Replace the '.' with a '-'
|
||||
s = strings.Replace(s, ".", "-", -1)
|
||||
return base + s + ext
|
||||
}
|
||||
|
||||
// RemoveVersion removes the timestamp from a filename as a version string.
|
||||
//
|
||||
// It returns the new file name and a timestamp, or the old filename
|
||||
// and a zero timestamp.
|
||||
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
|
||||
newRemote = remote
|
||||
ext := path.Ext(remote)
|
||||
base := remote[:len(remote)-len(ext)]
|
||||
if len(base) < len(versionFormat) {
|
||||
return
|
||||
}
|
||||
versionStart := len(base) - len(versionFormat)
|
||||
// Check it ends in -xxx
|
||||
if base[len(base)-4] != '-' {
|
||||
return
|
||||
}
|
||||
// Replace with .xxx for parsing
|
||||
base = base[:len(base)-4] + "." + base[len(base)-3:]
|
||||
newT, err := time.Parse(versionFormat, base[versionStart:])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return Timestamp(newT), base[:versionStart] + ext
|
||||
}
|
||||
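// As an illustration of the version naming scheme implemented by AddVersion
// and RemoveVersion (the same cases appear in types_test.go):
//
//	t := Timestamp(time.Date(1970, 1, 1, 1, 1, 1, 123000000, time.UTC))
//	t.AddVersion("potato.txt")                         // "potato-v1970-01-01-010101-123.txt"
//	RemoveVersion("potato-v1970-01-01-010101-123.txt") // returns t and "potato.txt"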
|
||||
// IsZero returns true if the timestamp is uninitialised
|
||||
func (t Timestamp) IsZero() bool {
|
||||
return (time.Time)(t).IsZero()
|
||||
}
|
||||
|
||||
// Equal compares two timestamps
|
||||
//
|
||||
// If either is zero then it returns false
|
||||
func (t Timestamp) Equal(s Timestamp) bool {
|
||||
if (time.Time)(t).IsZero() {
|
||||
return false
|
||||
}
|
||||
if (time.Time)(s).IsZero() {
|
||||
return false
|
||||
}
|
||||
return (time.Time)(t).Equal((time.Time)(s))
|
||||
}
|
||||
|
||||
// File is info about a file
|
||||
type File struct {
|
||||
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
|
||||
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
|
||||
Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
|
||||
Size int64 `json:"size"` // The number of bytes in the file.
|
||||
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
|
||||
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
|
||||
ContentType string `json:"contentType"` // The MIME type of the file.
|
||||
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
|
||||
}
|
||||
|
||||
// AuthorizeAccountResponse is as returned from the b2_authorize_account call
|
||||
type AuthorizeAccountResponse struct {
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
|
||||
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
|
||||
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
|
||||
}
|
||||
|
||||
// ListBucketsResponse is as returned from the b2_list_buckets call
|
||||
type ListBucketsResponse struct {
|
||||
Buckets []Bucket `json:"buckets"`
|
||||
}
|
||||
|
||||
// ListFileNamesRequest is as passed to b2_list_file_names or b2_list_file_versions
|
||||
type ListFileNamesRequest struct {
|
||||
BucketID string `json:"bucketId"` // required - The bucket to look for file names in.
|
||||
StartFileName string `json:"startFileName,omitempty"` // optional - The first file name to return. If there is a file with this name, it will be returned in the list. If not, the first file name after this name will be returned.
|
||||
MaxFileCount int `json:"maxFileCount,omitempty"` // optional - The maximum number of files to return from this call. The default value is 100, and the maximum allowed is 1000.
|
||||
StartFileID string `json:"startFileId,omitempty"` // optional - What to pass in to startFileId for the next search to continue where this one left off.
|
||||
}
|
||||
|
||||
// ListFileNamesResponse is as received from b2_list_file_names or b2_list_file_versions
|
||||
type ListFileNamesResponse struct {
|
||||
Files []File `json:"files"` // An array of objects, each one describing one file.
|
||||
NextFileName *string `json:"nextFileName"` // What to pass in to startFileName for the next search to continue where this one left off, or null if there are no more files.
|
||||
NextFileID *string `json:"nextFileId"` // What to pass in to startFileId for the next search to continue where this one left off, or null if there are no more files.
|
||||
}
|
||||
|
||||
// GetUploadURLRequest is passed to b2_get_upload_url
|
||||
type GetUploadURLRequest struct {
|
||||
BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to.
|
||||
}
|
||||
|
||||
// GetUploadURLResponse is received from b2_get_upload_url
|
||||
type GetUploadURLResponse struct {
|
||||
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
||||
UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_file.
|
||||
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
|
||||
}
|
||||
|
||||
// FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
|
||||
type FileInfo struct {
|
||||
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
|
||||
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
|
||||
Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
|
||||
AccountID string `json:"accountId"` // Your account ID.
|
||||
BucketID string `json:"bucketId"` // The bucket that the file is in.
|
||||
Size int64 `json:"contentLength"` // The number of bytes stored in the file.
|
||||
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
|
||||
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
|
||||
ContentType string `json:"contentType"` // The MIME type of the file.
|
||||
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
|
||||
}
|
||||
|
||||
// CreateBucketRequest is used to create a bucket
|
||||
type CreateBucketRequest struct {
|
||||
AccountID string `json:"accountId"`
|
||||
Name string `json:"bucketName"`
|
||||
Type string `json:"bucketType"`
|
||||
}
|
||||
|
||||
// DeleteBucketRequest is used to delete a bucket
|
||||
type DeleteBucketRequest struct {
|
||||
ID string `json:"bucketId"`
|
||||
AccountID string `json:"accountId"`
|
||||
}
|
||||
|
||||
// DeleteFileRequest is used to delete a file version
|
||||
type DeleteFileRequest struct {
|
||||
ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
|
||||
Name string `json:"fileName"` // The name of this file.
|
||||
}
|
||||
|
||||
// HideFileRequest is used to hide a file
|
||||
type HideFileRequest struct {
|
||||
BucketID string `json:"bucketId"` // The bucket containing the file to hide.
|
||||
Name string `json:"fileName"` // The name of the file to hide.
|
||||
}
|
||||
|
||||
// GetFileInfoRequest is used to return a FileInfo struct with b2_get_file_info
|
||||
type GetFileInfoRequest struct {
|
||||
ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
|
||||
}
|
||||
|
||||
// StartLargeFileRequest (b2_start_large_file) prepares for uploading the parts of a large file.
|
||||
//
|
||||
// If the original source of the file being uploaded has a last
|
||||
// modified time concept, Backblaze recommends using
|
||||
// src_last_modified_millis as the name, and a string holding the base
|
||||
// 10 number of milliseconds since midnight, January 1, 1970
|
||||
// UTC. This fits in a 64 bit integer such as the type "long" in the
|
||||
// programming language Java. It is intended to be compatible with
|
||||
// Java's time long. For example, it can be passed directly into the
|
||||
// Java call Date.setTime(long time).
|
||||
//
|
||||
// If the caller knows the SHA1 of the entire large file being
|
||||
// uploaded, Backblaze recommends using large_file_sha1 as the name,
|
||||
// and a 40 byte hex string representing the SHA1.
|
||||
//
|
||||
// Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
|
||||
type StartLargeFileRequest struct {
|
||||
BucketID string `json:"bucketId"` //The ID of the bucket that the file will go in.
|
||||
Name string `json:"fileName"` // The name of the file. See Files for requirements on file names.
|
||||
ContentType string `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
|
||||
Info map[string]string `json:"fileInfo"` // A JSON object holding the name/value pairs for the custom file info.
|
||||
}
|
||||
|
||||
// StartLargeFileResponse is the response to StartLargeFileRequest
|
||||
type StartLargeFileResponse struct {
|
||||
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
|
||||
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
||||
ContentType string `json:"contentType"` // The MIME type of the file.
|
||||
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
|
||||
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
|
||||
}
|
||||
|
||||
// GetUploadPartURLRequest is passed to b2_get_upload_part_url
|
||||
type GetUploadPartURLRequest struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
}
|
||||
|
||||
// GetUploadPartURLResponse is received from b2_get_upload_part_url
|
||||
type GetUploadPartURLResponse struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_part.
|
||||
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_part.
|
||||
}
|
||||
|
||||
// UploadPartResponse is the response to b2_upload_part
|
||||
type UploadPartResponse struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
|
||||
Size int64 `json:"contentLength"` // The number of bytes stored in the file.
|
||||
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
|
||||
}
|
||||
|
||||
// FinishLargeFileRequest is passed to b2_finish_large_file
|
||||
//
|
||||
// The response is a FileInfo object (with extra AccountID and BucketID fields which we ignore).
|
||||
//
|
||||
// Large files do not have a SHA1 checksum. The value will always be "none".
|
||||
type FinishLargeFileRequest struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
SHA1s []string `json:"partSha1Array"` // A JSON array of hex SHA1 checksums of the parts of the large file. This is a double-check that the right parts were uploaded in the right order, and that none were missed. Note that the part numbers start at 1, and the SHA1 of the part 1 is the first string in the array, at index 0.
|
||||
}
|
||||
|
||||
// CancelLargeFileRequest is passed to b2_cancel_large_file
|
||||
//
|
||||
// The response is a CancelLargeFileResponse
|
||||
type CancelLargeFileRequest struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
}
|
||||
|
||||
// CancelLargeFileResponse is the response to CancelLargeFileRequest
|
||||
type CancelLargeFileResponse struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
Name string `json:"fileName"` // The name of this file.
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
||||
}
|
||||
b2/api/types_test.go (new file, 87 additions)
@@ -0,0 +1,87 @@
|
||||
package api_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/b2/api"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
emptyT api.Timestamp
|
||||
t0 = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
|
||||
t0r = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
|
||||
t1 = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
|
||||
)
|
||||
|
||||
func TestTimestampMarshalJSON(t *testing.T) {
|
||||
resB, err := t0.MarshalJSON()
|
||||
res := string(resB)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "3661123", res)
|
||||
|
||||
resB, err = t1.MarshalJSON()
|
||||
res = string(resB)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "981173106123", res)
|
||||
}
|
||||
|
||||
func TestTimestampUnmarshalJSON(t *testing.T) {
|
||||
var tActual api.Timestamp
|
||||
err := tActual.UnmarshalJSON([]byte("981173106123"))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
|
||||
}
|
||||
|
||||
func TestTimestampAddVersion(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
t api.Timestamp
|
||||
in string
|
||||
expected string
|
||||
}{
|
||||
{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
|
||||
{t1, "potato", "potato-v2001-02-03-040506-123"},
|
||||
{t1, "", "-v2001-02-03-040506-123"},
|
||||
} {
|
||||
actual := test.t.AddVersion(test.in)
|
||||
assert.Equal(t, test.expected, actual, test.in)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimestampRemoveVersion(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expectedT api.Timestamp
|
||||
expectedRemote string
|
||||
}{
|
||||
{"potato.txt", emptyT, "potato.txt"},
|
||||
{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
|
||||
{"potato-v2001-02-03-040506-123", t1, "potato"},
|
||||
{"-v2001-02-03-040506-123", t1, ""},
|
||||
{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
|
||||
{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
|
||||
} {
|
||||
actualT, actualRemote := api.RemoveVersion(test.in)
|
||||
assert.Equal(t, test.expectedT, actualT, test.in)
|
||||
assert.Equal(t, test.expectedRemote, actualRemote, test.in)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimestampIsZero(t *testing.T) {
|
||||
assert.True(t, emptyT.IsZero())
|
||||
assert.False(t, t0.IsZero())
|
||||
assert.False(t, t1.IsZero())
|
||||
}
|
||||
|
||||
func TestTimestampEqual(t *testing.T) {
|
||||
assert.False(t, emptyT.Equal(emptyT))
|
||||
assert.False(t, t0.Equal(emptyT))
|
||||
assert.False(t, emptyT.Equal(t0))
|
||||
assert.False(t, t0.Equal(t1))
|
||||
assert.False(t, t1.Equal(t0))
|
||||
assert.True(t, t0.Equal(t0))
|
||||
assert.True(t, t1.Equal(t1))
|
||||
}
|
||||
b2/b2_internal_test.go (new file, 284 additions)
@@ -0,0 +1,284 @@
|
||||
package b2
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// Test b2 string encoding
|
||||
// https://www.backblaze.com/b2/docs/string_encoding.html
|
||||
|
||||
var encodeTest = []struct {
|
||||
fullyEncoded string
|
||||
minimallyEncoded string
|
||||
plainText string
|
||||
}{
|
||||
{fullyEncoded: "%20", minimallyEncoded: "+", plainText: " "},
|
||||
{fullyEncoded: "%21", minimallyEncoded: "!", plainText: "!"},
|
||||
{fullyEncoded: "%22", minimallyEncoded: "%22", plainText: "\""},
|
||||
{fullyEncoded: "%23", minimallyEncoded: "%23", plainText: "#"},
|
||||
{fullyEncoded: "%24", minimallyEncoded: "$", plainText: "$"},
|
||||
{fullyEncoded: "%25", minimallyEncoded: "%25", plainText: "%"},
|
||||
{fullyEncoded: "%26", minimallyEncoded: "%26", plainText: "&"},
|
||||
{fullyEncoded: "%27", minimallyEncoded: "'", plainText: "'"},
|
||||
{fullyEncoded: "%28", minimallyEncoded: "(", plainText: "("},
|
||||
{fullyEncoded: "%29", minimallyEncoded: ")", plainText: ")"},
|
||||
{fullyEncoded: "%2A", minimallyEncoded: "*", plainText: "*"},
|
||||
{fullyEncoded: "%2B", minimallyEncoded: "%2B", plainText: "+"},
|
||||
{fullyEncoded: "%2C", minimallyEncoded: "%2C", plainText: ","},
|
||||
{fullyEncoded: "%2D", minimallyEncoded: "-", plainText: "-"},
|
||||
{fullyEncoded: "%2E", minimallyEncoded: ".", plainText: "."},
|
||||
{fullyEncoded: "%2F", minimallyEncoded: "/", plainText: "/"},
|
||||
{fullyEncoded: "%30", minimallyEncoded: "0", plainText: "0"},
|
||||
{fullyEncoded: "%31", minimallyEncoded: "1", plainText: "1"},
|
||||
{fullyEncoded: "%32", minimallyEncoded: "2", plainText: "2"},
|
||||
{fullyEncoded: "%33", minimallyEncoded: "3", plainText: "3"},
|
||||
{fullyEncoded: "%34", minimallyEncoded: "4", plainText: "4"},
|
||||
{fullyEncoded: "%35", minimallyEncoded: "5", plainText: "5"},
|
||||
{fullyEncoded: "%36", minimallyEncoded: "6", plainText: "6"},
|
||||
{fullyEncoded: "%37", minimallyEncoded: "7", plainText: "7"},
|
||||
{fullyEncoded: "%38", minimallyEncoded: "8", plainText: "8"},
|
||||
{fullyEncoded: "%39", minimallyEncoded: "9", plainText: "9"},
|
||||
{fullyEncoded: "%3A", minimallyEncoded: ":", plainText: ":"},
|
||||
{fullyEncoded: "%3B", minimallyEncoded: ";", plainText: ";"},
|
||||
{fullyEncoded: "%3C", minimallyEncoded: "%3C", plainText: "<"},
|
||||
{fullyEncoded: "%3D", minimallyEncoded: "=", plainText: "="},
|
||||
{fullyEncoded: "%3E", minimallyEncoded: "%3E", plainText: ">"},
|
||||
{fullyEncoded: "%3F", minimallyEncoded: "%3F", plainText: "?"},
|
||||
{fullyEncoded: "%40", minimallyEncoded: "@", plainText: "@"},
|
||||
{fullyEncoded: "%41", minimallyEncoded: "A", plainText: "A"},
|
||||
{fullyEncoded: "%42", minimallyEncoded: "B", plainText: "B"},
|
||||
{fullyEncoded: "%43", minimallyEncoded: "C", plainText: "C"},
|
||||
{fullyEncoded: "%44", minimallyEncoded: "D", plainText: "D"},
|
||||
{fullyEncoded: "%45", minimallyEncoded: "E", plainText: "E"},
|
||||
{fullyEncoded: "%46", minimallyEncoded: "F", plainText: "F"},
|
||||
{fullyEncoded: "%47", minimallyEncoded: "G", plainText: "G"},
|
||||
{fullyEncoded: "%48", minimallyEncoded: "H", plainText: "H"},
|
||||
{fullyEncoded: "%49", minimallyEncoded: "I", plainText: "I"},
|
||||
{fullyEncoded: "%4A", minimallyEncoded: "J", plainText: "J"},
|
||||
{fullyEncoded: "%4B", minimallyEncoded: "K", plainText: "K"},
|
||||
{fullyEncoded: "%4C", minimallyEncoded: "L", plainText: "L"},
|
||||
{fullyEncoded: "%4D", minimallyEncoded: "M", plainText: "M"},
|
||||
{fullyEncoded: "%4E", minimallyEncoded: "N", plainText: "N"},
|
||||
{fullyEncoded: "%4F", minimallyEncoded: "O", plainText: "O"},
|
||||
{fullyEncoded: "%50", minimallyEncoded: "P", plainText: "P"},
|
||||
{fullyEncoded: "%51", minimallyEncoded: "Q", plainText: "Q"},
|
||||
{fullyEncoded: "%52", minimallyEncoded: "R", plainText: "R"},
|
||||
{fullyEncoded: "%53", minimallyEncoded: "S", plainText: "S"},
|
||||
{fullyEncoded: "%54", minimallyEncoded: "T", plainText: "T"},
|
||||
{fullyEncoded: "%55", minimallyEncoded: "U", plainText: "U"},
|
||||
{fullyEncoded: "%56", minimallyEncoded: "V", plainText: "V"},
|
||||
{fullyEncoded: "%57", minimallyEncoded: "W", plainText: "W"},
|
||||
{fullyEncoded: "%58", minimallyEncoded: "X", plainText: "X"},
|
||||
{fullyEncoded: "%59", minimallyEncoded: "Y", plainText: "Y"},
|
||||
{fullyEncoded: "%5A", minimallyEncoded: "Z", plainText: "Z"},
|
||||
{fullyEncoded: "%5B", minimallyEncoded: "%5B", plainText: "["},
|
||||
{fullyEncoded: "%5C", minimallyEncoded: "%5C", plainText: "\\"},
|
||||
{fullyEncoded: "%5D", minimallyEncoded: "%5D", plainText: "]"},
|
||||
{fullyEncoded: "%5E", minimallyEncoded: "%5E", plainText: "^"},
|
||||
{fullyEncoded: "%5F", minimallyEncoded: "_", plainText: "_"},
|
||||
{fullyEncoded: "%60", minimallyEncoded: "%60", plainText: "`"},
|
||||
{fullyEncoded: "%61", minimallyEncoded: "a", plainText: "a"},
|
||||
{fullyEncoded: "%62", minimallyEncoded: "b", plainText: "b"},
|
||||
{fullyEncoded: "%63", minimallyEncoded: "c", plainText: "c"},
|
||||
{fullyEncoded: "%64", minimallyEncoded: "d", plainText: "d"},
|
||||
{fullyEncoded: "%65", minimallyEncoded: "e", plainText: "e"},
|
||||
{fullyEncoded: "%66", minimallyEncoded: "f", plainText: "f"},
|
||||
{fullyEncoded: "%67", minimallyEncoded: "g", plainText: "g"},
|
||||
{fullyEncoded: "%68", minimallyEncoded: "h", plainText: "h"},
|
||||
{fullyEncoded: "%69", minimallyEncoded: "i", plainText: "i"},
|
||||
{fullyEncoded: "%6A", minimallyEncoded: "j", plainText: "j"},
|
||||
{fullyEncoded: "%6B", minimallyEncoded: "k", plainText: "k"},
|
||||
{fullyEncoded: "%6C", minimallyEncoded: "l", plainText: "l"},
|
||||
{fullyEncoded: "%6D", minimallyEncoded: "m", plainText: "m"},
|
||||
{fullyEncoded: "%6E", minimallyEncoded: "n", plainText: "n"},
|
||||
{fullyEncoded: "%6F", minimallyEncoded: "o", plainText: "o"},
|
||||
{fullyEncoded: "%70", minimallyEncoded: "p", plainText: "p"},
|
||||
{fullyEncoded: "%71", minimallyEncoded: "q", plainText: "q"},
|
||||
{fullyEncoded: "%72", minimallyEncoded: "r", plainText: "r"},
|
||||
{fullyEncoded: "%73", minimallyEncoded: "s", plainText: "s"},
|
||||
{fullyEncoded: "%74", minimallyEncoded: "t", plainText: "t"},
|
||||
{fullyEncoded: "%75", minimallyEncoded: "u", plainText: "u"},
|
||||
{fullyEncoded: "%76", minimallyEncoded: "v", plainText: "v"},
|
||||
{fullyEncoded: "%77", minimallyEncoded: "w", plainText: "w"},
|
||||
{fullyEncoded: "%78", minimallyEncoded: "x", plainText: "x"},
|
||||
{fullyEncoded: "%79", minimallyEncoded: "y", plainText: "y"},
|
||||
{fullyEncoded: "%7A", minimallyEncoded: "z", plainText: "z"},
|
||||
{fullyEncoded: "%7B", minimallyEncoded: "%7B", plainText: "{"},
|
||||
{fullyEncoded: "%7C", minimallyEncoded: "%7C", plainText: "|"},
|
||||
{fullyEncoded: "%7D", minimallyEncoded: "%7D", plainText: "}"},
|
||||
{fullyEncoded: "%7E", minimallyEncoded: "~", plainText: "~"},
|
||||
{fullyEncoded: "%7F", minimallyEncoded: "%7F", plainText: "\u007f"},
|
||||
{fullyEncoded: "%E8%87%AA%E7%94%B1", minimallyEncoded: "%E8%87%AA%E7%94%B1", plainText: "自由"},
|
||||
{fullyEncoded: "%F0%90%90%80", minimallyEncoded: "%F0%90%90%80", plainText: "𐐀"},
|
||||
}
|
||||
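// urlEncode itself is not part of this file; as a minimal sketch, an encoder
// consistent with the table above (the test below accepts either the minimal
// or the fully encoded form) could percent-encode every byte outside the
// unreserved set while leaving '/' alone:
//
//	func naiveURLEncode(in string) string {
//		var out bytes.Buffer
//		for i := 0; i < len(in); i++ {
//			c := in[i]
//			switch {
//			case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z', '0' <= c && c <= '9',
//				c == '/', c == '-', c == '.', c == '_', c == '~':
//				out.WriteByte(c)
//			default:
//				fmt.Fprintf(&out, "%%%02X", c)
//			}
//		}
//		return out.String()
//	}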
|
||||
func TestUrlEncode(t *testing.T) {
|
||||
for _, test := range encodeTest {
|
||||
got := urlEncode(test.plainText)
|
||||
if got != test.minimallyEncoded && got != test.fullyEncoded {
|
||||
t.Errorf("urlEncode(%q) got %q wanted %q or %q", test.plainText, got, test.minimallyEncoded, test.fullyEncoded)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimeString(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in time.Time
|
||||
want string
|
||||
}{
|
||||
{fstest.Time("1970-01-01T00:00:00.000000000Z"), "0"},
|
||||
{fstest.Time("2001-02-03T04:05:10.123123123Z"), "981173110123"},
|
||||
{fstest.Time("2001-02-03T05:05:10.123123123+01:00"), "981173110123"},
|
||||
} {
|
||||
got := timeString(test.in)
|
||||
if test.want != got {
|
||||
t.Logf("%v: want %v got %v", test.in, test.want, got)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestParseTimeString(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
want time.Time
|
||||
wantError string
|
||||
}{
|
||||
{"0", fstest.Time("1970-01-01T00:00:00.000000000Z"), ""},
|
||||
{"981173110123", fstest.Time("2001-02-03T04:05:10.123000000Z"), ""},
|
||||
{"", time.Time{}, ""},
|
||||
{"potato", time.Time{}, `strconv.ParseInt: parsing "potato": invalid syntax`},
|
||||
} {
|
||||
o := Object{}
|
||||
err := o.parseTimeString(test.in)
|
||||
got := o.modTime
|
||||
var gotError string
|
||||
if err != nil {
|
||||
gotError = err.Error()
|
||||
}
|
||||
if test.want != got {
|
||||
t.Logf("%v: want %v got %v", test.in, test.want, got)
|
||||
}
|
||||
if test.wantError != gotError {
|
||||
t.Logf("%v: want error %v got error %v", test.in, test.wantError, gotError)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestSendDir(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
lastDir string
|
||||
remote string
|
||||
level int
|
||||
dirNames []string
|
||||
newLastDir string
|
||||
}{
|
||||
{
|
||||
lastDir: "",
|
||||
remote: "test.txt",
|
||||
level: 100,
|
||||
dirNames: nil,
|
||||
newLastDir: "",
|
||||
},
|
||||
{
|
||||
lastDir: "",
|
||||
remote: "potato/test.txt",
|
||||
level: 100,
|
||||
dirNames: []string{"potato"},
|
||||
newLastDir: "potato",
|
||||
},
|
||||
{
|
||||
lastDir: "potato",
|
||||
remote: "potato/test.txt",
|
||||
level: 100,
|
||||
dirNames: nil,
|
||||
newLastDir: "potato",
|
||||
},
|
||||
{
|
||||
lastDir: "",
|
||||
remote: "potato/sausage/test.txt",
|
||||
level: 100,
|
||||
dirNames: []string{"potato", "potato/sausage"},
|
||||
newLastDir: "potato/sausage",
|
||||
},
|
||||
{
|
||||
lastDir: "potato",
|
||||
remote: "potato/sausage/test.txt",
|
||||
level: 100,
|
||||
dirNames: []string{"potato/sausage"},
|
||||
newLastDir: "potato/sausage",
|
||||
},
|
||||
{
|
||||
lastDir: "potato/sausage",
|
||||
remote: "potato/sausage/test.txt",
|
||||
level: 100,
|
||||
dirNames: nil,
|
||||
newLastDir: "potato/sausage",
|
||||
},
|
||||
{
|
||||
lastDir: "",
|
||||
remote: "a/b/c/d/e/f.txt",
|
||||
level: 100,
|
||||
dirNames: []string{"a", "a/b", "a/b/c", "a/b/c/d", "a/b/c/d/e"},
|
||||
newLastDir: "a/b/c/d/e",
|
||||
},
|
||||
{
|
||||
lastDir: "a/b/c/d/e",
|
||||
remote: "a/b/c/d/E/f.txt",
|
||||
level: 100,
|
||||
dirNames: []string{"a/b/c/d/E"},
|
||||
newLastDir: "a/b/c/d/E",
|
||||
},
|
||||
{
|
||||
lastDir: "a/b/c/d/e",
|
||||
remote: "a/b/C/D/E/f.txt",
|
||||
level: 100,
|
||||
dirNames: []string{"a/b/C", "a/b/C/D", "a/b/C/D/E"},
|
||||
newLastDir: "a/b/C/D/E",
|
||||
},
|
||||
{
|
||||
lastDir: "a/b/c",
|
||||
remote: "a/b/c/d/e/f.txt",
|
||||
level: 100,
|
||||
dirNames: []string{"a/b/c/d", "a/b/c/d/e"},
|
||||
newLastDir: "a/b/c/d/e",
|
||||
},
|
||||
{
|
||||
lastDir: "",
|
||||
remote: "a/b/c/d/e/f.txt",
|
||||
level: 1,
|
||||
dirNames: []string{"a"},
|
||||
newLastDir: "a/b/c/d/e",
|
||||
},
|
||||
{
|
||||
lastDir: "a/b/c",
|
||||
remote: "a/b/c/d/e/f.txt",
|
||||
level: 1,
|
||||
dirNames: nil,
|
||||
newLastDir: "a/b/c/d/e",
|
||||
},
|
||||
{
|
||||
lastDir: "",
|
||||
remote: "a/b/c/d/e/f.txt",
|
||||
level: 3,
|
||||
dirNames: []string{"a", "a/b", "a/b/c"},
|
||||
newLastDir: "a/b/c/d/e",
|
||||
},
|
||||
{
|
||||
lastDir: "a/b/C/D/E",
|
||||
remote: "a/b/c/d/e/f.txt",
|
||||
level: 3,
|
||||
dirNames: []string{"a/b/c"},
|
||||
newLastDir: "a/b/c/d/e",
|
||||
},
|
||||
} {
|
||||
dirNames, newLastDir := sendDir(test.lastDir, test.remote, test.level)
|
||||
assert.Equal(t, test.dirNames, dirNames, "dirNames fail for sendDir(%q,%q,%v)", test.lastDir, test.remote, test.level)
|
||||
assert.Equal(t, test.newLastDir, newLastDir, "newLastDir fail for sendDir(%q,%q,%v)", test.lastDir, test.remote, test.level)
|
||||
}
|
||||
}
|
||||
b2/b2_test.go (new file, 58 additions)
@@ -0,0 +1,58 @@
|
||||
// Test B2 filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
package b2_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/b2"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func init() {
|
||||
fstests.NilObject = fs.Object((*b2.Object)(nil))
|
||||
fstests.RemoteName = "TestB2:"
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
b2/upload.go (new file, 300 additions)
@@ -0,0 +1,300 @@
|
||||
// Upload large files for b2
|
||||
//
|
||||
// Docs - https://www.backblaze.com/b2/docs/large_files.html
|
||||
|
||||
package b2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha1"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/b2/api"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/rest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// largeUpload is used to control the upload of large files which need chunking
|
||||
type largeUpload struct {
|
||||
f *Fs // parent Fs
|
||||
o *Object // object being uploaded
|
||||
in io.Reader // read the data from here
|
||||
id string // ID of the file being uploaded
|
||||
size int64 // total size
|
||||
parts int64 // calculated number of parts
|
||||
sha1s []string // slice of SHA1s for each part
|
||||
uploadMu sync.Mutex // lock for upload variable
|
||||
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
|
||||
}
|
||||
|
||||
// newLargeUpload starts an upload of object o from in with metadata in src
|
||||
func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
|
||||
remote := o.remote
|
||||
size := src.Size()
|
||||
parts := size / int64(chunkSize)
|
||||
if size%int64(chunkSize) != 0 {
|
||||
parts++
|
||||
}
|
||||
if parts > maxParts {
|
||||
return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
|
||||
}
|
||||
modTime := src.ModTime()
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_start_large_file",
|
||||
}
|
||||
bucketID, err := f.getBucketID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var request = api.StartLargeFileRequest{
|
||||
BucketID: bucketID,
|
||||
Name: o.fs.root + remote,
|
||||
ContentType: fs.MimeType(src),
|
||||
Info: map[string]string{
|
||||
timeKey: timeString(modTime),
|
||||
},
|
||||
}
|
||||
// Set the SHA1 if known
|
||||
if calculatedSha1, err := src.Hash(fs.HashSHA1); err == nil && calculatedSha1 != "" {
|
||||
request.Info[sha1Key] = calculatedSha1
|
||||
}
|
||||
var response api.StartLargeFileResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.CallJSON(&opts, &request, &response)
|
||||
return f.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
up = &largeUpload{
|
||||
f: f,
|
||||
o: o,
|
||||
in: in,
|
||||
id: response.ID,
|
||||
size: size,
|
||||
parts: parts,
|
||||
sha1s: make([]string, parts),
|
||||
}
|
||||
return up, nil
|
||||
}
|
||||
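// Worked example of the part arithmetic above (illustrative numbers only -
// the real chunkSize is whatever --b2-chunk-size resolves to): with a 100 MB
// chunkSize, a 250 MB source gives parts = 250/100 = 2, the non-zero
// remainder bumps it to 3, and sha1s is allocated with room for 3 part hashes.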
|
||||
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
|
||||
//
|
||||
// This should be returned with returnUploadURL when finished
|
||||
func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
|
||||
up.uploadMu.Lock()
|
||||
defer up.uploadMu.Unlock()
|
||||
if len(up.uploads) == 0 {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_get_upload_part_url",
|
||||
}
|
||||
var request = api.GetUploadPartURLRequest{
|
||||
ID: up.id,
|
||||
}
|
||||
err := up.f.pacer.Call(func() (bool, error) {
|
||||
resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
|
||||
return up.f.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get upload URL")
|
||||
}
|
||||
} else {
|
||||
upload, up.uploads = up.uploads[0], up.uploads[1:]
|
||||
}
|
||||
return upload, nil
|
||||
}
|
||||
|
||||
// returnUploadURL returns the UploadURL to the cache
|
||||
func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
|
||||
if upload == nil {
|
||||
return
|
||||
}
|
||||
up.uploadMu.Lock()
|
||||
up.uploads = append(up.uploads, upload)
|
||||
up.uploadMu.Unlock()
|
||||
}
|
||||
|
||||
// clearUploadURL clears the current UploadURL and the AuthorizationToken
|
||||
func (up *largeUpload) clearUploadURL() {
|
||||
up.uploadMu.Lock()
|
||||
up.uploads = nil
|
||||
up.uploadMu.Unlock()
|
||||
}
|
||||
|
||||
// Transfer a chunk
|
||||
func (up *largeUpload) transferChunk(part int64, body []byte) error {
|
||||
calculatedSHA1 := fmt.Sprintf("%x", sha1.Sum(body))
|
||||
up.sha1s[part-1] = calculatedSHA1
|
||||
size := int64(len(body))
|
||||
err := up.f.pacer.Call(func() (bool, error) {
|
||||
fs.Debug(up.o, "Sending chunk %d length %d", part, len(body))
|
||||
|
||||
// Get upload URL
|
||||
upload, err := up.getUploadURL()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Authorization
|
||||
//
|
||||
// An upload authorization token, from b2_get_upload_part_url.
|
||||
//
|
||||
// X-Bz-Part-Number
|
||||
//
|
||||
// A number from 1 to 10000. The parts uploaded for one file
|
||||
// must have contiguous numbers, starting with 1.
|
||||
//
|
||||
// Content-Length
|
||||
//
|
||||
// The number of bytes in the file being uploaded. Note that
|
||||
// this header is required; you cannot leave it out and just
|
||||
// use chunked encoding. The minimum size of every part but
|
||||
// the last one is 100MB.
|
||||
//
|
||||
// X-Bz-Content-Sha1
|
||||
//
|
||||
// The SHA1 checksum of this part of the file. B2 will
|
||||
// check this when the part is uploaded, to make sure that the
|
||||
// data arrived correctly. The same SHA1 checksum must be
|
||||
// passed to b2_finish_large_file.
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Absolute: true,
|
||||
Path: upload.UploadURL,
|
||||
Body: bytes.NewBuffer(body),
|
||||
ExtraHeaders: map[string]string{
|
||||
"Authorization": upload.AuthorizationToken,
|
||||
"X-Bz-Part-Number": fmt.Sprintf("%d", part),
|
||||
sha1Header: calculatedSHA1,
|
||||
},
|
||||
ContentLength: &size,
|
||||
}
|
||||
|
||||
var response api.UploadPartResponse
|
||||
|
||||
resp, err := up.f.srv.CallJSON(&opts, nil, &response)
|
||||
retry, err := up.f.shouldRetryNoReauth(resp, err)
|
||||
// On retryable error clear PartUploadURL
|
||||
if retry {
|
||||
fs.Debug(up.o, "Clearing part upload URL because of error: %v", err)
|
||||
upload = nil
|
||||
}
|
||||
up.returnUploadURL(upload)
|
||||
return retry, err
|
||||
})
|
||||
if err != nil {
|
||||
fs.Debug(up.o, "Error sending chunk %d: %v", part, err)
|
||||
} else {
|
||||
fs.Debug(up.o, "Done sending chunk %d", part)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// finish closes off the large upload
|
||||
func (up *largeUpload) finish() error {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_finish_large_file",
|
||||
}
|
||||
var request = api.FinishLargeFileRequest{
|
||||
ID: up.id,
|
||||
SHA1s: up.sha1s,
|
||||
}
|
||||
var response api.FileInfo
|
||||
err := up.f.pacer.Call(func() (bool, error) {
|
||||
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
|
||||
return up.f.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return up.o.decodeMetaDataFileInfo(&response)
|
||||
}
|
||||
|
||||
// cancel aborts the large upload
|
||||
func (up *largeUpload) cancel() error {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_cancel_large_file",
|
||||
}
|
||||
var request = api.CancelLargeFileRequest{
|
||||
ID: up.id,
|
||||
}
|
||||
var response api.CancelLargeFileResponse
|
||||
err := up.f.pacer.Call(func() (bool, error) {
|
||||
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
|
||||
return up.f.shouldRetry(resp, err)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// Upload uploads the chunks from the input
|
||||
func (up *largeUpload) Upload() error {
|
||||
fs.Debug(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
|
||||
remaining := up.size
|
||||
errs := make(chan error, 1)
|
||||
var wg sync.WaitGroup
|
||||
var err error
|
||||
outer:
|
||||
for part := int64(1); part <= up.parts; part++ {
|
||||
// Check any errors
|
||||
select {
|
||||
case err = <-errs:
|
||||
break outer
|
||||
default:
|
||||
}
|
||||
|
||||
reqSize := remaining
|
||||
if reqSize >= int64(chunkSize) {
|
||||
reqSize = int64(chunkSize)
|
||||
}
|
||||
|
||||
// Read the chunk
|
||||
buf := make([]byte, reqSize)
|
||||
_, err = io.ReadFull(up.in, buf)
|
||||
if err != nil {
|
||||
break outer
|
||||
}
|
||||
|
||||
// Transfer the chunk
|
||||
// Get upload Token
|
||||
up.f.getUploadToken()
|
||||
wg.Add(1)
|
||||
go func(part int64, buf []byte) {
|
||||
defer up.f.returnUploadToken()
|
||||
defer wg.Done()
|
||||
err := up.transferChunk(part, buf)
|
||||
if err != nil {
|
||||
select {
|
||||
case errs <- err:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}(part, buf)
|
||||
|
||||
remaining -= reqSize
|
||||
}
|
||||
wg.Wait()
|
||||
if err == nil {
|
||||
select {
|
||||
case err = <-errs:
|
||||
default:
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
fs.Debug(up.o, "Cancelling large file upload due to error: %v", err)
|
||||
cancelErr := up.cancel()
|
||||
if cancelErr != nil {
|
||||
fs.ErrorLog(up.o, "Failed to cancel large file upload: %v", cancelErr)
|
||||
}
|
||||
return err
|
||||
}
|
||||
// Check any errors
|
||||
fs.Debug(up.o, "Finishing large file upload")
|
||||
return up.finish()
|
||||
}
|
||||
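// As a sketch of how these pieces fit together (the call site lives elsewhere
// in the b2 package and is not shown in this hunk):
//
//	up, err := f.newLargeUpload(o, in, src)
//	if err == nil {
//		err = up.Upload() // chunks the input, uploads parts in parallel, then finishes or cancels
//	}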
@@ -1,4 +1,4 @@
|
||||
#!/bin/sh
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
@@ -13,10 +13,37 @@ VERSION="$1"
|
||||
|
||||
rm -rf build
|
||||
|
||||
gox -output "build/{{.Dir}}-${VERSION}-{{.OS}}-{{.Arch}}/{{.Dir}}" -os "darwin linux freebsd openbsd windows freebsd netbsd plan9 solaris"
|
||||
# Not implemented yet: nacl dragonfly android
|
||||
# Disable CGO and dynamic builds on all platforms (including the build platform)
|
||||
export CGO_ENABLED=0
|
||||
|
||||
# Arch pairs we build for
|
||||
# gox -osarch-list for definitive list
|
||||
|
||||
OSARCH="\
|
||||
windows/386
|
||||
windows/amd64
|
||||
darwin/386
|
||||
darwin/amd64
|
||||
linux/386
|
||||
linux/amd64
|
||||
linux/arm
|
||||
freebsd/386
|
||||
freebsd/amd64
|
||||
freebsd/arm
|
||||
netbsd/386
|
||||
netbsd/amd64
|
||||
netbsd/arm
|
||||
openbsd/386
|
||||
openbsd/amd64
|
||||
plan9/386
|
||||
plan9/amd64
|
||||
solaris/amd64"
|
||||
|
||||
# Make space separated
|
||||
OSARCH=${OSARCH//$'\n'/ }
|
||||
|
||||
gox --ldflags "-s -X github.com/ncw/rclone/fs.Version=${VERSION}" -output "build/{{.Dir}}-${VERSION}-{{.OS}}-{{.Arch}}/{{.Dir}}" -osarch "${OSARCH}"
|
||||
|
||||
mv build/rclone-${VERSION}-darwin-amd64 build/rclone-${VERSION}-osx-amd64
|
||||
mv build/rclone-${VERSION}-darwin-386 build/rclone-${VERSION}-osx-386
|
||||
|
||||
|
||||
@@ -4,17 +4,20 @@ package dircache
|
||||
// _methods are called without the lock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// DirCache caches paths to directory IDs and vice versa
|
||||
type DirCache struct {
|
||||
mu sync.RWMutex
|
||||
cacheMu sync.RWMutex
|
||||
cache map[string]string
|
||||
invCache map[string]string
|
||||
mu sync.Mutex
|
||||
fs DirCacher // Interface to find and make stuff
|
||||
trueRootID string // ID of the absolute root
|
||||
root string // the path we are working on
|
||||
@@ -43,52 +46,36 @@ func New(root string, trueRootID string, fs DirCacher) *DirCache {
|
||||
return d
|
||||
}
|
||||
|
||||
// _get an ID given a path - without lock
|
||||
func (dc *DirCache) _get(path string) (id string, ok bool) {
|
||||
id, ok = dc.cache[path]
|
||||
return
|
||||
}
|
||||
|
||||
// Get an ID given a path
|
||||
func (dc *DirCache) Get(path string) (id string, ok bool) {
|
||||
dc.mu.RLock()
|
||||
id, ok = dc._get(path)
|
||||
dc.mu.RUnlock()
|
||||
dc.cacheMu.RLock()
|
||||
id, ok = dc.cache[path]
|
||||
dc.cacheMu.RUnlock()
|
||||
return
|
||||
}
|
||||
|
||||
// GetInv gets a path given an ID
|
||||
func (dc *DirCache) GetInv(path string) (id string, ok bool) {
|
||||
dc.mu.RLock()
|
||||
id, ok = dc.invCache[path]
|
||||
dc.mu.RUnlock()
|
||||
func (dc *DirCache) GetInv(id string) (path string, ok bool) {
|
||||
dc.cacheMu.RLock()
|
||||
path, ok = dc.invCache[id]
|
||||
dc.cacheMu.RUnlock()
|
||||
return
|
||||
}
|
||||
|
||||
// _put a path, id into the map without lock
|
||||
func (dc *DirCache) _put(path, id string) {
|
||||
dc.cache[path] = id
|
||||
dc.invCache[id] = path
|
||||
}
|
||||
|
||||
// Put a path, id into the map
|
||||
func (dc *DirCache) Put(path, id string) {
|
||||
dc.mu.Lock()
|
||||
dc._put(path, id)
|
||||
dc.mu.Unlock()
|
||||
}
|
||||
|
||||
// _flush the map of all data without lock
|
||||
func (dc *DirCache) _flush() {
|
||||
dc.cache = make(map[string]string)
|
||||
dc.invCache = make(map[string]string)
|
||||
dc.cacheMu.Lock()
|
||||
dc.cache[path] = id
|
||||
dc.invCache[id] = path
|
||||
dc.cacheMu.Unlock()
|
||||
}
|
||||
|
||||
// Flush the map of all data
|
||||
func (dc *DirCache) Flush() {
|
||||
dc.mu.Lock()
|
||||
dc._flush()
|
||||
dc.mu.Unlock()
|
||||
dc.cacheMu.Lock()
|
||||
dc.cache = make(map[string]string)
|
||||
dc.invCache = make(map[string]string)
|
||||
dc.cacheMu.Unlock()
|
||||
}
|
||||
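// For illustration, the two-way mapping maintained by Put/Get/GetInv after
// this change:
//
//	dc.Put("a/b", "id123")
//	id, ok := dc.Get("a/b")         // id == "id123", ok == true
//	path, ok2 := dc.GetInv("id123") // path == "a/b", ok2 == true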
|
||||
// SplitPath splits a path into directory, leaf
|
||||
@@ -120,8 +107,8 @@ func SplitPath(path string) (directory, leaf string) {
|
||||
// If not found strip the last path off the path and recurse
|
||||
// Now have a parent directory id, so look in the parent for self and return it
|
||||
func (dc *DirCache) FindDir(path string, create bool) (pathID string, err error) {
|
||||
dc.mu.RLock()
|
||||
defer dc.mu.RUnlock()
|
||||
dc.mu.Lock()
|
||||
defer dc.mu.Unlock()
|
||||
return dc._findDir(path, create)
|
||||
}
|
||||
|
||||
@@ -135,7 +122,7 @@ func (dc *DirCache) _findDirInCache(path string) string {
|
||||
}
|
||||
|
||||
// If it is in the cache then return it
|
||||
pathID, ok := dc._get(path)
|
||||
pathID, ok := dc.Get(path)
|
||||
if ok {
|
||||
// fmt.Println("Cache hit on", path)
|
||||
return pathID
|
||||
@@ -146,10 +133,6 @@ func (dc *DirCache) _findDirInCache(path string) string {
|
||||
|
||||
// Unlocked findDir - must have mu
|
||||
func (dc *DirCache) _findDir(path string, create bool) (pathID string, err error) {
|
||||
// if !dc.foundRoot {
|
||||
// return "", fmt.Errorf("FindDir called before FindRoot")
|
||||
// }
|
||||
|
||||
pathID = dc._findDirInCache(path)
|
||||
if pathID != "" {
|
||||
return pathID, nil
|
||||
@@ -176,15 +159,15 @@ func (dc *DirCache) _findDir(path string, create bool) (pathID string, err error
|
||||
if create {
|
||||
pathID, err = dc.fs.CreateDir(parentPathID, leaf)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Failed to make directory: %v", err)
|
||||
return "", errors.Wrap(err, "failed to make directory")
|
||||
}
|
||||
} else {
|
||||
return "", fmt.Errorf("Couldn't find directory: %q", path)
|
||||
return "", fs.ErrorDirNotFound
|
||||
}
|
||||
}
|
||||
|
||||
// Store the leaf directory in the cache
|
||||
dc._put(path, pathID)
|
||||
dc.Put(path, pathID)
|
||||
|
||||
// fmt.Println("Dir", path, "is", pathID)
|
||||
return pathID, nil
|
||||
@@ -198,13 +181,6 @@ func (dc *DirCache) FindPath(path string, create bool) (leaf, directoryID string
|
||||
defer dc.mu.Unlock()
|
||||
directory, leaf := SplitPath(path)
|
||||
directoryID, err = dc._findDir(directory, create)
|
||||
if err != nil {
|
||||
if create {
|
||||
err = fmt.Errorf("Couldn't find or make directory %q: %s", directory, err)
|
||||
} else {
|
||||
err = fmt.Errorf("Couldn't find directory %q: %s", directory, err)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -219,26 +195,32 @@ func (dc *DirCache) FindRoot(create bool) error {
|
||||
if dc.foundRoot {
|
||||
return nil
|
||||
}
|
||||
dc.foundRoot = true
|
||||
rootID, err := dc._findDir(dc.root, create)
|
||||
if err != nil {
|
||||
dc.foundRoot = false
|
||||
return err
|
||||
}
|
||||
dc.foundRoot = true
|
||||
dc.rootID = rootID
|
||||
|
||||
// Find the parent of the root while we still have the root
|
||||
// directory tree cached
|
||||
rootParentPath, _ := SplitPath(dc.root)
|
||||
dc.rootParentID, _ = dc._get(rootParentPath)
|
||||
dc.rootParentID, _ = dc.Get(rootParentPath)
|
||||
|
||||
// Reset the tree based on dc.root
|
||||
dc._flush()
|
||||
dc.Flush()
|
||||
// Put the root directory in
|
||||
dc._put("", dc.rootID)
|
||||
dc.Put("", dc.rootID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// FoundRoot returns whether the root directory has been found yet
|
||||
//
|
||||
// Call this from FindLeaf or CreateDir only
|
||||
func (dc *DirCache) FoundRoot() bool {
|
||||
return dc.foundRoot
|
||||
}
|
||||
|
||||
// RootID returns the ID of the root directory
|
||||
//
|
||||
// This should be called after FindRoot
|
||||
@@ -258,13 +240,13 @@ func (dc *DirCache) RootParentID() (string, error) {
|
||||
dc.mu.Lock()
|
||||
defer dc.mu.Unlock()
|
||||
if !dc.foundRoot {
|
||||
return "", fmt.Errorf("Internal Error: RootID() called before FindRoot")
|
||||
return "", errors.New("internal error: RootID() called before FindRoot")
|
||||
}
|
||||
if dc.rootParentID == "" {
|
||||
return "", fmt.Errorf("Internal Error: Didn't find rootParentID")
|
||||
return "", errors.New("internal error: didn't find rootParentID")
|
||||
}
|
||||
if dc.rootID == dc.trueRootID {
|
||||
return "", fmt.Errorf("Is root directory")
|
||||
return "", errors.New("is root directory")
|
||||
}
|
||||
return dc.rootParentID, nil
|
||||
}
|
||||
@@ -275,11 +257,11 @@ func (dc *DirCache) ResetRoot() {
|
||||
dc.mu.Lock()
|
||||
defer dc.mu.Unlock()
|
||||
dc.foundRoot = false
|
||||
dc._flush()
|
||||
dc.Flush()
|
||||
|
||||
// Put the true root in
|
||||
dc.rootID = dc.trueRootID
|
||||
|
||||
// Put the root directory in
|
||||
dc._put("", dc.rootID)
|
||||
dc.Put("", dc.rootID)
|
||||
}
|
||||
dircache/list.go (new file, 82 additions)
@@ -0,0 +1,82 @@
|
||||
// Listing utility functions for fses which use dircache
|
||||
|
||||
package dircache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
// ListDirJob describe a directory listing that needs to be done
|
||||
type ListDirJob struct {
|
||||
DirID string
|
||||
Path string
|
||||
Depth int
|
||||
}
|
||||
|
||||
// ListDirer describes the interface necessary to use ListDir
|
||||
type ListDirer interface {
|
||||
// ListDir reads the directory specified by the job into out, returning any more jobs
|
||||
ListDir(out fs.ListOpts, job ListDirJob) (jobs []ListDirJob, err error)
|
||||
}
|
||||
|
||||
// listDir lists the directory using a recursive list from the root
|
||||
//
|
||||
// It does this in parallel, calling f.ListDir to do the actual reading
|
||||
func listDir(f ListDirer, out fs.ListOpts, dirID string, path string) {
|
||||
// Start some directory listing go routines
|
||||
var wg sync.WaitGroup // sync closing of go routines
|
||||
var traversing sync.WaitGroup // running directory traversals
|
||||
buffer := out.Buffer()
|
||||
in := make(chan ListDirJob, buffer)
|
||||
for i := 0; i < buffer; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for job := range in {
|
||||
jobs, err := f.ListDir(out, job)
|
||||
if err != nil {
|
||||
out.SetError(err)
|
||||
fs.Debug(f, "Error reading %s: %s", path, err)
|
||||
} else {
|
||||
traversing.Add(len(jobs))
|
||||
go func() {
|
||||
// Now we have traversed this directory, send these
|
||||
// jobs off for traversal in the background
|
||||
for _, job := range jobs {
|
||||
in <- job
|
||||
}
|
||||
}()
|
||||
}
|
||||
traversing.Done()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Start the process
|
||||
traversing.Add(1)
|
||||
in <- ListDirJob{DirID: dirID, Path: path, Depth: out.Level() - 1}
|
||||
traversing.Wait()
|
||||
close(in)
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// List walks the path returning files and directories into out
|
||||
func (dc *DirCache) List(f ListDirer, out fs.ListOpts, dir string) {
|
||||
defer out.Finished()
|
||||
err := dc.FindRoot(false)
|
||||
if err != nil {
|
||||
out.SetError(err)
|
||||
return
|
||||
}
|
||||
id, err := dc.FindDir(dir, false)
|
||||
if err != nil {
|
||||
out.SetError(err)
|
||||
return
|
||||
}
|
||||
if dir != "" {
|
||||
dir += "/"
|
||||
}
|
||||
listDir(f, out, id, dir)
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
title: "Rclone"
|
||||
description: "rclone syncs files to and from Google Drive, S3, Swift, Cloudfiles, Dropbox, Google Cloud Storage and Amazon Cloud Drive."
|
||||
description: "rclone syncs files to and from Google Drive, S3, Swift, Cloudfiles, Dropbox, Google Cloud Storage and Amazon Drive."
|
||||
type: page
|
||||
date: "2015-09-06"
|
||||
groups: ["about"]
|
||||
@@ -18,18 +18,22 @@ Rclone is a command line program to sync files and directories to and from
|
||||
* Openstack Swift / Rackspace cloud files / Memset Memstore
|
||||
* Dropbox
|
||||
* Google Cloud Storage
|
||||
* Amazon Cloud Drive
|
||||
* Amazon Drive
|
||||
* Microsoft One Drive
|
||||
* Hubic
|
||||
* Backblaze B2
|
||||
* Yandex Disk
|
||||
* The local filesystem
|
||||
|
||||
Features
|
||||
|
||||
* MD5SUMs checked at all times for file integrity
|
||||
* MD5/SHA1 hashes checked at all times for file integrity
|
||||
* Timestamps preserved on files
|
||||
* Partial syncs supported on a whole file basis
|
||||
* Copy mode to just copy new/changed files
|
||||
* Sync mode to make a directory identical
|
||||
* Check mode to check all MD5SUMs
|
||||
* Can sync to and from network, eg two different Drive accounts
|
||||
* Sync (one way) mode to make a directory identical
|
||||
* Check mode to check for file hash equality
|
||||
* Can sync to and from network, eg two different cloud accounts
|
||||
|
||||
Links
|
||||
|
||||
|
||||
@@ -1,17 +1,17 @@
|
||||
---
|
||||
title: "Amazon Cloud Drive"
|
||||
description: "Rclone docs for Amazon Cloud Drive"
|
||||
date: "2015-09-06"
|
||||
title: "Amazon Drive"
|
||||
description: "Rclone docs for Amazon Drive"
|
||||
date: "2016-07-11"
|
||||
---
|
||||
|
||||
<i class="fa fa-google"></i> Amazon Cloud Drive
|
||||
<i class="fa fa-amazon"></i> Amazon Drive
|
||||
-----------------------------------------
|
||||
|
||||
Paths are specified as `remote:path`
|
||||
|
||||
Paths may be as deep as required, eg `remote:directory/subdirectory`.
|
||||
|
||||
The initial setup for Amazon cloud drive involves getting a token from
|
||||
The initial setup for Amazon Drive involves getting a token from
|
||||
Amazon which you need to do in your browser. `rclone config` walks
|
||||
you through it.
|
||||
|
||||
@@ -27,16 +27,31 @@ d) Delete remote
|
||||
q) Quit config
|
||||
e/n/d/q> n
|
||||
name> remote
|
||||
What type of source is it?
|
||||
Choose a number from below
|
||||
1) amazon cloud drive
|
||||
2) drive
|
||||
3) dropbox
|
||||
4) google cloud storage
|
||||
5) local
|
||||
6) s3
|
||||
7) swift
|
||||
type> 1
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Amazon Drive
|
||||
\ "amazon cloud drive"
|
||||
2 / Amazon S3 (also Dreamhost, Ceph)
|
||||
\ "s3"
|
||||
3 / Backblaze B2
|
||||
\ "b2"
|
||||
4 / Dropbox
|
||||
\ "dropbox"
|
||||
5 / Google Cloud Storage (this is not Google Drive)
|
||||
\ "google cloud storage"
|
||||
6 / Google Drive
|
||||
\ "drive"
|
||||
7 / Hubic
|
||||
\ "hubic"
|
||||
8 / Local Disk
|
||||
\ "local"
|
||||
9 / Microsoft OneDrive
|
||||
\ "onedrive"
|
||||
10 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
|
||||
\ "swift"
|
||||
11 / Yandex Disk
|
||||
\ "yandex"
|
||||
Storage> 1
|
||||
Amazon Application Client Id - leave blank normally.
|
||||
client_id>
|
||||
Amazon Application Client Secret - leave blank normally.
|
||||
@@ -58,6 +73,9 @@ d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
|
||||
See the [remote setup docs](/remote_setup/) for how to set it up on a
|
||||
machine with no Internet browser available.
|
||||
|
||||
Note that rclone runs a webserver on your local machine to collect the
|
||||
token as returned from Amazon. This only runs from the moment it
|
||||
opens your browser to the moment you get back the verification
|
||||
@@ -66,21 +84,21 @@ you to unblock it temporarily if you are running a host firewall.
|
||||
|
||||
Once configured you can then use `rclone` like this,
|
||||
|
||||
List directories in top level of your Amazon cloud drive
|
||||
List directories in top level of your Amazon Drive
|
||||
|
||||
rclone lsd remote:
|
||||
|
||||
List all the files in your Amazon cloud drive
|
||||
List all the files in your Amazon Drive
|
||||
|
||||
rclone ls remote:
|
||||
|
||||
To copy a local directory to an Amazon cloud drive directory called backup
|
||||
To copy a local directory to an Amazon Drive directory called backup
|
||||
|
||||
rclone copy /home/source remote:backup
|
||||
|
||||
### Modified time and MD5SUMs ###
|
||||
|
||||
Amazon cloud drive doesn't allow modification times to be changed via
|
||||
Amazon Drive doesn't allow modification times to be changed via
|
||||
the API so these won't be accurate or used for syncing.
|
||||
|
||||
It does store MD5SUMs so for a more accurate sync, you can use the
|
||||
@@ -91,14 +109,42 @@ It does store MD5SUMs so for a more accurate sync, you can use the
|
||||
Any files you delete with rclone will end up in the trash. Amazon
|
||||
don't provide an API to permanently delete files, nor to empty the
|
||||
trash, so you will have to do that with one of Amazon's apps or via
|
||||
the Amazon cloud drive website.
|
||||
the Amazon Drive website.
|
||||
|
||||
### Specific options ###
|
||||
|
||||
Here are the command line options specific to this cloud storage
|
||||
system.
|
||||
|
||||
#### --acd-templink-threshold=SIZE ####
|
||||
|
||||
Files this size or more will be downloaded via their `tempLink`. This
|
||||
is to work around a problem with Amazon Drive which blocks downloads
|
||||
of files bigger than about 10GB. The default for this is 9GB which
|
||||
shouldn't need to be changed.
|
||||
|
||||
To download files above this threshold, rclone requests a `tempLink`
|
||||
which downloads the file through a temporary URL directly from the
|
||||
underlying S3 storage.
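For example, to change the threshold to 5GB (an arbitrary value, shown only to illustrate the syntax; the remote name and paths are placeholders) you could run

    rclone --acd-templink-threshold 5G copy remote:backup /tmp/restore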
|
||||
|
||||
### Limitations ###
|
||||
|
||||
Note that Amazon cloud drive is case sensitive so you can't have a
|
||||
Note that Amazon Drive is case insensitive so you can't have a
|
||||
file called "Hello.doc" and one called "hello.doc".
|
||||
|
||||
Amazon cloud drive has rate limiting so you may notice errors in the
|
||||
Amazon Drive has rate limiting so you may notice errors in the
|
||||
sync (429 errors). rclone will automatically retry the sync up to 3
|
||||
times by default (see `--retries` flag) which should hopefully work
|
||||
around this problem.
|
||||
|
||||
Amazon Drive has an internal limit of file sizes that can be uploaded
|
||||
to the service. This limit is not officially published, but all files
|
||||
larger than this will fail.
|
||||
|
||||
At the time of writing (Jan 2016) this limit is in the area of 50GB per file.
|
||||
This means that larger files are likely to fail.
|
||||
|
||||
Unfortunately there is no way for rclone to see that this failure is
|
||||
because of file size, so it will retry the operation, as any other
|
||||
failure. To avoid this problem, use the `--max-size=50GB` option to limit
|
||||
the maximum size of uploaded files.
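For example, a sync that skips anything over that approximate limit (paths are placeholders) might be run as

    rclone --max-size=50GB sync /home/source remote:backup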
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
title: "Authors"
|
||||
description: "Rclone Authors and Contributors"
|
||||
date: "2015-09-28"
|
||||
date: "2016-04-22"
|
||||
---
|
||||
|
||||
Authors
|
||||
@@ -18,3 +18,20 @@ Contributors
|
||||
* Colin Nicholson <colin@colinn.com>
|
||||
* Klaus Post <klauspost@gmail.com>
|
||||
* Sergey Tolmachev <tolsi.ru@gmail.com>
|
||||
* Adriano Aurélio Meirelles <adriano@atinge.com>
|
||||
* C. Bess <cbess@users.noreply.github.com>
|
||||
* Dmitry Burdeev <dibu28@gmail.com>
|
||||
* Joseph Spurrier <github@josephspurrier.com>
|
||||
* Björn Harrtell <bjorn@wololo.org>
|
||||
* Xavier Lucas <xavier.lucas@corp.ovh.com>
|
||||
* Werner Beroux <werner@beroux.com>
|
||||
* Brian Stengaard <brian@stengaard.eu>
|
||||
* Jakub Gedeon <jgedeon@sofi.com>
|
||||
* Jim Tittsler <jwt@onjapan.net>
|
||||
* Michal Witkowski <michal@improbable.io>
|
||||
* Fabian Ruff <fabian.ruff@sap.com>
|
||||
* Leigh Klotz <klotz@quixey.com>
|
||||
* Romain Lapray <lapray.romain@gmail.com>
|
||||
* Justin R. Wilson <jrw972@gmail.com>
|
||||
* Antonio Messina <antonio.s.messina@gmail.com>
|
||||
* Stefan G. Weichinger <office@oops.co.at>
|
||||
|
||||
248
docs/content/b2.md
Normal file
@@ -0,0 +1,248 @@
|
||||
---
|
||||
title: "B2"
|
||||
description: "Backblaze B2"
|
||||
date: "2016-06-15"
|
||||
---
|
||||
|
||||
<i class="fa fa-fire"></i> Backblaze B2
|
||||
----------------------------------------
|
||||
|
||||
B2 is [Backblaze's cloud storage system](https://www.backblaze.com/b2/).
|
||||
|
||||
Paths are specified as `remote:bucket` (or `remote:` for the `lsd`
|
||||
command.) You may put subdirectories in too, eg `remote:bucket/path/to/dir`.
|
||||
|
||||
Here is an example of making a b2 configuration. First run
|
||||
|
||||
rclone config
|
||||
|
||||
This will guide you through an interactive setup process. You will
|
||||
need your account number (a short hex number) and key (a long hex
|
||||
number) which you can get from the b2 control panel.
|
||||
|
||||
```
|
||||
No remotes found - make a new one
|
||||
n) New remote
|
||||
q) Quit config
|
||||
n/q> n
|
||||
name> remote
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Amazon Drive
|
||||
\ "amazon cloud drive"
|
||||
2 / Amazon S3 (also Dreamhost, Ceph)
|
||||
\ "s3"
|
||||
3 / Backblaze B2
|
||||
\ "b2"
|
||||
4 / Dropbox
|
||||
\ "dropbox"
|
||||
5 / Google Cloud Storage (this is not Google Drive)
|
||||
\ "google cloud storage"
|
||||
6 / Google Drive
|
||||
\ "drive"
|
||||
7 / Hubic
|
||||
\ "hubic"
|
||||
8 / Local Disk
|
||||
\ "local"
|
||||
9 / Microsoft OneDrive
|
||||
\ "onedrive"
|
||||
10 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
|
||||
\ "swift"
|
||||
11 / Yandex Disk
|
||||
\ "yandex"
|
||||
Storage> 3
|
||||
Account ID
|
||||
account> 123456789abc
|
||||
Application Key
|
||||
key> 0123456789abcdef0123456789abcdef0123456789
|
||||
Endpoint for the service - leave blank normally.
|
||||
endpoint>
|
||||
Remote config
|
||||
--------------------
|
||||
[remote]
|
||||
account = 123456789abc
|
||||
key = 0123456789abcdef0123456789abcdef0123456789
|
||||
endpoint =
|
||||
--------------------
|
||||
y) Yes this is OK
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
|
||||
This remote is called `remote` and can now be used like this
|
||||
|
||||
See all buckets
|
||||
|
||||
rclone lsd remote:
|
||||
|
||||
Make a new bucket
|
||||
|
||||
rclone mkdir remote:bucket
|
||||
|
||||
List the contents of a bucket
|
||||
|
||||
rclone ls remote:bucket
|
||||
|
||||
Sync `/home/local/directory` to the remote bucket, deleting any
|
||||
excess files in the bucket.
|
||||
|
||||
rclone sync /home/local/directory remote:bucket
|
||||
|
||||
### Modified time ###
|
||||
|
||||
The modified time is stored as metadata on the object as
|
||||
`X-Bz-Info-src_last_modified_millis` as milliseconds since 1970-01-01
|
||||
in the Backblaze standard. Other tools should be able to use this as
|
||||
a modified time.
|
||||
|
||||
Modified times are used in syncing and are fully supported except in
|
||||
the case of updating a modification time on an existing object. In
|
||||
this case the object will be uploaded again as B2 doesn't have an API
|
||||
method to set the modification time independent of doing an upload.
|
||||
|
||||
### SHA1 checksums ###
|
||||
|
||||
The SHA1 checksums of the files are checked on upload and download and
|
||||
will be used in the syncing process.
|
||||
|
||||
Large files which are uploaded in chunks will store their SHA1 on the
|
||||
object as `X-Bz-Info-large_file_sha1` as recommended by Backblaze.
|
||||
|
||||
### Transfers ###
|
||||
|
||||
Backblaze recommends that you do lots of transfers simultaneously for
|
||||
maximum speed. In tests from my SSD equipped laptop the optimum
|
||||
setting is about `--transfers 32` though higher numbers may be used
|
||||
for a slight speed improvement. The optimum number for you may vary
|
||||
depending on your hardware, how big the files are, how much you want
|
||||
to load your computer, etc. The default of `--transfers 4` is
|
||||
definitely too low for Backblaze B2 though.
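As an illustration (the bucket name is a placeholder), a bulk upload with the suggested setting would look like

    rclone --transfers 32 copy /path/to/local remote:bucket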
|
||||
|
||||
Note that uploading big files (bigger than 200 MB by default) will use
|
||||
a 96 MB RAM buffer by default. There can be at most `--transfers` of
|
||||
these in use at any moment, so this sets the upper limit on the memory
|
||||
used.
|
||||
|
||||
### Versions ###
|
||||
|
||||
When rclone uploads a new version of a file it creates a [new version
|
||||
of it](https://www.backblaze.com/b2/docs/file_versions.html).
|
||||
Likewise when you delete a file, the old version will still be
|
||||
available.
|
||||
|
||||
Old versions of files are visible using the `--b2-versions` flag.
|
||||
|
||||
If you wish to remove all the old versions then you can use the
|
||||
`rclone cleanup remote:bucket` command which will delete all the old
|
||||
versions of files, leaving the current ones intact. You can also
|
||||
supply a path and only old versions under that path will be deleted,
|
||||
eg `rclone cleanup remote:bucket/path/to/stuff`.
|
||||
|
||||
When you `purge` a bucket, the current and the old versions will be
|
||||
deleted then the bucket will be deleted.
|
||||
|
||||
However `delete` will cause the current versions of the files to
|
||||
become hidden old versions.
|
||||
|
||||
Here is a session showing the listing and retrieval of an old
|
||||
version followed by a `cleanup` of the old versions.
|
||||
|
||||
Show current version and all the versions with `--b2-versions` flag.
|
||||
|
||||
```
|
||||
$ rclone -q ls b2:cleanup-test
|
||||
9 one.txt
|
||||
|
||||
$ rclone -q --b2-versions ls b2:cleanup-test
|
||||
9 one.txt
|
||||
8 one-v2016-07-04-141032-000.txt
|
||||
16 one-v2016-07-04-141003-000.txt
|
||||
15 one-v2016-07-02-155621-000.txt
|
||||
```
|
||||
|
||||
Retrieve an old version
|
||||
|
||||
```
|
||||
$ rclone -q --b2-versions copy b2:cleanup-test/one-v2016-07-04-141003-000.txt /tmp
|
||||
|
||||
$ ls -l /tmp/one-v2016-07-04-141003-000.txt
|
||||
-rw-rw-r-- 1 ncw ncw 16 Jul 2 17:46 /tmp/one-v2016-07-04-141003-000.txt
|
||||
```
|
||||
|
||||
Clean up all the old versions and show that they've gone.
|
||||
|
||||
```
|
||||
$ rclone -q cleanup b2:cleanup-test
|
||||
|
||||
$ rclone -q ls b2:cleanup-test
|
||||
9 one.txt
|
||||
|
||||
$ rclone -q --b2-versions ls b2:cleanup-test
|
||||
9 one.txt
|
||||
```
|
||||
|
||||
### Specific options ###
|
||||
|
||||
Here are the command line options specific to this cloud storage
|
||||
system.
|
||||
|
||||
#### --b2-chunk-size=SIZE ####
|
||||
|
||||
When uploading large files chunk the file into this size. Note that
|
||||
these chunks are buffered in memory and there might be a maximum of
|
||||
`--transfers` chunks in progress at once. 100,000,000 Bytes is the
|
||||
minimum size (default 96M).
|
||||
|
||||
#### --b2-upload-cutoff=SIZE ####
|
||||
|
||||
Cutoff for switching to chunked upload (default 190.735 MiB == 200
|
||||
MB). Files above this size will be uploaded in chunks of
|
||||
`--b2-chunk-size`.
|
||||
|
||||
This value should be set no larger than 4.657GiB (== 5GB) as this is
|
||||
the largest file size that can be uploaded.
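An illustrative invocation combining the two options (values picked only as an example, within the limits described above; paths are placeholders) might be

    rclone --b2-upload-cutoff 150M --b2-chunk-size 100M copy /path/to/local remote:bucket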
|
||||
|
||||
|
||||
#### --b2-test-mode=FLAG ####
|
||||
|
||||
This is for debugging purposes only.
|
||||
|
||||
Setting FLAG to one of the strings below will cause b2 to return
|
||||
specific errors for debugging purposes.
|
||||
|
||||
* `fail_some_uploads`
|
||||
* `expire_some_account_authorization_tokens`
|
||||
* `force_cap_exceeded`
|
||||
|
||||
These will be set in the `X-Bz-Test-Mode` header which is documented
|
||||
in the [b2 integrations
|
||||
checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).
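For example, to simulate upload failures during a test copy (paths are placeholders) you might run

    rclone --b2-test-mode fail_some_uploads copy /path/to/local remote:bucket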
|
||||
|
||||
#### --b2-versions ####
|
||||
|
||||
When set rclone will show and act on older versions of files. For example
|
||||
|
||||
Listing without `--b2-versions`
|
||||
|
||||
```
|
||||
$ rclone -q ls b2:cleanup-test
|
||||
9 one.txt
|
||||
```
|
||||
|
||||
And with
|
||||
|
||||
```
|
||||
$ rclone -q --b2-versions ls b2:cleanup-test
|
||||
9 one.txt
|
||||
8 one-v2016-07-04-141032-000.txt
|
||||
16 one-v2016-07-04-141003-000.txt
|
||||
15 one-v2016-07-02-155621-000.txt
|
||||
```
|
||||
|
||||
Showing that the current version is unchanged but older versions can
|
||||
be seen. These have the UTC date that they were uploaded to the
|
||||
server to the nearest millisecond appended to them.
|
||||
|
||||
Note that when using `--b2-versions` no file write operations are
|
||||
permitted, so you can't upload files or delete them.
|
||||
@@ -1,12 +1,222 @@
|
||||
---
|
||||
title: "Documentation"
|
||||
description: "Rclone Changelog"
|
||||
date: "2015-10-03"
|
||||
date: "2016-07-13"
|
||||
---
|
||||
|
||||
Changelog
|
||||
---------
|
||||
|
||||
* v1.32 - 2016-07-13
|
||||
* Backblaze B2
|
||||
* Fix upload of large files not in root
|
||||
* v1.31 - 2016-07-13
|
||||
* New Features
|
||||
* Reduce memory on sync by about 50%
|
||||
* Implement --no-traverse flag to stop copy traversing the destination remote.
|
||||
* This can be used to reduce memory usage down to the smallest possible.
|
||||
* Useful to copy a small number of files into a large destination folder.
|
||||
* Implement cleanup command for emptying trash / removing old versions of files
|
||||
* Currently B2 only
|
||||
* Single file handling improved
|
||||
* Now copied with --files-from
|
||||
* Automatically sets --no-traverse when copying a single file
|
||||
* Info on installing with ansible - thanks Stefan Weichinger
|
||||
* Implement --no-update-modtime flag to stop rclone fixing the remote modified times.
|
||||
* Bug Fixes
|
||||
* Fix move command - stop it running for overlapping Fses - this was causing data loss.
|
||||
* Local
|
||||
* Fix incomplete hashes - this was causing problems for B2.
|
||||
* Amazon Drive
|
||||
* Rename Amazon Cloud Drive to Amazon Drive - no changes to config file needed.
|
||||
* Swift
|
||||
* Add support for non-default project domain - thanks Antonio Messina.
|
||||
* S3
|
||||
* Add instructions on how to use rclone with minio.
|
||||
* Add ap-northeast-2 (Seoul) and ap-south-1 (Mumbai) regions.
|
||||
* Skip setting the modified time for objects > 5GB as it isn't possible.
|
||||
* Backblaze B2
|
||||
* Add --b2-versions flag so old versions can be listed and retrieved.
|
||||
* Treat 403 errors (eg cap exceeded) as fatal.
|
||||
* Implement cleanup command for deleting old file versions.
|
||||
* Make error handling compliant with B2 integrations notes.
|
||||
* Fix handling of token expiry.
|
||||
* Implement --b2-test-mode to set `X-Bz-Test-Mode` header.
|
||||
* Set cutoff for chunked upload to 200MB as per B2 guidelines.
|
||||
* Make upload multi-threaded.
|
||||
* Dropbox
|
||||
* Don't retry 461 errors.
|
||||
* v1.30 - 2016-06-18
|
||||
* New Features
|
||||
* Directory listing code reworked for more features and better error reporting (thanks to Klaus Post for help). This enables
|
||||
* Directory include filtering for efficiency
|
||||
* --max-depth parameter
|
||||
* Better error reporting
|
||||
* More to come
|
||||
* Retry more errors
|
||||
* Add --ignore-size flag - for uploading images to onedrive
|
||||
* Log -v output to stdout by default
|
||||
* Display the transfer stats in more human readable form
|
||||
* Make 0 size files specifiable with `--max-size 0b`
|
||||
* Add `b` suffix so we can specify bytes in --bwlimit, --min-size etc
|
||||
* Use "password:" instead of "password>" prompt - thanks Klaus Post and Leigh Klotz
|
||||
* Bug Fixes
|
||||
* Fix retry doing one too many retries
|
||||
* Local
|
||||
* Fix problems with OS X and UTF-8 characters
|
||||
* Amazon Drive
|
||||
* Check a file exists before uploading to help with 408 Conflict errors
|
||||
* Reauth on 401 errors - this has been causing a lot of problems
|
||||
* Work around spurious 403 errors
|
||||
* Restart directory listings on error
|
||||
* Google Drive
|
||||
* Check a file exists before uploading to help with duplicates
|
||||
* Fix retry of multipart uploads
|
||||
* Backblaze B2
|
||||
* Implement large file uploading
|
||||
* S3
|
||||
* Add AES256 server-side encryption - thanks Justin R. Wilson
|
||||
* Google Cloud Storage
|
||||
* Make sure we don't use conflicting content types on upload
|
||||
* Add service account support - thanks Michal Witkowski
|
||||
* Swift
|
||||
* Add auth version parameter
|
||||
* Add domain option for openstack (v3 auth) - thanks Fabian Ruff
|
||||
* v1.29 - 2016-04-18
|
||||
* New Features
|
||||
* Implement `-I, --ignore-times` for unconditional upload
|
||||
* Improve `dedupe` command
|
||||
* Now removes identical copies without asking
|
||||
* Now obeys `--dry-run`
|
||||
* Implement `--dedupe-mode` for non interactive running
|
||||
* `--dedupe-mode interactive` - interactive; this is the default.
|
||||
* `--dedupe-mode skip` - removes identical files then skips anything left.
|
||||
* `--dedupe-mode first` - removes identical files then keeps the first one.
|
||||
* `--dedupe-mode newest` - removes identical files then keeps the newest one.
|
||||
* `--dedupe-mode oldest` - removes identical files then keeps the oldest one.
|
||||
* `--dedupe-mode rename` - removes identical files then renames the rest to be different.
|
||||
* Bug fixes
|
||||
* Make rclone check obey the `--size-only` flag.
|
||||
* Use "application/octet-stream" if discovered mime type is invalid.
|
||||
* Fix missing "quit" option when there are no remotes.
|
||||
* Google Drive
|
||||
* Increase default chunk size to 8 MB - increases upload speed of big files
|
||||
* Speed up directory listings and make more reliable
|
||||
* Add missing retries for Move and DirMove - increases reliability
|
||||
* Preserve mime type on file update
|
||||
* Backblaze B2
|
||||
* Enable mod time syncing
|
||||
* This means that B2 will now check modification times
|
||||
* It will upload new files to update the modification times
|
||||
* (there isn't an API to just set the mod time.)
|
||||
* If you want the old behaviour use `--size-only`.
|
||||
* Update API to new version
|
||||
* Fix parsing of mod time when not in metadata
|
||||
* Swift/Hubic
|
||||
* Don't return an MD5SUM for static large objects
|
||||
* S3
|
||||
* Fix uploading files bigger than 50GB
|
||||
* v1.28 - 2016-03-01
|
||||
* New Features
|
||||
* Configuration file encryption - thanks Klaus Post
|
||||
* Improve `rclone config` adding more help and making it easier to understand
|
||||
* Implement `-u`/`--update` so creation times can be used on all remotes
|
||||
* Implement `--low-level-retries` flag
|
||||
* Optionally disable gzip compression on downloads with `--no-gzip-encoding`
|
||||
* Bug fixes
|
||||
* Don't make directories if `--dry-run` set
|
||||
* Fix and document the `move` command
|
||||
* Fix redirecting stderr on unix-like OSes when using `--log-file`
|
||||
* Fix `delete` command to wait until all finished - fixes missing deletes.
|
||||
* Backblaze B2
|
||||
* Use one upload URL per go routine fixes `more than one upload using auth token`
|
||||
* Add pacing, retries and reauthentication - fixes token expiry problems
|
||||
* Upload without using a temporary file from local (and remotes which support SHA1)
|
||||
* Fix reading metadata for all files when it shouldn't have been
|
||||
* Drive
|
||||
* Fix listing drive documents at root
|
||||
* Disable copy and move for Google docs
|
||||
* Swift
|
||||
* Fix uploading of chunked files with non ASCII characters
|
||||
* Allow setting of `storage_url` in the config - thanks Xavier Lucas
|
||||
* S3
|
||||
* Allow IAM role and credentials from environment variables - thanks Brian Stengaard
|
||||
* Allow low privilege users to use S3 (check if directory exists during Mkdir) - thanks Jakub Gedeon
|
||||
* Amazon Drive
|
||||
* Retry on more things to make directory listings more reliable
|
||||
* v1.27 - 2016-01-31
|
||||
* New Features
|
||||
* Easier headless configuration with `rclone authorize`
|
||||
* Add support for multiple hash types - we now check SHA1 as well as MD5 hashes.
|
||||
* `delete` command which does obey the filters (unlike `purge`)
|
||||
* `dedupe` command to deduplicate a remote. Useful with Google Drive.
|
||||
* Add `--ignore-existing` flag to skip all files that exist on destination.
|
||||
* Add `--delete-before`, `--delete-during`, `--delete-after` flags.
|
||||
* Add `--memprofile` flag to debug memory use.
|
||||
* Warn the user about files with same name but different case
|
||||
* Make `--include` rules add their implicit exclude * at the end of the filter list
|
||||
* Deprecate compiling with go1.3
|
||||
* Amazon Drive
|
||||
* Fix download of files > 10 GB
|
||||
* Fix directory traversal ("Next token is expired") for large directory listings
|
||||
* Remove 409 conflict from error codes we will retry - stops very long pauses
|
||||
* Backblaze B2
|
||||
* SHA1 hashes now checked by rclone core
|
||||
* Drive
|
||||
* Add `--drive-auth-owner-only` to only consider files owned by the user - thanks Björn Harrtell
|
||||
* Export Google documents
|
||||
* Dropbox
|
||||
* Make file exclusion error controllable with -q
|
||||
* Swift
|
||||
* Fix upload from unprivileged user.
|
||||
* S3
|
||||
* Fix updating of mod times of files with `+` in.
|
||||
* Local
|
||||
* Add local file system option to disable UNC on Windows.
|
||||
* v1.26 - 2016-01-02
|
||||
* New Features
|
||||
* Yandex storage backend - thank you Dmitry Burdeev ("dibu")
|
||||
* Implement Backblaze B2 storage backend
|
||||
* Add --min-age and --max-age flags - thank you Adriano Aurélio Meirelles
|
||||
* Make ls/lsl/md5sum/size/check obey includes and excludes
|
||||
* Fixes
|
||||
* Fix crash in http logging
|
||||
* Upload releases to github too
|
||||
* Swift
|
||||
* Fix sync for chunked files
|
||||
* One Drive
|
||||
* Re-enable server side copy
|
||||
* Don't mask HTTP error codes with JSON decode error
|
||||
* S3
|
||||
* Fix corrupting Content-Type on mod time update (thanks Joseph Spurrier)
|
||||
* v1.25 - 2015-11-14
|
||||
* New features
|
||||
* Implement Hubic storage system
|
||||
* Fixes
|
||||
* Fix deletion of some excluded files without --delete-excluded
|
||||
* This could have deleted files unexpectedly on sync
|
||||
* Always check first with `--dry-run`!
|
||||
* Swift
|
||||
* Stop SetModTime losing metadata (eg X-Object-Manifest)
|
||||
* This could have caused data loss for files > 5GB in size
|
||||
* Use ContentType from Object to avoid lookups in listings
|
||||
* One Drive
|
||||
* Disable server side copy as it seems to be broken at Microsoft
|
||||
* v1.24 - 2015-11-07
|
||||
* New features
|
||||
* Add support for Microsoft One Drive
|
||||
* Add `--no-check-certificate` option to disable server certificate verification
|
||||
* Add async readahead buffer for faster transfer of big files
|
||||
* Fixes
|
||||
* Allow spaces in remotes and check remote names for validity at creation time
|
||||
* Allow '&' and disallow ':' in Windows filenames.
|
||||
* Swift
|
||||
* Ignore directory marker objects where appropriate - allows working with Hubic
|
||||
* Don't delete the container if fs wasn't at root
|
||||
* S3
|
||||
* Don't delete the bucket if fs wasn't at root
|
||||
* Google Cloud Storage
|
||||
* Don't delete the bucket if fs wasn't at root
|
||||
* v1.23 - 2015-10-03
|
||||
* New features
|
||||
* Implement `rclone size` for measuring remotes
|
||||
@@ -29,13 +239,13 @@ Changelog
|
||||
* Make lsl output times in localtime
|
||||
* Fixes
|
||||
* Fix allowing user to override credentials again in Drive, GCS and ACD
|
||||
* Amazon Cloud Drive
|
||||
* Amazon Drive
|
||||
* Implement compliant pacing scheme
|
||||
* Google Drive
|
||||
* Make directory reads concurrent for increased speed.
|
||||
* v1.20 - 2015-09-15
|
||||
* New features
|
||||
* Amazon Cloud Drive support
|
||||
* Amazon Drive support
|
||||
* Oauth support redone - fix many bugs and improve usability
|
||||
* Use "golang.org/x/oauth2" as oauth library of choice
|
||||
* Improve oauth usability for smoother initial signup
|
||||
|
||||
@@ -25,6 +25,11 @@ See the following for detailed instructions for
|
||||
* [Dropbox](/dropbox/)
|
||||
* [Google Cloud Storage](/googlecloudstorage/)
|
||||
* [Local filesystem](/local/)
|
||||
* [Amazon Drive](/amazonclouddrive/)
|
||||
* [Backblaze B2](/b2/)
|
||||
* [Hubic](/hubic/)
|
||||
* [Microsoft One Drive](/onedrive/)
|
||||
* [Yandex Disk](/yandex/)
|
||||
|
||||
Usage
|
||||
-----
|
||||
@@ -50,17 +55,83 @@ Copy the source to the destination. Doesn't transfer
|
||||
unchanged files, testing by size and modification time or
|
||||
MD5SUM. Doesn't delete files from the destination.
|
||||
|
||||
Note that it is always the contents of the directory that is synced,
|
||||
not the directory so when source:path is a directory, it's the
|
||||
contents of source:path that are copied, not the directory name and
|
||||
contents.
|
||||
|
||||
If dest:path doesn't exist, it is created and the source:path contents
|
||||
go there.
|
||||
|
||||
For example
|
||||
|
||||
rclone copy source:sourcepath dest:destpath
|
||||
|
||||
Let's say there are two files in sourcepath
|
||||
|
||||
sourcepath/one.txt
|
||||
sourcepath/two.txt
|
||||
|
||||
This copies them to
|
||||
|
||||
destpath/one.txt
|
||||
destpath/two.txt
|
||||
|
||||
Not to
|
||||
|
||||
destpath/sourcepath/one.txt
|
||||
destpath/sourcepath/two.txt
|
||||
|
||||
If you are familiar with `rsync`, rclone always works as if you had
|
||||
written a trailing / - meaning "copy the contents of this directory".
|
||||
This applies to all commands and whether you are talking about the
|
||||
source or destination.
|
||||
|
||||
See the `--no-traverse` option for controlling whether rclone lists
|
||||
the destination directory or not.
|
||||
|
||||
### rclone sync source:path dest:path ###
|
||||
|
||||
Sync the source to the destination, changing the destination
|
||||
only. Doesn't transfer unchanged files, testing by size and
|
||||
modification time or MD5SUM. Destination is updated to match
|
||||
source, including deleting files if necessary. Since this can
|
||||
cause data loss, test first with the `--dry-run` flag.
|
||||
source, including deleting files if necessary.
|
||||
|
||||
**Important**: Since this can cause data loss, test first with the
|
||||
`--dry-run` flag to see exactly what would be copied and deleted.
|
||||
|
||||
Note that files in the destination won't be deleted if there were any
|
||||
errors at any point.
|
||||
|
||||
It is always the contents of the directory that is synced, not the
|
||||
directory so when source:path is a directory, it's the contents of
|
||||
source:path that are copied, not the directory name and contents. See
|
||||
extended explanation in the `copy` command above if unsure.
|
||||
|
||||
If dest:path doesn't exist, it is created and the source:path contents
|
||||
go there.
|
||||
|
||||
### move source:path dest:path ###
|
||||
|
||||
Moves the contents of the source directory to the destination
|
||||
directory. Rclone will error if the source and destination overlap.
|
||||
|
||||
If no filters are in use and if possible this will server side move
|
||||
`source:path` into `dest:path`. After this `source:path` will no
|
||||
longer exist.
|
||||
|
||||
Otherwise for each file in `source:path` selected by the filters (if
|
||||
any) this will move it into `dest:path`. If possible a server side
|
||||
move will be used, otherwise it will copy it (server side if possible)
|
||||
into `dest:path` then delete the original (if no errors on copy) in
|
||||
`source:path`.
|
||||
|
||||
**Important**: Since this can cause data loss, test first with the
|
||||
--dry-run flag.
|
||||
|
||||
### rclone ls remote:path ###
|
||||
|
||||
List all the objects in the the path with size and path.
|
||||
List all the objects in the path with size and path.
|
||||
|
||||
### rclone lsd remote:path ###
|
||||
|
||||
@@ -76,6 +147,11 @@ size and path.
|
||||
Produces an md5sum file for all the objects in the path. This
|
||||
is in the same format as the standard md5sum tool produces.
|
||||
|
||||
### rclone sha1sum remote:path ###
|
||||
|
||||
Produces an sha1sum file for all the objects in the path. This
|
||||
is in the same format as the standard sha1sum tool produces.
|
||||
|
||||
### rclone size remote:path ###
|
||||
|
||||
Prints the total size of objects in remote:path and the number of
|
||||
@@ -92,7 +168,28 @@ objects in it, use purge for that.
|
||||
|
||||
### rclone purge remote:path ###
|
||||
|
||||
Remove the path and all of its contents.
|
||||
Remove the path and all of its contents. Note that this does not obey
|
||||
include/exclude filters - everything will be removed. Use `delete` if
|
||||
you want to selectively delete files.
|
||||
|
||||
### rclone delete remote:path ###
|
||||
|
||||
Remove the contents of path. Unlike `purge` it obeys include/exclude
|
||||
filters so can be used to selectively delete files.
|
||||
|
||||
Eg delete all files bigger than 100MBytes
|
||||
|
||||
Check what would be deleted first (use either)
|
||||
|
||||
rclone --min-size 100M lsl remote:path
|
||||
rclone --dry-run --min-size 100M delete remote:path
|
||||
|
||||
Then delete
|
||||
|
||||
rclone --min-size 100M delete remote:path
|
||||
|
||||
That reads "delete everything with a minimum size of 100 MB", hence
|
||||
delete all files bigger than 100MBytes.
|
||||
|
||||
### rclone check source:path dest:path ###
|
||||
|
||||
@@ -100,6 +197,92 @@ Checks the files in the source and destination match. It
|
||||
compares sizes and MD5SUMs and prints a report of files which
|
||||
don't match. It doesn't alter the source or destination.
|
||||
|
||||
`--size-only` may be used to only compare the sizes, not the MD5SUMs.
|
||||
|
||||
### rclone cleanup remote:path ###
|
||||
|
||||
Clean up the remote if possible. Empty the trash or delete old file
|
||||
versions. Not supported by all remotes.
|
||||
|
||||
### rclone dedupe remote:path ###
|
||||
|
||||
By default `dedupe` interactively finds duplicate files and offers to
|
||||
delete all but one or rename them to be different. Only useful with
|
||||
Google Drive which can have duplicate file names.
|
||||
|
||||
The `dedupe` command will delete all but one of any identical (same
|
||||
md5sum) files it finds without confirmation. This means that for most
|
||||
duplicated files the `dedupe` command will not be interactive. You
|
||||
can use `--dry-run` to see what would happen without doing anything.
|
||||
|
||||
Here is an example run.
|
||||
|
||||
Before - with duplicates
|
||||
|
||||
```
|
||||
$ rclone lsl drive:dupes
|
||||
6048320 2016-03-05 16:23:16.798000000 one.txt
|
||||
6048320 2016-03-05 16:23:11.775000000 one.txt
|
||||
564374 2016-03-05 16:23:06.731000000 one.txt
|
||||
6048320 2016-03-05 16:18:26.092000000 one.txt
|
||||
6048320 2016-03-05 16:22:46.185000000 two.txt
|
||||
1744073 2016-03-05 16:22:38.104000000 two.txt
|
||||
564374 2016-03-05 16:22:52.118000000 two.txt
|
||||
```
|
||||
|
||||
Now the `dedupe` session
|
||||
|
||||
```
|
||||
$ rclone dedupe drive:dupes
|
||||
2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
|
||||
one.txt: Found 4 duplicates - deleting identical copies
|
||||
one.txt: Deleting 2/3 identical duplicates (md5sum "1eedaa9fe86fd4b8632e2ac549403b36")
|
||||
one.txt: 2 duplicates remain
|
||||
1: 6048320 bytes, 2016-03-05 16:23:16.798000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
|
||||
2: 564374 bytes, 2016-03-05 16:23:06.731000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
|
||||
s) Skip and do nothing
|
||||
k) Keep just one (choose which in next step)
|
||||
r) Rename all to be different (by changing file.jpg to file-1.jpg)
|
||||
s/k/r> k
|
||||
Enter the number of the file to keep> 1
|
||||
one.txt: Deleted 1 extra copies
|
||||
two.txt: Found 3 duplicates - deleting identical copies
|
||||
two.txt: 3 duplicates remain
|
||||
1: 564374 bytes, 2016-03-05 16:22:52.118000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
|
||||
2: 6048320 bytes, 2016-03-05 16:22:46.185000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
|
||||
3: 1744073 bytes, 2016-03-05 16:22:38.104000000, md5sum 851957f7fb6f0bc4ce76be966d336802
|
||||
s) Skip and do nothing
|
||||
k) Keep just one (choose which in next step)
|
||||
r) Rename all to be different (by changing file.jpg to file-1.jpg)
|
||||
s/k/r> r
|
||||
two-1.txt: renamed from: two.txt
|
||||
two-2.txt: renamed from: two.txt
|
||||
two-3.txt: renamed from: two.txt
|
||||
```
|
||||
|
||||
The result being
|
||||
|
||||
```
|
||||
$ rclone lsl drive:dupes
|
||||
6048320 2016-03-05 16:23:16.798000000 one.txt
|
||||
564374 2016-03-05 16:22:52.118000000 two-1.txt
|
||||
6048320 2016-03-05 16:22:46.185000000 two-2.txt
|
||||
1744073 2016-03-05 16:22:38.104000000 two-3.txt
|
||||
```
|
||||
|
||||
Dedupe can be run non interactively using the `--dedupe-mode` flag.
|
||||
|
||||
* `--dedupe-mode interactive` - interactive as above.
|
||||
* `--dedupe-mode skip` - removes identical files then skips anything left.
|
||||
* `--dedupe-mode first` - removes identical files then keeps the first one.
|
||||
* `--dedupe-mode newest` - removes identical files then keeps the newest one.
|
||||
* `--dedupe-mode oldest` - removes identical files then keeps the oldest one.
|
||||
* `--dedupe-mode rename` - removes identical files then renames the rest to be different.
|
||||
|
||||
For example to rename all the identically named photos in your Google Photos directory, do
|
||||
|
||||
rclone dedupe --dedupe-mode rename "drive:Google Photos"
|
||||
|
||||
### rclone config ###
|
||||
|
||||
Enter an interactive configuration session.
|
||||
@@ -108,6 +291,69 @@ Enter an interactive configuration session.
|
||||
|
||||
Prints help on rclone commands and options.
|
||||
|
||||
Copying single files
|
||||
--------------------
|
||||
|
||||
rclone normally syncs or copies directories. However if the source
|
||||
remote points to a file, rclone will just copy that file. The
|
||||
destination remote must point to a directory - rclone will give the
|
||||
error `Failed to create file system for "remote:file": is a file not a
|
||||
directory` if it isn't.
|
||||
|
||||
For example, suppose you have a remote with a file in called
|
||||
`test.jpg`, then you could copy just that file like this
|
||||
|
||||
rclone copy remote:test.jpg /tmp/download
|
||||
|
||||
The file `test.jpg` will be placed inside `/tmp/download`.
|
||||
|
||||
This is equivalent to specifying
|
||||
|
||||
rclone copy --no-traverse --files-from /tmp/files remote: /tmp/download
|
||||
|
||||
Where `/tmp/files` contains the single line
|
||||
|
||||
test.jpg
|
||||
|
||||
It is recommended to use `copy` when copying single files not `sync`.
|
||||
They have pretty much the same effect but `copy` will use a lot less
|
||||
memory.
|
||||
|
||||
Quoting and the shell
|
||||
---------------------
|
||||
|
||||
When you are typing commands to your computer you are using something
|
||||
called the command line shell. This interprets various characters in
|
||||
an OS specific way.
|
||||
|
||||
Here are some gotchas which may help users unfamiliar with the shell rules
|
||||
|
||||
### Linux / OSX ###
|
||||
|
||||
If your names have spaces or shell metacharacters (eg `*`, `?`, `$`,
|
||||
`'`, `"` etc) then you must quote them. Use single quotes `'` by default.
|
||||
|
||||
rclone copy 'Important files?' remote:backup
|
||||
|
||||
If you want to send a `'` you will need to use `"`, eg
|
||||
|
||||
rclone copy "O'Reilly Reviews" remote:backup
|
||||
|
||||
The rules for quoting metacharacters are complicated and if you want
|
||||
the full details you'll have to consult the manual page for your
|
||||
shell.
|
||||
|
||||
### Windows ###
|
||||
|
||||
If your names have spaces in you need to put them in `"`, eg
|
||||
|
||||
rclone copy "E:\folder name\folder name\folder name" remote:backup
|
||||
|
||||
If you are using the root directory on its own then don't quote it
|
||||
(see [#464](https://github.com/ncw/rclone/issues/464) for why), eg
|
||||
|
||||
rclone copy E:\ remote:backup
|
||||
|
||||
Server Side Copy
|
||||
----------------
|
||||
|
||||
@@ -149,13 +395,14 @@ possibly signed sequence of decimal numbers, each with optional
|
||||
fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid
|
||||
time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
|
||||
Options which use SIZE use kByte by default. However a suffix of `k`
|
||||
for kBytes, `M` for MBytes and `G` for GBytes may be used. These are
|
||||
the binary units, eg 2**10, 2**20, 2**30 respectively.
|
||||
Options which use SIZE use kByte by default. However a suffix of `b`
|
||||
for bytes, `k` for kBytes, `M` for MBytes and `G` for GBytes may be
|
||||
used. These are the binary units, eg 1, 2\*\*10, 2\*\*20, 2\*\*30
|
||||
respectively.
|
||||
|
||||
### --bwlimit=SIZE ###
|
||||
|
||||
Bandwidth limit in kBytes/s, or use suffix k|M|G. The default is `0`
|
||||
Bandwidth limit in kBytes/s, or use suffix b|k|M|G. The default is `0`
|
||||
which means to not limit bandwidth.
|
||||
|
||||
For example to limit bandwidth usage to 10 MBytes/s use `--bwlimit 10M`
|
||||
@@ -176,11 +423,15 @@ The default is to run 8 checkers in parallel.
|
||||
|
||||
Normally rclone will look at modification time and size of files to
|
||||
see if they are equal. If you set this flag then rclone will check
|
||||
MD5SUM and size to determine if files are equal.
|
||||
the file hash and size to determine if files are equal.
|
||||
|
||||
This is useful when the remote doesn't support setting modified time
|
||||
and a more accurate sync is desired than just checking the file size.
|
||||
|
||||
This is very useful when transferring between remotes which store the
|
||||
MD5SUM on the object which include swift, s3, drive, and google cloud
|
||||
storage.
|
||||
same hash type on the object, eg Drive and Swift. For details of which
|
||||
remotes support which hash type see the table in the [overview
|
||||
section](/overview/).
|
||||
|
||||
Eg `rclone --checksum sync s3:/bucket swift:/bucket` would run much
|
||||
quicker than without the `--checksum` flag.
|
||||
@@ -205,17 +456,88 @@ The connection timeout is the amount of time rclone will wait for a
|
||||
connection to go through to a remote object storage system. It is
|
||||
`1m` by default.
|
||||
|
||||
### --dedupe-mode MODE ###
|
||||
|
||||
Mode to run dedupe command in. One of `interactive`, `skip`, `first`, `newest`, `oldest`, `rename`. The default is `interactive`. See the dedupe command for more information as to what these options mean.
|
||||
|
||||
### -n, --dry-run ###
|
||||
|
||||
Do a trial run with no permanent changes. Use this in combination
|
||||
with the `-v` flag to see what rclone would do without actually doing
|
||||
it. Useful when setting up the `sync` command.
|
||||
Do a trial run with no permanent changes. Use this to see what rclone
|
||||
would do without actually doing it. Useful when setting up the `sync`
|
||||
command which deletes files in the destination.
|
||||
|
||||
### --ignore-existing ###
|
||||
|
||||
Using this option will make rclone unconditionally skip all files
|
||||
that exist on the destination, no matter the content of these files.
|
||||
|
||||
While this isn't a generally recommended option, it can be useful
|
||||
in cases where your files change due to encryption. However, it cannot
|
||||
correct partial transfers in case a transfer was interrupted.
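For example (paths are placeholders)

    rclone --ignore-existing copy /home/source remote:backup

will leave any file that already exists on `remote:backup` untouched, whatever its contents.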
|
||||
|
||||
### --ignore-size ###
|
||||
|
||||
Normally rclone will look at modification time and size of files to
|
||||
see if they are equal. If you set this flag then rclone will check
|
||||
only the modification time. If `--checksum` is set then it only
|
||||
checks the checksum.
|
||||
|
||||
It will also cause rclone to skip verifying the sizes are the same
|
||||
after transfer.
|
||||
|
||||
This can be useful for transferring files to and from onedrive which
|
||||
occasionally misreports the size of image files (see
|
||||
[#399](https://github.com/ncw/rclone/issues/399) for more info).
|
||||
|
||||
### -I, --ignore-times ###
|
||||
|
||||
Using this option will cause rclone to unconditionally upload all
|
||||
files regardless of the state of files on the destination.
|
||||
|
||||
Normally rclone would skip any files that have the same
|
||||
modification time and are the same size (or have the same checksum if
|
||||
using `--checksum`).
|
||||
|
||||
### --log-file=FILE ###
|
||||
|
||||
Log all of rclone's output to FILE. This is not active by default.
|
||||
This can be useful for tracking down problems with syncs in
|
||||
combination with the `-v` flag.
|
||||
combination with the `-v` flag. See the Logging section for more
|
||||
info.
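For example, a verbose sync logged to a file (paths are placeholders) could be run as

    rclone -v --log-file=/tmp/rclone.log sync /home/source remote:backup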
|
||||
|
||||
### --low-level-retries NUMBER ###
|
||||
|
||||
This controls the number of low level retries rclone does.
|
||||
|
||||
A low level retry is used to retry a failing operation - typically one
|
||||
HTTP request. This might be uploading a chunk of a big file for
|
||||
example. You will see low level retries in the log with the `-v`
|
||||
flag.
|
||||
|
||||
This shouldn't need to be changed from the default in normal
|
||||
operations, however if you get a lot of low level retries you may wish
|
||||
to reduce the value so rclone moves on to a high level retry (see the
|
||||
`--retries` flag) quicker.
|
||||
|
||||
Disable low level retries with `--low-level-retries 1`.
|
||||
|
||||
### --max-depth=N ###
|
||||
|
||||
This modifies the recursion depth for all the commands except purge.
|
||||
|
||||
So if you do `rclone --max-depth 1 ls remote:path` you will see only
|
||||
the files in the top level directory. Using `--max-depth 2` means you
|
||||
will see all the files in first two directory levels and so on.
|
||||
|
||||
For historical reasons the `lsd` command defaults to using a
|
||||
`--max-depth` of 1 - you can override this with the command line flag.
|
||||
|
||||
You can use this command to disable recursion (with `--max-depth 1`).
|
||||
|
||||
Note that if you use this with `sync` and `--delete-excluded` the
|
||||
files not recursed through are considered excluded and will be deleted
|
||||
on the destination. Test first with `--dry-run` if you are not sure
|
||||
what will happen.
|
||||
|
||||
### --modify-window=TIME ###
|
||||
|
||||
@@ -230,11 +552,38 @@ if you are reading and writing to an OS X filing system this will be
|
||||
|
||||
This command line flag allows you to override that computed default.
|
||||
|
||||
### --no-gzip-encoding ###
|
||||
|
||||
Don't set `Accept-Encoding: gzip`. This means that rclone won't ask
|
||||
the server for compressed files automatically. Useful if you've set
|
||||
the server to return files with `Content-Encoding: gzip` but you
|
||||
uploaded compressed files.
|
||||
|
||||
There is no need to set this in normal operation, and doing so will
|
||||
decrease the network transfer efficiency of rclone.
|
||||
|
||||
### --no-update-modtime ###
|
||||
|
||||
When using this flag, rclone won't update modification times of remote
|
||||
files if they are incorrect as it would normally.
|
||||
|
||||
This can be used if the remote is being synced with another tool also
|
||||
(eg the Google Drive client).
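For example (paths are placeholders)

    rclone --no-update-modtime sync /home/source remote:backup

will sync as normal but leave the remote modification times alone.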
|
||||
|
||||
### -q, --quiet ###
|
||||
|
||||
Normally rclone outputs stats and a completion message. If you set
|
||||
this flag it will make as little output as possible.
|
||||
|
||||
### --retries int ###
|
||||
|
||||
Retry the entire sync if it fails this many times (default 3).
|
||||
|
||||
Some remotes can be unreliable and a few retries helps pick up the
|
||||
files which didn't get transferred because of errors.
|
||||
|
||||
Disable retries with `--retries 1`.
|
||||
|
||||
### --size-only ###
|
||||
|
||||
Normally rclone will look at modification time and size of files to
|
||||
@@ -245,9 +594,6 @@ This can be useful transferring files from dropbox which have been
|
||||
modified by the desktop sync client which doesn't set checksums or
|
||||
modification times in the same way as rclone.
|
||||
|
||||
When using this flag, rclone won't update mtimes of remote files if
|
||||
they are incorrect as it would normally.
|
||||
|
||||
### --stats=TIME ###
|
||||
|
||||
Rclone will print stats at regular intervals to show its progress.
|
||||
@@ -256,6 +602,24 @@ This sets the interval.
|
||||
|
||||
The default is `1m`. Use 0 to disable.
|
||||
|
||||
### --delete-(before,during,after) ###
|
||||
|
||||
This option allows you to specify when files on your destination are
|
||||
deleted when you sync folders.
|
||||
|
||||
Specifying the value `--delete-before` will delete all files present
|
||||
on the destination, but not on the source *before* starting the
|
||||
transfer of any new or updated files. This uses extra memory as it
|
||||
has to store the source listing before proceeding.
|
||||
|
||||
Specifying `--delete-during` (default value) will delete files while
|
||||
checking and uploading files. This is usually the fastest option.
|
||||
Currently this works the same as `--delete-after` but it may change in
|
||||
the future.
|
||||
|
||||
Specifying `--delete-after` will delay deletion of files until all new/updated
|
||||
files have been successfully transferred.
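For example, to delay deletions until everything has been copied (paths are placeholders)

    rclone sync --delete-after /home/source remote:backup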
|
||||
|
||||
### --timeout=TIME ###
|
||||
|
||||
This sets the IO idle timeout. If a transfer has started but then
|
||||
@@ -271,6 +635,25 @@ of timeouts or bigger if you have lots of bandwidth and a fast remote.
|
||||
|
||||
The default is to run 4 file transfers in parallel.
|
||||
|
||||
### -u, --update ###
|
||||
|
||||
This forces rclone to skip any files which exist on the destination
|
||||
and have a modified time that is newer than the source file.
|
||||
|
||||
If an existing destination file has a modification time equal (within
|
||||
the computed modify window precision) to the source file's, it will be
|
||||
updated if the sizes are different.
|
||||
|
||||
On remotes which don't support mod time directly the time checked will
|
||||
be the uploaded time. This means that if uploading to one of these
|
||||
remotes, rclone will skip any files which exist on the destination and
|
||||
have an uploaded time that is newer than the modification time of the
|
||||
source file.
|
||||
|
||||
This can be useful when transferring to a remote which doesn't support
|
||||
mod times directly as it is more accurate than a `--size-only` check
|
||||
and faster than using `--checksum`.
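For example (paths are placeholders)

    rclone --update copy /home/source remote:backup

will skip any destination files whose recorded time is already newer than the source file.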
|
||||
|
||||
### -v, --verbose ###
|
||||
|
||||
If you set this flag, rclone will become very verbose telling you
|
||||
@@ -282,17 +665,138 @@ Very useful for debugging.
|
||||
|
||||
Prints the version number
|
||||
|
||||
Configuration Encryption
|
||||
------------------------
|
||||
Your configuration file contains information for logging in to
|
||||
your cloud services. This means that you should keep your
|
||||
`.rclone.conf` file in a secure location.
|
||||
|
||||
If you are in an environment where that isn't possible, you can
|
||||
add a password to your configuration. This means that you will
|
||||
have to enter the password every time you start rclone.
|
||||
|
||||
To add a password to your rclone configuration, execute `rclone config`.
|
||||
|
||||
```
|
||||
>rclone config
|
||||
Current remotes:
|
||||
|
||||
e) Edit existing remote
|
||||
n) New remote
|
||||
d) Delete remote
|
||||
s) Set configuration password
|
||||
q) Quit config
|
||||
e/n/d/s/q>
|
||||
```
|
||||
|
||||
Go into `s`, Set configuration password:
|
||||
```
|
||||
e/n/d/s/q> s
|
||||
Your configuration is not encrypted.
|
||||
If you add a password, you will protect your login information to cloud services.
|
||||
a) Add Password
|
||||
q) Quit to main menu
|
||||
a/q> a
|
||||
Enter NEW configuration password:
|
||||
password:
|
||||
Confirm NEW password:
|
||||
password:
|
||||
Password set
|
||||
Your configuration is encrypted.
|
||||
c) Change Password
|
||||
u) Unencrypt configuration
|
||||
q) Quit to main menu
|
||||
c/u/q>
|
||||
```
|
||||
|
||||
Your configuration is now encrypted, and every time you start rclone
|
||||
you will now be asked for the password. In the same menu you can
|
||||
change the password or completely remove encryption from your
|
||||
configuration.
|
||||
|
||||
There is no way to recover the configuration if you lose your password.
|
||||
|
||||
rclone uses [nacl secretbox](https://godoc.org/golang.org/x/crypto/nacl/secretbox)
|
||||
which in turn uses XSalsa20 and Poly1305 to encrypt and authenticate
|
||||
your configuration with secret-key cryptography.
|
||||
The password is SHA-256 hashed, which produces the key for secretbox.
|
||||
The hashed password is not stored.
|
||||
|
||||
While this provides very good security, we do not recommend storing
|
||||
your encrypted rclone configuration in public if it contains sensitive
|
||||
information, unless perhaps you use a very strong password.
|
||||
|
||||
If it is safe in your environment, you can set the `RCLONE_CONFIG_PASS`
|
||||
environment variable to contain your password, in which case it will be
|
||||
used for decrypting the configuration.
|
||||
|
||||
If you are running rclone inside a script, you might want to disable
|
||||
password prompts. To do that, pass the parameter
|
||||
`--ask-password=false` to rclone. This will make rclone fail instead
|
||||
of asking for a password if `RCLONE_CONFIG_PASS` doesn't contain
|
||||
a valid password.
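A sketch of how this might look in a script - the variable handling is up to you, and keeping the password in plain text like this is only for illustration:

    export RCLONE_CONFIG_PASS="my secret password"
    rclone --ask-password=false sync /home/source remote:backup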
|
||||
|
||||
|
||||
Developer options
|
||||
-----------------
|
||||
|
||||
These options are useful when developing or debugging rclone. There
|
||||
are also some more remote specific options which aren't documented
|
||||
here which are used for testing. These start with remote name eg
|
||||
`--drive-test-option`.
|
||||
`--drive-test-option` - see the docs for the remote in question.
|
||||
|
||||
### --cpuprofile=FILE ###
|
||||
|
||||
Write cpu profile to file. This can be analysed with `go tool pprof`.
|
||||
Write CPU profile to file. This can be analysed with `go tool pprof`.
|
||||
|
||||
### --dump-bodies ###
|
||||
|
||||
Dump HTTP headers and bodies - may contain sensitive info. Can be
|
||||
very verbose. Useful for debugging only.
|
||||
|
||||
### --dump-filters ###
|
||||
|
||||
Dump the filters to the output. Useful to see exactly what include
|
||||
and exclude options are filtering on.
|
||||
|
||||
### --dump-headers ###
|
||||
|
||||
Dump HTTP headers - may contain sensitive info. Can be very verbose.
|
||||
Useful for debugging only.
|
||||
|
||||
### --memprofile=FILE ###
|
||||
|
||||
Write memory profile to file. This can be analysed with `go tool pprof`.
|
||||
|
||||
### --no-check-certificate=true/false ###
|
||||
|
||||
`--no-check-certificate` controls whether a client verifies the
|
||||
server's certificate chain and host name.
|
||||
If `--no-check-certificate` is true, TLS accepts any certificate
|
||||
presented by the server and any host name in that certificate.
|
||||
In this mode, TLS is susceptible to man-in-the-middle attacks.
|
||||
|
||||
This option defaults to `false`.
|
||||
|
||||
**This should be used only for testing.**
|
||||
|
||||
### --no-traverse ###
|
||||
|
||||
The `--no-traverse` flag controls whether the destination file system
|
||||
is traversed when using the `copy` or `move` commands.
|
||||
|
||||
If you are only copying a small number of files and/or have a large
|
||||
number of files on the destination then `--no-traverse` will stop
|
||||
rclone listing the destination and save time.
|
||||
|
||||
However if you are copying a large number of files, especially if you
|
||||
are doing a copy where lots of the files haven't changed and won't
|
||||
need copying then you shouldn't use `--no-traverse`.
|
||||
|
||||
It can also be used to reduce the memory usage of rclone when copying
|
||||
- `rclone --no-traverse copy src dst` won't load either the source or
|
||||
destination listings into memory so will use the minimum amount of
|
||||
memory.
|
||||
|
||||
Filtering
|
||||
---------
|
||||
@@ -309,6 +813,35 @@ For the filtering options
|
||||
* `--files-from`
|
||||
* `--min-size`
|
||||
* `--max-size`
|
||||
* `--min-age`
|
||||
* `--max-age`
|
||||
* `--dump-filters`
|
||||
|
||||
See the [filtering section](/filtering/).
|
||||
|
||||
Logging
|
||||
-------
|
||||
|
||||
rclone has 3 levels of logging, `Error`, `Info` and `Debug`.
|
||||
|
||||
By default rclone logs `Error` and `Info` to standard error and `Debug`
|
||||
to standard output. This means you can redirect standard output and
|
||||
standard error to different places.
|
||||
|
||||
By default rclone will produce `Error` and `Info` level messages.
|
||||
|
||||
If you use the `-q` flag, rclone will only produce `Error` messages.
|
||||
|
||||
If you use the `-v` flag, rclone will produce `Error`, `Info` and
|
||||
`Debug` messages.
|
||||
|
||||
If you use the `--log-file=FILE` option, rclone will redirect `Error`,
|
||||
`Info` and `Debug` messages along with standard error to FILE.
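As an illustration (paths and remote name invented), the default streams can be split, or everything can go to a single log file:

    rclone sync /home/source remote:backup > debug.log 2> errors-and-info.log
    rclone -v --log-file=sync.log sync /home/source remote:backup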
|
||||
|
||||
Exit Code
|
||||
---------
|
||||
|
||||
If any errors occurred during the command, rclone will set a non zero
|
||||
exit code. This allows scripts to detect when rclone operations have
|
||||
failed.
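A minimal sketch of using the exit code in a shell script (paths and remote invented):

    if ! rclone -q sync /home/source remote:backup; then
        echo "rclone sync failed" >&2
    fi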
|
||||
|
||||
|
||||
@@ -2,38 +2,40 @@
|
||||
title: "Rclone downloads"
|
||||
description: "Download rclone binaries for your OS."
|
||||
type: page
|
||||
date: "2015-10-03"
|
||||
date: "2016-07-13"
|
||||
---
|
||||
|
||||
Rclone Download v1.23
|
||||
Rclone Download v1.32
|
||||
=====================
|
||||
|
||||
* Windows
|
||||
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.23-windows-386.zip)
|
||||
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.23-windows-amd64.zip)
|
||||
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.32-windows-386.zip)
|
||||
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.32-windows-amd64.zip)
|
||||
* OSX
|
||||
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.23-osx-386.zip)
|
||||
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.23-osx-amd64.zip)
|
||||
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.32-osx-386.zip)
|
||||
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.32-osx-amd64.zip)
|
||||
* Linux
|
||||
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.23-linux-386.zip)
|
||||
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.23-linux-amd64.zip)
|
||||
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.23-linux-arm.zip)
|
||||
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.32-linux-386.zip)
|
||||
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.32-linux-amd64.zip)
|
||||
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.32-linux-arm.zip)
|
||||
* FreeBSD
|
||||
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.23-freebsd-386.zip)
|
||||
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.23-freebsd-amd64.zip)
|
||||
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.23-freebsd-arm.zip)
|
||||
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.32-freebsd-386.zip)
|
||||
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.32-freebsd-amd64.zip)
|
||||
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.32-freebsd-arm.zip)
|
||||
* NetBSD
|
||||
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.23-netbsd-386.zip)
|
||||
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.23-netbsd-amd64.zip)
|
||||
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.23-netbsd-arm.zip)
|
||||
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.32-netbsd-386.zip)
|
||||
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.32-netbsd-amd64.zip)
|
||||
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.32-netbsd-arm.zip)
|
||||
* OpenBSD
|
||||
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.23-openbsd-386.zip)
|
||||
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.23-openbsd-amd64.zip)
|
||||
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.32-openbsd-386.zip)
|
||||
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.32-openbsd-amd64.zip)
|
||||
* Plan 9
|
||||
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.23-plan9-386.zip)
|
||||
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.23-plan9-amd64.zip)
|
||||
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.32-plan9-386.zip)
|
||||
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.32-plan9-amd64.zip)
|
||||
* Solaris
|
||||
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.23-solaris-amd64.zip)
|
||||
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.32-solaris-amd64.zip)
|
||||
|
||||
You can also find a [mirror of the downloads on github](https://github.com/ncw/rclone/releases/tag/v1.32).
|
||||
|
||||
Downloads for scripting
|
||||
=======================
|
||||
|
||||
@@ -35,6 +35,8 @@ Rclone Download VERSION
|
||||
* Solaris
|
||||
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-solaris-amd64.zip)
|
||||
|
||||
You can also find a [mirror of the downloads on github](https://github.com/ncw/rclone/releases/tag/VERSION).
|
||||
|
||||
Downloads for scripting
|
||||
=======================
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
title: "Google drive"
|
||||
description: "Rclone docs for Google drive"
|
||||
date: "2015-09-12"
|
||||
date: "2016-04-12"
|
||||
---
|
||||
|
||||
<i class="fa fa-google"></i> Google Drive
|
||||
@@ -27,13 +27,31 @@ d) Delete remote
|
||||
q) Quit config
|
||||
e/n/d/q> n
|
||||
name> remote
|
||||
What type of source is it?
|
||||
Choose a number from below
|
||||
1) swift
|
||||
2) s3
|
||||
3) local
|
||||
4) drive
|
||||
type> 4
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Amazon Drive
|
||||
\ "amazon cloud drive"
|
||||
2 / Amazon S3 (also Dreamhost, Ceph)
|
||||
\ "s3"
|
||||
3 / Backblaze B2
|
||||
\ "b2"
|
||||
4 / Dropbox
|
||||
\ "dropbox"
|
||||
5 / Google Cloud Storage (this is not Google Drive)
|
||||
\ "google cloud storage"
|
||||
6 / Google Drive
|
||||
\ "drive"
|
||||
7 / Hubic
|
||||
\ "hubic"
|
||||
8 / Local Disk
|
||||
\ "local"
|
||||
9 / Microsoft OneDrive
|
||||
\ "onedrive"
|
||||
10 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
|
||||
\ "swift"
|
||||
11 / Yandex Disk
|
||||
\ "yandex"
|
||||
Storage> 6
|
||||
Google Application Client Id - leave blank normally.
|
||||
client_id>
|
||||
Google Application Client Secret - leave blank normally.
|
||||
@@ -104,6 +122,82 @@ By default rclone will delete files permanently when requested. If
|
||||
sending them to the trash is required instead then use the
|
||||
`--drive-use-trash` flag.
|
||||
|
||||
### Specific options ###
|
||||
|
||||
Here are the command line options specific to this cloud storage
|
||||
system.
|
||||
|
||||
#### --drive-chunk-size=SIZE ####
|
||||
|
||||
Upload chunk size. Must be a power of 2 >= 256k. Default value is 8 MB.
|
||||
|
||||
Making this larger will improve performance, but note that each chunk
|
||||
is buffered in memory, one per transfer.
|
||||
|
||||
Reducing this will reduce memory usage but decrease performance.
|
||||
|
||||
#### --drive-full-list ####
|
||||
|
||||
No longer does anything - kept for backwards compatibility.
|
||||
|
||||
#### --drive-upload-cutoff=SIZE ####
|
||||
|
||||
File size cutoff for switching to chunked upload. Default is 8 MB.
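For example, to trade some memory for fewer requests on large uploads you might raise both settings (the sizes here are only an illustration):

    rclone --drive-chunk-size 64M --drive-upload-cutoff 64M copy /home/videos remote:videos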
|
||||
|
||||
#### --drive-use-trash ####
|
||||
|
||||
Send files to the trash instead of deleting permanently. Defaults to
|
||||
off, namely deleting files permanently.
|
||||
|
||||
#### --drive-auth-owner-only ####
|
||||
|
||||
Only consider files owned by the authenticated user. Requires
|
||||
that --drive-full-list=true (default).
|
||||
|
||||
#### --drive-formats ####
|
||||
|
||||
Google documents can only be exported from Google drive. When rclone
|
||||
downloads a Google doc it chooses a format to download depending upon
|
||||
this setting.
|
||||
|
||||
By default the formats are `docx,xlsx,pptx,svg` which are a sensible
|
||||
default for an editable document.
|
||||
|
||||
When choosing a format, rclone runs down the list provided in order
|
||||
and chooses the first file format the doc can be exported as from the
|
||||
list. If the file can't be exported to a format on the formats list,
|
||||
then rclone will choose a format from the default list.
|
||||
|
||||
If you prefer an archive copy then you might use `--drive-formats
|
||||
pdf`, or if you prefer openoffice/libreoffice formats you might use
|
||||
`--drive-formats ods,odt`.
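For instance, an archive run might look like this (directory names invented for the example):

    rclone copy --drive-formats pdf remote:work/documents /home/archive/documents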
|
||||
|
||||
Note that rclone adds the extension to the google doc, so if it is
|
||||
called `My Spreadsheet` on google docs, it will be exported as `My
|
||||
Spreadsheet.xlsx` or `My Spreadsheet.pdf` etc.
|
||||
|
||||
Here are the possible extensions with their corresponding mime types.
|
||||
|
||||
| Extension | Mime Type | Description |
|
||||
| --------- |-----------| ------------|
|
||||
| csv | text/csv | Standard CSV format for Spreadsheets |
|
||||
| doc | application/msword | Microsoft Office Document |
|
||||
| docx | application/vnd.openxmlformats-officedocument.wordprocessingml.document | Microsoft Office Document |
|
||||
| html | text/html | An HTML Document |
|
||||
| jpg | image/jpeg | A JPEG Image File |
|
||||
| ods | application/vnd.oasis.opendocument.spreadsheet | Openoffice Spreadsheet |
|
||||
| ods | application/x-vnd.oasis.opendocument.spreadsheet | Openoffice Spreadsheet |
|
||||
| odt | application/vnd.oasis.opendocument.text | Openoffice Document |
|
||||
| pdf | application/pdf | Adobe PDF Format |
|
||||
| png | image/png | PNG Image Format|
|
||||
| pptx | application/vnd.openxmlformats-officedocument.presentationml.presentation | Microsoft Office Powerpoint |
|
||||
| rtf | application/rtf | Rich Text Format |
|
||||
| svg | image/svg+xml | Scalable Vector Graphics Format |
|
||||
| txt | text/plain | Plain Text |
|
||||
| xls | application/vnd.ms-excel | Microsoft Office Spreadsheet |
|
||||
| xlsx | application/vnd.openxmlformats-officedocument.spreadsheetml.sheet | Microsoft Office Spreadsheet |
|
||||
| zip | application/zip | A ZIP file of HTML, Images and CSS |
|
||||
|
||||
### Limitations ###
|
||||
|
||||
Drive has quite a lot of rate limiting. This causes rclone to be
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
title: "Dropbox"
|
||||
description: "Rclone docs for Dropbox"
|
||||
date: "2014-07-17"
|
||||
date: "2016-02-21"
|
||||
---
|
||||
|
||||
<i class="fa fa-dropbox"></i> Dropbox
|
||||
@@ -28,15 +28,31 @@ d) Delete remote
|
||||
q) Quit config
|
||||
e/n/d/q> n
|
||||
name> remote
|
||||
What type of source is it?
|
||||
Choose a number from below
|
||||
1) swift
|
||||
2) s3
|
||||
3) local
|
||||
4) google cloud storage
|
||||
5) dropbox
|
||||
6) drive
|
||||
type> 5
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Amazon Drive
|
||||
\ "amazon cloud drive"
|
||||
2 / Amazon S3 (also Dreamhost, Ceph)
|
||||
\ "s3"
|
||||
3 / Backblaze B2
|
||||
\ "b2"
|
||||
4 / Dropbox
|
||||
\ "dropbox"
|
||||
5 / Google Cloud Storage (this is not Google Drive)
|
||||
\ "google cloud storage"
|
||||
6 / Google Drive
|
||||
\ "drive"
|
||||
7 / Hubic
|
||||
\ "hubic"
|
||||
8 / Local Disk
|
||||
\ "local"
|
||||
9 / Microsoft OneDrive
|
||||
\ "onedrive"
|
||||
10 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
|
||||
\ "swift"
|
||||
11 / Yandex Disk
|
||||
\ "yandex"
|
||||
Storage> 4
|
||||
Dropbox App Key - leave blank normally.
|
||||
app_key>
|
||||
Dropbox App Secret - leave blank normally.
|
||||
@@ -73,12 +89,32 @@ To copy a local directory to a dropbox directory called backup
|
||||
|
||||
### Modified time and MD5SUMs ###
|
||||
|
||||
Dropbox doesn't have the capability of storing modification times or
|
||||
MD5SUMs so syncs will effectively have the `--size-only` flag set.
|
||||
Dropbox doesn't provide the ability to set modification times in the
|
||||
V1 public API, so rclone can't support modified time with Dropbox.
|
||||
|
||||
This may change in the future - see these issues for details:
|
||||
|
||||
* [Dropbox V2 API](https://github.com/ncw/rclone/issues/349)
|
||||
* [Allow syncs for remotes that can't set modtime on existing objects](https://github.com/ncw/rclone/issues/348)
|
||||
|
||||
Dropbox doesn't return any sort of checksum (MD5 or SHA1).
|
||||
|
||||
Together that means that syncs to dropbox will effectively have the
|
||||
`--size-only` flag set.
|
||||
|
||||
### Specific options ###
|
||||
|
||||
Here are the command line options specific to this cloud storage
|
||||
system.
|
||||
|
||||
#### --dropbox-chunk-size=SIZE ####
|
||||
|
||||
Upload chunk size. Max 150M. The default is 128MB. Note that this
|
||||
isn't buffered into memory.
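For example (size and paths illustrative), larger chunks mean fewer requests when uploading big files:

    rclone --dropbox-chunk-size 100M copy /home/videos remote:videos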
|
||||
|
||||
### Limitations ###
|
||||
|
||||
Note that Dropbox is case sensitive so you can't have a file called
|
||||
Note that Dropbox is case insensitive so you can't have a file called
|
||||
"Hello.doc" and one called "hello.doc".
|
||||
|
||||
There are some file names such as `thumbs.db` which Dropbox can't
|
||||
@@ -86,3 +122,8 @@ store. There is a full list of them in the ["Ignored Files" section
|
||||
of this document](https://www.dropbox.com/en/help/145). Rclone will
|
||||
issue an error message `File name disallowed - not uploading` if it
|
||||
attempts to upload one of those file names, but the sync won't fail.
|
||||
|
||||
If you have more than 10,000 files in a directory then `rclone purge
|
||||
dropbox:dir` will return the error `Failed to purge: There are too
|
||||
many files involved in this operation`. As a work-around do an
|
||||
`rclone delete dropbox:dir` followed by an `rclone rmdir dropbox:dir`.
|
||||
|
||||
@@ -12,31 +12,17 @@ Frequently Asked Questions
|
||||
Yes they do. All the rclone commands (eg `sync`, `copy` etc) will
|
||||
work on all the remote storage systems.
|
||||
|
||||
|
||||
### Can I copy the config from one machine to another ###
|
||||
|
||||
Sure! Rclone stores all of its config in a single file. If you want
|
||||
to find this file, the simplest way is to run `rclone -h` and look at
|
||||
the help for the `--config` flag which will tell you where it is. Eg,
|
||||
the help for the `--config` flag which will tell you where it is.
|
||||
|
||||
```
|
||||
$ rclone -h
|
||||
Sync files and directories to and from local and remote object stores - v1.18.
|
||||
[snip]
|
||||
Options:
|
||||
--bwlimit=0: Bandwidth limit in kBytes/s, or use suffix k|M|G
|
||||
--checkers=8: Number of checkers to run in parallel.
|
||||
-c, --checksum=false: Skip based on checksum & size, not mod-time & size
|
||||
--config="/home/user/.rclone.conf": Config file.
|
||||
[snip]
|
||||
```
|
||||
See the [remote setup docs](/remote_setup/) for more info.
|
||||
|
||||
So in this config the config file can be found in
|
||||
`/home/user/.rclone.conf`.
|
||||
|
||||
Just copy that to the equivalent place in the destination (run `rclone
|
||||
-h` above again on the destination machine if not sure).
|
||||
### How do I configure rclone on a remote / headless box with no browser? ###
|
||||
|
||||
This has now been documented in its own [remote setup page](/remote_setup/).
|
||||
|
||||
### Can rclone sync directly from drive to s3 ###
|
||||
|
||||
@@ -101,3 +87,62 @@ of metadata, which breaks the desired 1:1 mapping of files to objects.
|
||||
No, not at present. rclone only does uni-directional sync from A ->
|
||||
B. It may do in the future though since it has all the primitives - it
|
||||
just requires writing the algorithm to do it.
|
||||
|
||||
### Can I use rclone with an HTTP proxy? ###
|
||||
|
||||
Yes. rclone will use the environment variables `HTTP_PROXY`,
|
||||
`HTTPS_PROXY` and `NO_PROXY`, similar to cURL and other programs.
|
||||
|
||||
`HTTPS_PROXY` takes precedence over `HTTP_PROXY` for https requests.
|
||||
|
||||
The environment values may be either a complete URL or a "host[:port]",
|
||||
in which case the "http" scheme is assumed.
|
||||
|
||||
The `NO_PROXY` allows you to disable the proxy for specific hosts.
|
||||
Hosts must be comma separated, and can contain domains or parts.
|
||||
For instance "foo.com" also matches "bar.foo.com".
|
||||
|
||||
### Rclone gives x509: failed to load system roots and no roots provided error ###
|
||||
|
||||
This means that `rclone` can't find the SSL root certificates. Likely
|
||||
you are running `rclone` on a NAS with a cut-down Linux OS, or
|
||||
possibly on Solaris.
|
||||
|
||||
Rclone (via the Go runtime) tries to load the root certificates from
|
||||
these places on Linux.
|
||||
|
||||
"/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc.
|
||||
"/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL
|
||||
"/etc/ssl/ca-bundle.pem", // OpenSUSE
|
||||
"/etc/pki/tls/cacert.pem", // OpenELEC
|
||||
|
||||
So doing something like this should fix the problem. It also sets the
|
||||
time which is important for SSL to work properly.
|
||||
|
||||
```
|
||||
mkdir -p /etc/ssl/certs/
|
||||
curl -o /etc/ssl/certs/ca-certificates.crt https://raw.githubusercontent.com/bagder/ca-bundle/master/ca-bundle.crt
|
||||
ntpclient -s -h pool.ntp.org
|
||||
```
|
||||
|
||||
Note that you may need to add the `--insecure` option to the `curl` command line if it doesn't work without.
|
||||
|
||||
```
|
||||
curl --insecure -o /etc/ssl/certs/ca-certificates.crt https://raw.githubusercontent.com/bagder/ca-bundle/master/ca-bundle.crt
|
||||
```
|
||||
|
||||
### Rclone gives Failed to load config file: function not implemented error ###
|
||||
|
||||
Likely this means that you are running rclone on a Linux version not
|
||||
supported by the Go runtime, ie a kernel earlier than version 2.6.23.
|
||||
|
||||
See the [system requirements section in the go install
|
||||
docs](https://golang.org/doc/install) for full details.
|
||||
|
||||
### All my uploaded docx/xlsx/pptx files appear as archive/zip ###
|
||||
|
||||
This is caused by uploading these files from a Windows computer which
|
||||
hasn't got the Microsoft Office suite installed. The easiest way to
|
||||
fix this is to install the Word viewer and the Microsoft Office
|
||||
Compatibility Pack for Word, Excel, and PowerPoint 2007 and later
|
||||
versions' file formats.
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
title: "Filtering"
|
||||
description: "Filtering, includes and excludes"
|
||||
date: "2015-09-27"
|
||||
date: "2016-02-09"
|
||||
---
|
||||
|
||||
# Filtering, includes and excludes #
|
||||
@@ -9,14 +9,19 @@ date: "2015-09-27"
|
||||
Rclone has a sophisticated set of include and exclude rules. Some of
|
||||
these are based on patterns and some on other things like file size.
|
||||
|
||||
The filters are applied for the `copy`, `sync`, `move`, `ls`, `lsl`,
|
||||
`md5sum`, `sha1sum`, `size`, `delete` and `check` operations.
|
||||
Note that `purge` does not obey the filters.
|
||||
|
||||
Each path as it passes through rclone is matched against the include
|
||||
and exclude rules. The paths are matched without a leading `/`.
|
||||
and exclude rules like `--include`, `--exclude`, `--include-from`,
|
||||
`--exclude-from`, `--filter`, or `--filter-from`. The simplest way to
|
||||
try them out is using the `ls` command, or `--dry-run` together with
|
||||
`-v`.
|
||||
|
||||
For example the files might be passed to the matching engine like this
|
||||
|
||||
* `file1.jpg`
|
||||
* `file2.jpg`
|
||||
* `directory/file3.jpg`
|
||||
**Important** Due to limitations of the command line parser you can
|
||||
only use any of these options once - if you duplicate them then rclone
|
||||
will use the last one only.
|
||||
|
||||
## Patterns ##
|
||||
|
||||
@@ -24,25 +29,28 @@ The patterns used to match files for inclusion or exclusion are based
|
||||
on "file globs" as used by the unix shell.
|
||||
|
||||
If the pattern starts with a `/` then it only matches at the top level
|
||||
of the directory tree. If it doesn't start with `/` then it is
|
||||
matched starting at the end of the path, but it will only match a
|
||||
complete path element.
|
||||
of the directory tree, relative to the root of the remote.
|
||||
If it doesn't start with `/` then it is matched starting at the
|
||||
**end of the path**, but it will only match a complete path element:
|
||||
|
||||
file.jpg - matches "file.jpg"
|
||||
- matches "directory/file.jpg"
|
||||
- doesn't match "afile.jpg"
|
||||
- doesn't match "directory/afile.jpg"
|
||||
/file.jpg - matches "file.jpg"
|
||||
/file.jpg - matches "file.jpg" in the root directory of the remote
|
||||
- doesn't match "afile.jpg"
|
||||
- doesn't match "directory/file.jpg"
|
||||
|
||||
**Important** Note that you must use `/` in patterns and not `\` even
|
||||
if running on Windows.
|
||||
|
||||
A `*` matches anything but not a `/`.
|
||||
|
||||
*.jpg - matches "file.jpg"
|
||||
- matches "directory/file.jpg"
|
||||
- doesn't match "file.jpg/anotherfile.jpg"
|
||||
- doesn't match "file.jpg/something"
|
||||
|
||||
Use `**` to match anything, including slashes.
|
||||
Use `**` to match anything, including slashes (`/`).
|
||||
|
||||
dir/** - matches "dir/file.jpg"
|
||||
- matches "dir/dir1/dir2/file.jpg"
|
||||
@@ -76,14 +84,38 @@ Special characters can be escaped with a `\` before them.
|
||||
|
||||
\*.jpg - matches "*.jpg"
|
||||
\\.jpg - matches "\.jpg"
|
||||
\[one\].jpeg - matches "[one].jpg"
|
||||
|
||||
\[one\].jpg - matches "[one].jpg"
|
||||
|
||||
Note also that rclone filter globs can only be used in one of the
|
||||
filter command line flags, not in the specification of the remote, so
|
||||
`rclone copy "remote:dir*.jpg" /path/to/dir` won't work - what is
|
||||
required is `rclone --include "*.jpg" copy remote:dir /path/to/dir`
|
||||
|
||||
### Directories ###
|
||||
|
||||
Rclone keeps track of directories that could match any file patterns.
|
||||
|
||||
Eg if you add the include rule
|
||||
|
||||
/a/*.jpg
|
||||
|
||||
Rclone will synthesize the directory include rule
|
||||
|
||||
/a/
|
||||
|
||||
If you put any rules which end in `/` then it will only match
|
||||
directories.
|
||||
|
||||
Directory matches are **only** used to optimise directory access
|
||||
patterns - you must still match the files that you want to match.
|
||||
Directory matches won't optimise anything on bucket based remotes (eg
|
||||
s3, swift, google cloud storage, b2) which don't have a concept of
|
||||
directory.
|
||||
|
||||
### Differences between rsync and rclone patterns ###
|
||||
|
||||
Rclone implements bash style `{a,b,c}` glob matching which rsync doesn't.
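For example (shown in the same style as the patterns above):

    *.{jpg,png} - matches "file.jpg"
                - matches "photo.png"
                - doesn't match "file.gif"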
|
||||
|
||||
Rclone ignores `/` at the end of a pattern.
|
||||
|
||||
Rclone always does a wildcard match, so a literal `\` must always be escaped with a `\`.
|
||||
|
||||
## How the rules are used ##
|
||||
@@ -116,6 +148,11 @@ This would exclude
|
||||
* `secret17.jpg`
|
||||
* non `*.jpg` and `*.png`
|
||||
|
||||
A similar process is done on directory entries before recursing into
|
||||
them. This only works on remotes which have a concept of directory
|
||||
(Eg local, google drive, onedrive, amazon drive) and not on bucket
|
||||
based remotes (eg s3, swift, google cloud storage, b2).
|
||||
|
||||
## Adding filtering rules ##
|
||||
|
||||
Filtering rules are added with the following command line flags.
|
||||
@@ -148,7 +185,11 @@ Add a single include rule with `--include`.
|
||||
Eg `--include *.{png,jpg}` to include all `png` and `jpg` files in the
|
||||
backup and no others.
|
||||
|
||||
This adds an implicit `--exclude *` at the end of the filter list.
|
||||
This adds an implicit `--exclude *` at the very end of the filter
|
||||
list. This means you can mix `--include` and `--include-from` with the
|
||||
other filters (eg `--exclude`) but you must include all the files you
|
||||
want in the include statement. If this doesn't provide enough
|
||||
flexibility then you must use `--filter-from`.
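As a sketch of how the rules combine (this is an equivalence, not new syntax), a lone `--include` behaves roughly like an explicit include filter followed by an exclude-everything filter, so these two commands should transfer the same set of files (paths illustrative):

    rclone copy --include "*.jpg" /home/pictures remote:pictures
    rclone copy --filter "+ *.jpg" --filter "- *" /home/pictures remote:pictures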
|
||||
|
||||
### `--include-from` - Read include patterns from file ###
|
||||
|
||||
@@ -166,7 +207,11 @@ Then use as `--include-from include-file.txt`. This will sync all
|
||||
|
||||
This is useful if you have a lot of rules.
|
||||
|
||||
This adds an implicit `--exclude *` at the end of the filter list.
|
||||
This adds an implicit `--exclude *` at the very end of the filter
|
||||
list. This means you can mix `--include` and `--include-from` with the
|
||||
other filters (eg `--exclude`) but you must include all the files you
|
||||
want in the include statement. If this doesn't provide enough
|
||||
flexibility then you must use `--filter-from`.
|
||||
|
||||
### `--filter` - Add a file-filtering rule ###
|
||||
|
||||
@@ -212,6 +257,41 @@ Prepare a file like this `files-from.txt`
|
||||
Then use as `--files-from files-from.txt`. This will only transfer
|
||||
`file1.jpg` and `file2.jpg` providing they exist.
|
||||
|
||||
For example, let's say you had a few files you want to back up
|
||||
regularly with these absolute paths:
|
||||
|
||||
/home/user1/important
|
||||
/home/user1/dir/file
|
||||
/home/user2/stuff
|
||||
|
||||
To copy these you'd find a common subdirectory - in this case `/home`
|
||||
and put the remaining files in `files-from.txt` with or without
|
||||
leading `/`, eg
|
||||
|
||||
user1/important
|
||||
user1/dir/file
|
||||
user2/stuff
|
||||
|
||||
You could then copy these to a remote like this
|
||||
|
||||
rclone copy --files-from files-from.txt /home remote:backup
|
||||
|
||||
The 3 files will arrive in `remote:backup` with the paths as in the
|
||||
`files-from.txt`.
|
||||
|
||||
You could of course choose `/` as the root too in which case your
|
||||
`files-from.txt` might look like this.
|
||||
|
||||
/home/user1/important
|
||||
/home/user1/dir/file
|
||||
/home/user2/stuff
|
||||
|
||||
And you would transfer it like this
|
||||
|
||||
rclone copy --files-from files-from.txt / remote:backup
|
||||
|
||||
In this case there will be an extra `home` directory on the remote.
|
||||
|
||||
### `--min-size` - Don't transfer any file smaller than this ###
|
||||
|
||||
This option controls the minimum size file which will be transferred.
|
||||
@@ -230,6 +310,31 @@ used.
|
||||
For example `--max-size 1G` means no files larger than 1GByte will be
|
||||
transferred.
|
||||
|
||||
### `--max-age` - Don't transfer any file older than this ###
|
||||
|
||||
This option controls the maximum age of files to transfer. Give in
|
||||
seconds or with a suffix of:
|
||||
|
||||
* `ms` - Milliseconds
|
||||
* `s` - Seconds
|
||||
* `m` - Minutes
|
||||
* `h` - Hours
|
||||
* `d` - Days
|
||||
* `w` - Weeks
|
||||
* `M` - Months
|
||||
* `y` - Years
|
||||
|
||||
For example `--max-age 2d` means no files older than 2 days will be
|
||||
transferred.
|
||||
|
||||
### `--min-age` - Don't transfer any file younger than this ###
|
||||
|
||||
This option controls the minimum age of files to transfer. Give in
|
||||
seconds or with a suffix (see `--max-age` for list of suffixes)
|
||||
|
||||
For example `--min-age 2d` means no files younger than 2 days will be
|
||||
transferred.
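The size and age filters can be combined, eg (values picked only for illustration):

    rclone --min-size 10k --max-size 1G --max-age 30d copy /home/source remote:backup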
|
||||
|
||||
### `--delete-excluded` - Delete files on dest excluded from sync ###
|
||||
|
||||
**Important** this flag is dangerous - use with `--dry-run` and `-v` first.
|
||||
|
||||
@@ -26,21 +26,39 @@ d) Delete remote
|
||||
q) Quit config
|
||||
e/n/d/q> n
|
||||
name> remote
|
||||
What type of source is it?
|
||||
Choose a number from below
|
||||
1) swift
|
||||
2) s3
|
||||
3) local
|
||||
4) google cloud storage
|
||||
5) dropbox
|
||||
6) drive
|
||||
type> 4
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Amazon Drive
|
||||
\ "amazon cloud drive"
|
||||
2 / Amazon S3 (also Dreamhost, Ceph)
|
||||
\ "s3"
|
||||
3 / Backblaze B2
|
||||
\ "b2"
|
||||
4 / Dropbox
|
||||
\ "dropbox"
|
||||
5 / Google Cloud Storage (this is not Google Drive)
|
||||
\ "google cloud storage"
|
||||
6 / Google Drive
|
||||
\ "drive"
|
||||
7 / Hubic
|
||||
\ "hubic"
|
||||
8 / Local Disk
|
||||
\ "local"
|
||||
9 / Microsoft OneDrive
|
||||
\ "onedrive"
|
||||
10 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
|
||||
\ "swift"
|
||||
11 / Yandex Disk
|
||||
\ "yandex"
|
||||
Storage> 5
|
||||
Google Application Client Id - leave blank normally.
|
||||
client_id>
|
||||
Google Application Client Secret - leave blank normally.
|
||||
client_secret>
|
||||
Project number optional - needed only for list/create/delete buckets - see your developer console.
|
||||
project_number> 12345678
|
||||
Service Account Credentials JSON file path - needed only if you want use SA instead of interactive login.
|
||||
service_account_file>
|
||||
Access Control List for new objects.
|
||||
Choose a number from below, or type in your own value
|
||||
* Object owner gets OWNER access, and all Authenticated Users get READER access.
|
||||
@@ -123,6 +141,30 @@ files in the bucket.
|
||||
|
||||
rclone sync /home/local/directory remote:bucket
|
||||
|
||||
### Service Account support ###
|
||||
|
||||
You can set up rclone with Google Cloud Storage in an unattended mode,
|
||||
i.e. not tied to a specific end-user Google account. This is useful
|
||||
when you want to synchronise files onto machines that don't have
|
||||
actively logged-in users, for example build machines.
|
||||
|
||||
To get credentials for Google Cloud Platform
|
||||
[IAM Service Accounts](https://cloud.google.com/iam/docs/service-accounts),
|
||||
please head to the
|
||||
[Service Account](https://console.cloud.google.com/permissions/serviceaccounts)
|
||||
section of the Google Developer Console. Service Accounts behave just
|
||||
like normal `User` permissions in
|
||||
[Google Cloud Storage ACLs](https://cloud.google.com/storage/docs/access-control),
|
||||
so you can limit their access (e.g. make them read only). After
|
||||
creating an account, a JSON file containing the Service Account's
|
||||
credentials will be downloaded onto your machines. These credentials
|
||||
are what rclone will use for authentication.
|
||||
|
||||
To use a Service Account instead of OAuth2 token flow, enter the path
|
||||
to your Service Account credentials at the `service_account_file`
|
||||
prompt and rclone won't use the browser based authentication
|
||||
flow.
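The resulting section of the config file might then look something like this (the remote name, project number and path are placeholders):

```
[gcs]
type = google cloud storage
project_number = 12345678
service_account_file = /path/to/service-account-credentials.json
```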
|
||||
|
||||
### Modified time ###
|
||||
|
||||
Google Cloud Storage stores md5sums natively and rclone stores
|
||||
|
||||
129 docs/content/hubic.md (new file)
@@ -0,0 +1,129 @@
|
||||
---
|
||||
title: "Hubic"
|
||||
description: "Rclone docs for Hubic"
|
||||
date: "2016-05-27"
|
||||
---
|
||||
|
||||
<i class="fa fa-space-shuttle"></i> Hubic
|
||||
-----------------------------------------
|
||||
|
||||
Paths are specified as `remote:path`
|
||||
|
||||
Paths are specified as `remote:container` (or `remote:` for the `lsd`
|
||||
command.) You may put subdirectories in too, eg `remote:container/path/to/dir`.
|
||||
|
||||
The initial setup for Hubic involves getting a token from Hubic which
|
||||
you need to do in your browser. `rclone config` walks you through it.
|
||||
|
||||
Here is an example of how to make a remote called `remote`. First run:
|
||||
|
||||
rclone config
|
||||
|
||||
This will guide you through an interactive setup process:
|
||||
|
||||
```
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
n/s> n
|
||||
name> remote
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Amazon Drive
|
||||
\ "amazon cloud drive"
|
||||
2 / Amazon S3 (also Dreamhost, Ceph)
|
||||
\ "s3"
|
||||
3 / Backblaze B2
|
||||
\ "b2"
|
||||
4 / Dropbox
|
||||
\ "dropbox"
|
||||
5 / Google Cloud Storage (this is not Google Drive)
|
||||
\ "google cloud storage"
|
||||
6 / Google Drive
|
||||
\ "drive"
|
||||
7 / Hubic
|
||||
\ "hubic"
|
||||
8 / Local Disk
|
||||
\ "local"
|
||||
9 / Microsoft OneDrive
|
||||
\ "onedrive"
|
||||
10 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
|
||||
\ "swift"
|
||||
11 / Yandex Disk
|
||||
\ "yandex"
|
||||
Storage> 7
|
||||
Hubic Client Id - leave blank normally.
|
||||
client_id>
|
||||
Hubic Client Secret - leave blank normally.
|
||||
client_secret>
|
||||
Remote config
|
||||
Use auto config?
|
||||
* Say Y if not sure
|
||||
* Say N if you are working on a remote or headless machine
|
||||
y) Yes
|
||||
n) No
|
||||
y/n> y
|
||||
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
|
||||
Log in and authorize rclone for access
|
||||
Waiting for code...
|
||||
Got code
|
||||
--------------------
|
||||
[remote]
|
||||
client_id =
|
||||
client_secret =
|
||||
token = {"access_token":"XXXXXX"}
|
||||
--------------------
|
||||
y) Yes this is OK
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
|
||||
See the [remote setup docs](/remote_setup/) for how to set it up on a
|
||||
machine with no Internet browser available.
|
||||
|
||||
Note that rclone runs a webserver on your local machine to collect the
|
||||
token as returned from Hubic. This only runs from the moment it opens
|
||||
your browser to the moment you get back the verification code. This
|
||||
is on `http://127.0.0.1:53682/` and it may require you to unblock
|
||||
it temporarily if you are running a host firewall.
|
||||
|
||||
Once configured you can then use `rclone` like this,
|
||||
|
||||
List containers in the top level of your Hubic
|
||||
|
||||
rclone lsd remote:
|
||||
|
||||
List all the files in your Hubic
|
||||
|
||||
rclone ls remote:
|
||||
|
||||
To copy a local directory to an Hubic directory called backup
|
||||
|
||||
rclone copy /home/source remote:backup
|
||||
|
||||
If you want the directory to be visible in the official *Hubic
|
||||
browser*, you need to copy your files to the `default` directory
|
||||
|
||||
rclone copy /home/source remote:default/backup
|
||||
|
||||
### Modified time ###
|
||||
|
||||
The modified time is stored as metadata on the object as
|
||||
`X-Object-Meta-Mtime` as floating point since the epoch accurate to 1
|
||||
ns.
|
||||
|
||||
This is a defacto standard (used in the official python-swiftclient
|
||||
amongst others) for storing the modification time for an object.
|
||||
|
||||
Note that Hubic wraps the Swift backend, so most of the properties
|
||||
are the same as for the Swift remote.
|
||||
|
||||
### Limitations ###
|
||||
|
||||
This uses the normal OpenStack Swift mechanism to refresh the Swift
|
||||
API credentials and ignores the expires field returned by the Hubic
|
||||
API.
|
||||
|
||||
The Swift API doesn't return a correct MD5SUM for segmented files
|
||||
(Dynamic or Static Large Objects) so rclone won't check or use the
|
||||
MD5SUM for these.
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
title: "Install"
|
||||
description: "Rclone Installation"
|
||||
date: "2015-06-12"
|
||||
date: "2016-03-28"
|
||||
---
|
||||
|
||||
Install
|
||||
@@ -11,15 +11,15 @@ Rclone is a Go program and comes as a single binary file.
|
||||
|
||||
[Download](/downloads/) the relevant binary.
|
||||
|
||||
Or alternatively if you have Go installed use
|
||||
Or alternatively if you have Go 1.5+ installed use
|
||||
|
||||
go get github.com/ncw/rclone
|
||||
|
||||
and this will build the binary in `$GOPATH/bin`. If you have built
|
||||
rclone before then you will want to update its dependencies first with
|
||||
this (remove `-f` if using go < 1.4)
|
||||
this
|
||||
|
||||
go get -u -v -f github.com/ncw/rclone/...
|
||||
go get -u -v github.com/ncw/rclone/...
|
||||
|
||||
See the [Usage section](/docs/) of the docs for how to use rclone, or
|
||||
run `rclone -h`.
|
||||
@@ -37,3 +37,20 @@ linux binary downloaded files install example
|
||||
sudo mkdir -p /usr/local/share/man/man1
|
||||
sudo cp rclone.1 /usr/local/share/man/man1/
|
||||
sudo mandb
|
||||
|
||||
Installation with Ansible
|
||||
-------
|
||||
|
||||
This can be done with [Stefan Weichinger's ansible
|
||||
role](https://github.com/stefangweichinger/ansible-rclone).
|
||||
|
||||
Instructions
|
||||
|
||||
1. `git clone https://github.com/stefangweichinger/ansible-rclone.git` into your local roles-directory
|
||||
2. add the role to the hosts you want rclone installed to:
|
||||
|
||||
```
|
||||
- hosts: rclone-hosts
|
||||
roles:
|
||||
- rclone
|
||||
```
|
||||
|
||||
@@ -25,9 +25,13 @@ on OS X.
|
||||
### Filenames ###
|
||||
|
||||
Filenames are expected to be encoded in UTF-8 on disk. This is the
|
||||
normal case for Windows and OS X. There is a bit more uncertainty in
|
||||
the Linux world, but new distributions will have UTF-8 encoded files
|
||||
names.
|
||||
normal case for Windows and OS X.
|
||||
|
||||
There is a bit more uncertainty in the Linux world, but new
|
||||
distributions will have UTF-8 encoded files names. If you are using an
|
||||
old Linux filesystem with non UTF-8 file names (eg latin1) then you
|
||||
can use the `convmv` tool to convert the filesystem to UTF-8. This
|
||||
tool is available in most distributions' package managers.
|
||||
|
||||
If an invalid (non-UTF8) filename is read, the invalid characters will
|
||||
be replaced with the unicode replacement character, '�'. `rclone`
|
||||
@@ -36,3 +40,37 @@ will emit a debug message in this case (use `-v` to see), eg
|
||||
```
|
||||
Local file system at .: Replacing invalid UTF-8 characters in "gro\xdf"
|
||||
```
|
||||
|
||||
### Long paths on Windows ###
|
||||
|
||||
Rclone handles long paths automatically, by converting all paths to long
|
||||
[UNC paths](https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath)
|
||||
which allows paths up to 32,767 characters.
|
||||
|
||||
This is why you will see that your paths, for instance `c:\files`, are
|
||||
converted to the UNC path `\\?\c:\files` in the output,
|
||||
and `\\server\share` is converted to `\\?\UNC\server\share`.
|
||||
|
||||
However, in rare cases this may cause problems with buggy file
|
||||
system drivers like [EncFS](https://github.com/ncw/rclone/issues/261).
|
||||
To disable UNC conversion globally, add this to your `.rclone.conf` file:
|
||||
|
||||
```
|
||||
[local]
|
||||
nounc = true
|
||||
```
|
||||
|
||||
If you want to selectively disable UNC, you can add it to a separate entry like this:
|
||||
|
||||
```
|
||||
[nounc]
|
||||
type = local
|
||||
nounc = true
|
||||
```
|
||||
And use rclone like this:
|
||||
|
||||
`rclone copy c:\src nounc:z:\dst`
|
||||
|
||||
This will use UNC paths on `c:\src` but not on `z:\dst`.
|
||||
Of course this will cause problems if the absolute path length of a
|
||||
file exceeds 258 characters on z, so only use this option if you have to.
|
||||
|
||||
149 docs/content/onedrive.md (new file)
@@ -0,0 +1,149 @@
|
||||
---
|
||||
title: "Microsoft One Drive"
|
||||
description: "Rclone docs for Microsoft One Drive"
|
||||
date: "2015-10-14"
|
||||
---
|
||||
|
||||
<i class="fa fa-windows"></i> Microsoft One Drive
|
||||
-----------------------------------------
|
||||
|
||||
Paths are specified as `remote:path`
|
||||
|
||||
Paths may be as deep as required, eg `remote:directory/subdirectory`.
|
||||
|
||||
The initial setup for One Drive involves getting a token from
|
||||
Microsoft which you need to do in your browser. `rclone config` walks
|
||||
you through it.
|
||||
|
||||
Here is an example of how to make a remote called `remote`. First run:
|
||||
|
||||
rclone config
|
||||
|
||||
This will guide you through an interactive setup process:
|
||||
|
||||
```
|
||||
No remotes found - make a new one
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
n/s> n
|
||||
name> remote
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Amazon Drive
|
||||
\ "amazon cloud drive"
|
||||
2 / Amazon S3 (also Dreamhost, Ceph)
|
||||
\ "s3"
|
||||
3 / Backblaze B2
|
||||
\ "b2"
|
||||
4 / Dropbox
|
||||
\ "dropbox"
|
||||
5 / Google Cloud Storage (this is not Google Drive)
|
||||
\ "google cloud storage"
|
||||
6 / Google Drive
|
||||
\ "drive"
|
||||
7 / Hubic
|
||||
\ "hubic"
|
||||
8 / Local Disk
|
||||
\ "local"
|
||||
9 / Microsoft OneDrive
|
||||
\ "onedrive"
|
||||
10 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
|
||||
\ "swift"
|
||||
11 / Yandex Disk
|
||||
\ "yandex"
|
||||
Storage> 9
|
||||
Microsoft App Client Id - leave blank normally.
|
||||
client_id>
|
||||
Microsoft App Client Secret - leave blank normally.
|
||||
client_secret>
|
||||
Remote config
|
||||
Use auto config?
|
||||
* Say Y if not sure
|
||||
* Say N if you are working on a remote or headless machine
|
||||
y) Yes
|
||||
n) No
|
||||
y/n> y
|
||||
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
|
||||
Log in and authorize rclone for access
|
||||
Waiting for code...
|
||||
Got code
|
||||
--------------------
|
||||
[remote]
|
||||
client_id =
|
||||
client_secret =
|
||||
token = {"access_token":"XXXXXX"}
|
||||
--------------------
|
||||
y) Yes this is OK
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
|
||||
See the [remote setup docs](/remote_setup/) for how to set it up on a
|
||||
machine with no Internet browser available.
|
||||
|
||||
Note that rclone runs a webserver on your local machine to collect the
|
||||
token as returned from Microsoft. This only runs from the moment it
|
||||
opens your browser to the moment you get back the verification
|
||||
code. This is on `http://127.0.0.1:53682/` and it may require
|
||||
you to unblock it temporarily if you are running a host firewall.
|
||||
|
||||
Once configured you can then use `rclone` like this,
|
||||
|
||||
List directories in top level of your One Drive
|
||||
|
||||
rclone lsd remote:
|
||||
|
||||
List all the files in your One Drive
|
||||
|
||||
rclone ls remote:
|
||||
|
||||
To copy a local directory to an One Drive directory called backup
|
||||
|
||||
rclone copy /home/source remote:backup
|
||||
|
||||
### Modified time and hashes ###
|
||||
|
||||
One Drive allows modification times to be set on objects accurate to 1
|
||||
second. These will be used to detect whether objects need syncing or
|
||||
not.
|
||||
|
||||
One drive supports SHA1 type hashes, so you can use `--checksum` flag.
|
||||
|
||||
|
||||
### Deleting files ###
|
||||
|
||||
Any files you delete with rclone will end up in the trash. Microsoft
|
||||
doesn't provide an API to permanently delete files, nor to empty the
|
||||
trash, so you will have to do that with one of Microsoft's apps or via
|
||||
the One Drive website.
|
||||
|
||||
### Specific options ###
|
||||
|
||||
Here are the command line options specific to this cloud storage
|
||||
system.
|
||||
|
||||
#### --onedrive-chunk-size=SIZE ####
|
||||
|
||||
Above this size files will be chunked - must be a multiple of 320k. The
|
||||
default is 10MB. Note that the chunks will be buffered into memory.
|
||||
|
||||
#### --onedrive-upload-cutoff=SIZE ####
|
||||
|
||||
Cutoff for switching to chunked upload - must be <= 100MB. The default
|
||||
is 10MB.
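For example (sizes illustrative - the chunk size must remain a multiple of 320k):

    rclone --onedrive-chunk-size 20M --onedrive-upload-cutoff 50M copy /home/pictures remote:pictures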
|
||||
|
||||
### Limitations ###
|
||||
|
||||
Note that One Drive is case insensitive so you can't have a
|
||||
file called "Hello.doc" and one called "hello.doc".
|
||||
|
||||
Rclone only supports your default One Drive, and doesn't work with One
|
||||
Drive for business. Both these issues may be fixed at some point
|
||||
depending on user demand!
|
||||
|
||||
There are quite a few characters that can't be in One Drive file
|
||||
names. These can't occur on Windows platforms, but on non-Windows
|
||||
platforms they are common. Rclone will map these names to and from an
|
||||
identical looking unicode equivalent. For example if a file has a `?`
|
||||
in it, it will be mapped to the similar looking `？` instead.
|
||||
@@ -15,23 +15,30 @@ show through.
|
||||
|
||||
Here is an overview of the major features of each cloud storage system.
|
||||
|
||||
| Name | MD5SUM | ModTime | Case Sensitive | Duplicate Files |
|
||||
| ---------------------- |:-------:|:-------:|:--------------:|:---------------:|
|
||||
| Google Drive | Yes | Yes | No | Yes |
|
||||
| Amazon S3 | Yes | Yes | No | No |
|
||||
| Openstack Swift | Yes | Yes | No | No |
|
||||
| Dropbox | No | No | Yes | No |
|
||||
| Google Cloud Storage | Yes | Yes | No | No |
|
||||
| Amazon Cloud Drive | Yes | No | Yes | No |
|
||||
| The local filesystem | Yes | Yes | Depends | No |
|
||||
| Name | Hash | ModTime | Case Insensitive | Duplicate Files |
|
||||
| ---------------------- |:-------:|:-------:|:----------------:|:---------------:|
|
||||
| Google Drive | MD5 | Yes | No | Yes |
|
||||
| Amazon S3 | MD5 | Yes | No | No |
|
||||
| Openstack Swift | MD5 | Yes | No | No |
|
||||
| Dropbox | - | No | Yes | No |
|
||||
| Google Cloud Storage | MD5 | Yes | No | No |
|
||||
| Amazon Drive | MD5 | No | Yes | No |
|
||||
| Microsoft One Drive | SHA1 | Yes | Yes | No |
|
||||
| Hubic | MD5 | Yes | No | No |
|
||||
| Backblaze B2 | SHA1 | Yes | No | No |
|
||||
| Yandex Disk | MD5 | Yes | No | No |
|
||||
| The local filesystem | All | Yes | Depends | No |
|
||||
|
||||
### MD5SUM ###
|
||||
### Hash ###
|
||||
|
||||
The cloud storage system supports MD5SUMs of the objects. This
|
||||
is used if available when transferring data as an integrity check and
|
||||
The cloud storage system supports various hash types of the objects.
|
||||
The hashes are used when transferring data as an integrity check and
|
||||
can be specifically used with the `--checksum` flag in syncs and in
|
||||
the `check` command.
|
||||
|
||||
To use the checksum checks between filesystems they must support a
|
||||
common hash type.
|
||||
|
||||
### ModTime ###
|
||||
|
||||
The cloud storage system supports setting modification times on
|
||||
@@ -42,7 +49,7 @@ default, though the MD5SUM can be checked with the `--checksum` flag.
|
||||
All cloud storage systems support some kind of date on the object and
|
||||
these will be set when transferring from the cloud storage system.
|
||||
|
||||
### Case Sensitive ###
|
||||
### Case Insensitive ###
|
||||
|
||||
If a cloud storage system is case sensitive then it is possible to
|
||||
have two files which differ only in case, eg `file.txt` and
|
||||
@@ -55,7 +62,7 @@ matter how many times you run the sync it never completes fully.
|
||||
|
||||
The local filesystem may or may not be case sensitive depending on OS.
|
||||
|
||||
* Windows - usuall case insensitive
|
||||
* Windows - usually case insensitive, though case is preserved
|
||||
* OSX - usually case insensitive, though it is possible to format case sensitive
|
||||
* Linux - usually case sensitive, but there are case insensitive file systems (eg FAT formatted USB keys)
|
||||
|
||||
@@ -68,4 +75,5 @@ systems.
|
||||
If a cloud storage system allows duplicate files then it can have two
|
||||
objects with the same name.
|
||||
|
||||
This confuses rclone greatly when syncing.
|
||||
This confuses rclone greatly when syncing - use the `rclone dedupe`
|
||||
command to rename or remove duplicates.
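For example, to sort out duplicates interactively in one directory (path illustrative):

    rclone dedupe remote:path/with/duplicates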
|
||||
|
||||
88 docs/content/remote_setup.md (new file)
@@ -0,0 +1,88 @@
|
||||
---
|
||||
title: "Remote Setup"
|
||||
description: "Configuring rclone up on a remote / headless machine"
|
||||
date: "2016-01-07"
|
||||
---
|
||||
|
||||
# Configuring rclone on a remote / headless machine #
|
||||
|
||||
Some of the configurations (those involving oauth2) require an
|
||||
Internet connected web browser.
|
||||
|
||||
If you are trying to set rclone up on a remote or headless box with no
|
||||
browser available on it (eg a NAS or a server in a datacenter) then
|
||||
you will need to use an alternative means of configuration. There are
|
||||
two ways of doing it, described below.
|
||||
|
||||
## Configuring using rclone authorize ##
|
||||
|
||||
On the headless box
|
||||
|
||||
```
|
||||
...
|
||||
Remote config
|
||||
Use auto config?
|
||||
* Say Y if not sure
|
||||
* Say N if you are working on a remote or headless machine
|
||||
y) Yes
|
||||
n) No
|
||||
y/n> n
|
||||
For this to work, you will need rclone available on a machine that has a web browser available.
|
||||
Execute the following on your machine:
|
||||
rclone authorize "amazon cloud drive"
|
||||
Then paste the result below:
|
||||
result>
|
||||
```
|
||||
|
||||
Then on your main desktop machine
|
||||
|
||||
```
|
||||
rclone authorize "amazon cloud drive"
|
||||
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
|
||||
Log in and authorize rclone for access
|
||||
Waiting for code...
|
||||
Got code
|
||||
Paste the following into your remote machine --->
|
||||
SECRET_TOKEN
|
||||
<---End paste
|
||||
```
|
||||
|
||||
Then back to the headless box, paste in the code
|
||||
|
||||
```
|
||||
result> SECRET_TOKEN
|
||||
--------------------
|
||||
[acd12]
|
||||
client_id =
|
||||
client_secret =
|
||||
token = SECRET_TOKEN
|
||||
--------------------
|
||||
y) Yes this is OK
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d>
|
||||
```
|
||||
|
||||
## Configuring by copying the config file ##
|
||||
|
||||
Rclone stores all of its config in a single configuration file. This
|
||||
can easily be copied to configure a remote rclone.
|
||||
|
||||
So first configure rclone on your desktop machine
|
||||
|
||||
rclone config
|
||||
|
||||
to set up the config file.
|
||||
|
||||
Find the config file by running `rclone -h` and looking for the help for the `--config` option
|
||||
|
||||
```
|
||||
$ rclone -h
|
||||
[snip]
|
||||
--config="/home/user/.rclone.conf": Config file.
|
||||
[snip]
|
||||
```
|
||||
|
||||
Now transfer it to the remote box (scp, cut paste, ftp, sftp etc) and
|
||||
place it in the correct place (use `rclone -h` on the remote box to
|
||||
find out where).
|
||||
@@ -1,10 +1,10 @@
|
||||
---
|
||||
title: "Amazon S3"
|
||||
description: "Rclone docs for Amazon S3"
|
||||
date: "2014-04-26"
|
||||
date: "2016-07-11"
|
||||
---
|
||||
|
||||
<i class="fa fa-archive"></i> Amazon S3
|
||||
<i class="fa fa-amazon"></i> Amazon S3
|
||||
---------------------------------------
|
||||
|
||||
Paths are specified as `remote:bucket` (or `remote:` for the `lsd`
|
||||
@@ -19,39 +19,82 @@ This will guide you through an interactive setup process.
|
||||
```
|
||||
No remotes found - make a new one
|
||||
n) New remote
|
||||
q) Quit config
|
||||
n/q> n
|
||||
s) Set configuration password
|
||||
n/s> n
|
||||
name> remote
|
||||
What type of source is it?
|
||||
Choose a number from below
|
||||
1) swift
|
||||
2) s3
|
||||
3) local
|
||||
4) google cloud storage
|
||||
5) dropbox
|
||||
6) drive
|
||||
type> 2
|
||||
AWS Access Key ID.
|
||||
access_key_id> accesskey
|
||||
AWS Secret Access Key (password).
|
||||
secret_access_key> secretaccesskey
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Amazon Drive
|
||||
\ "amazon cloud drive"
|
||||
2 / Amazon S3 (also Dreamhost, Ceph)
|
||||
\ "s3"
|
||||
3 / Backblaze B2
|
||||
\ "b2"
|
||||
4 / Dropbox
|
||||
\ "dropbox"
|
||||
5 / Google Cloud Storage (this is not Google Drive)
|
||||
\ "google cloud storage"
|
||||
6 / Google Drive
|
||||
\ "drive"
|
||||
7 / Hubic
|
||||
\ "hubic"
|
||||
8 / Local Disk
|
||||
\ "local"
|
||||
9 / Microsoft OneDrive
|
||||
\ "onedrive"
|
||||
10 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
|
||||
\ "swift"
|
||||
11 / Yandex Disk
|
||||
\ "yandex"
|
||||
Storage> 2
|
||||
Get AWS credentials from runtime (environment variables or EC2 meta data if no env vars). Only applies if access_key_id and secret_access_key is blank.
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Enter AWS credentials in the next step
|
||||
\ "false"
|
||||
2 / Get AWS credentials from the environment (env vars or IAM)
|
||||
\ "true"
|
||||
env_auth> 1
|
||||
AWS Access Key ID - leave blank for anonymous access or runtime credentials.
|
||||
access_key_id> access_key
|
||||
AWS Secret Access Key (password) - leave blank for anonymous access or runtime credentials.
|
||||
secret_access_key> secret_key
|
||||
Region to connect to.
|
||||
Choose a number from below, or type in your own value
|
||||
* The default endpoint - a good choice if you are unsure.
|
||||
* US Region, Northern Virginia or Pacific Northwest.
|
||||
* Leave location constraint empty.
|
||||
1) us-east-1
|
||||
* US West (Oregon) Region
|
||||
* Needs location constraint us-west-2.
|
||||
2) us-west-2
|
||||
[snip]
|
||||
* South America (Sao Paulo) Region
|
||||
* Needs location constraint sa-east-1.
|
||||
9) sa-east-1
|
||||
* If using an S3 clone that only understands v2 signatures - eg Ceph - set this and make sure you set the endpoint.
|
||||
10) other-v2-signature
|
||||
* If using an S3 clone that understands v4 signatures set this and make sure you set the endpoint.
|
||||
11) other-v4-signature
|
||||
/ The default endpoint - a good choice if you are unsure.
|
||||
1 | US Region, Northern Virginia or Pacific Northwest.
|
||||
| Leave location constraint empty.
|
||||
\ "us-east-1"
|
||||
/ US West (Oregon) Region
|
||||
2 | Needs location constraint us-west-2.
|
||||
\ "us-west-2"
|
||||
/ US West (Northern California) Region
|
||||
3 | Needs location constraint us-west-1.
|
||||
\ "us-west-1"
|
||||
/ EU (Ireland) Region Region
|
||||
4 | Needs location constraint EU or eu-west-1.
|
||||
\ "eu-west-1"
|
||||
/ EU (Frankfurt) Region
|
||||
5 | Needs location constraint eu-central-1.
|
||||
\ "eu-central-1"
|
||||
/ Asia Pacific (Singapore) Region
|
||||
6 | Needs location constraint ap-southeast-1.
|
||||
\ "ap-southeast-1"
|
||||
/ Asia Pacific (Sydney) Region
|
||||
7 | Needs location constraint ap-southeast-2.
|
||||
\ "ap-southeast-2"
|
||||
/ Asia Pacific (Tokyo) Region
|
||||
8 | Needs location constraint ap-northeast-1.
|
||||
\ "ap-northeast-1"
|
||||
/ South America (Sao Paulo) Region
|
||||
9 | Needs location constraint sa-east-1.
|
||||
\ "sa-east-1"
|
||||
/ If using an S3 clone that only understands v2 signatures
|
||||
10 | eg Ceph/Dreamhost
|
||||
| set this and make sure you set the endpoint.
|
||||
\ "other-v2-signature"
|
||||
/ If using an S3 clone that understands v4 signatures set this
|
||||
11 | and make sure you set the endpoint.
|
||||
\ "other-v4-signature"
|
||||
region> 1
|
||||
Endpoint for S3 API.
|
||||
Leave blank if using AWS to use the default endpoint for the region.
|
||||
@@ -59,21 +102,38 @@ Specify if using an S3 clone such as Ceph.
|
||||
endpoint>
|
||||
Location constraint - must be set to match the Region. Used when creating buckets only.
|
||||
Choose a number from below, or type in your own value
|
||||
* Empty for US Region, Northern Virginia or Pacific Northwest.
|
||||
1)
|
||||
* US West (Oregon) Region.
|
||||
2) us-west-2
|
||||
* US West (Northern California) Region.
|
||||
3) us-west-1
|
||||
* EU (Ireland) Region.
|
||||
4) eu-west-1
|
||||
[snip]
|
||||
1 / Empty for US Region, Northern Virginia or Pacific Northwest.
|
||||
\ ""
|
||||
2 / US West (Oregon) Region.
|
||||
\ "us-west-2"
|
||||
3 / US West (Northern California) Region.
|
||||
\ "us-west-1"
|
||||
4 / EU (Ireland) Region.
|
||||
\ "eu-west-1"
|
||||
5 / EU Region.
|
||||
\ "EU"
|
||||
6 / Asia Pacific (Singapore) Region.
|
||||
\ "ap-southeast-1"
|
||||
7 / Asia Pacific (Sydney) Region.
|
||||
\ "ap-southeast-2"
|
||||
8 / Asia Pacific (Tokyo) Region.
|
||||
\ "ap-northeast-1"
|
||||
9 / South America (Sao Paulo) Region.
|
||||
\ "sa-east-1"
|
||||
location_constraint> 1
|
||||
The server-side encryption algorithm used when storing this object in S3.
|
||||
Choose a number from below, or type in your own value
|
||||
1 / None
|
||||
\ ""
|
||||
2 / AES256
|
||||
\ "AES256"
|
||||
server_side_encryption>
|
||||
Remote config
|
||||
--------------------
|
||||
[remote]
|
||||
access_key_id = accesskey
|
||||
secret_access_key = secretaccesskey
|
||||
env_auth = false
|
||||
access_key_id = access_key
|
||||
secret_access_key = secret_key
|
||||
region = us-east-1
|
||||
endpoint =
|
||||
location_constraint =
|
||||
@@ -82,17 +142,6 @@ y) Yes this is OK
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
Current remotes:
|
||||
|
||||
Name Type
|
||||
==== ====
|
||||
remote s3
|
||||
|
||||
e) Edit existing remote
|
||||
n) New remote
|
||||
d) Delete remote
|
||||
q) Quit config
|
||||
e/n/d/q> q
|
||||
```
|
||||
|
||||
This remote is called `remote` and can now be used like this
|
||||
@@ -133,36 +182,59 @@ created in. If you attempt to access a bucket from the wrong region,
|
||||
you will get an error, `incorrect region, the bucket is not in 'XXX'
|
||||
region`.
|
||||
|
||||
### Authentication ###

There are two ways to supply `rclone` with a set of AWS
credentials. In order of precedence:

 - Directly in the rclone configuration file (as configured by `rclone config`)
   - set `access_key_id` and `secret_access_key`
 - Runtime configuration:
   - set `env_auth` to `true` in the config file
   - Exporting the following environment variables before running `rclone`
     - Access Key ID: `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY`
     - Secret Access Key: `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY`
   - Running `rclone` on an EC2 instance with an IAM role

If none of these options ends up providing `rclone` with AWS
credentials then S3 interaction will be non-authenticated (see below).
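
As a rough sketch of the runtime-credentials route, the config entry and
environment could look something like this (the remote name `remote`, the
bucket `mybucket` and the key values are all placeholders):

```
[remote]
env_auth = true
access_key_id =
secret_access_key =
```

    export AWS_ACCESS_KEY_ID=XXXXXXXXXXXXXXXXXXXX
    export AWS_SECRET_ACCESS_KEY=YYYYYYYYYYYYYYYYYYYY
    rclone lsd remote:mybucket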
|
||||
|
||||
### Anonymous access to public buckets ###
|
||||
|
||||
If you want to use rclone to access a public bucket, configure with a
|
||||
blank `access_key_id` and `secret_access_key`. Eg
|
||||
|
||||
```
|
||||
e) Edit existing remote
|
||||
No remotes found - make a new one
|
||||
n) New remote
|
||||
d) Delete remote
|
||||
q) Quit config
|
||||
e/n/d/q> n
|
||||
n/q> n
|
||||
name> anons3
|
||||
What type of source is it?
|
||||
Choose a number from below
|
||||
1) amazon cloud drive
|
||||
2) drive
|
||||
3) dropbox
|
||||
4) google cloud storage
|
||||
5) local
|
||||
6) s3
|
||||
7) swift
|
||||
type> 6
|
||||
AWS Access Key ID - leave blank for anonymous access.
|
||||
access_key_id>
|
||||
AWS Secret Access Key (password) - leave blank for anonymous access.
|
||||
secret_access_key>
|
||||
Region to connect to.
|
||||
region> 1
|
||||
endpoint>
|
||||
location_constraint>
|
||||
2) b2
|
||||
3) drive
|
||||
4) dropbox
|
||||
5) google cloud storage
|
||||
6) swift
|
||||
7) hubic
|
||||
8) local
|
||||
9) onedrive
|
||||
10) s3
|
||||
11) yandex
|
||||
type> 10
|
||||
Get AWS credentials from runtime (environment variables or EC2 meta data if no env vars). Only applies if access_key_id and secret_access_key is blank.
|
||||
Choose a number from below, or type in your own value
|
||||
* Enter AWS credentials in the next step
|
||||
1) false
|
||||
* Get AWS credentials from the environment (env vars or IAM)
|
||||
2) true
|
||||
env_auth> 1
|
||||
AWS Access Key ID - leave blank for anonymous access or runtime credentials.
|
||||
access_key_id>
|
||||
AWS Secret Access Key (password) - leave blank for anonymous access or runtime credentials.
|
||||
secret_access_key>
|
||||
...
|
||||
```
|
||||
|
||||
Then use it as normal with the name of the public bucket, eg
|
||||
@@ -209,3 +281,60 @@ removed).
|
||||
|
||||
Because this is a JSON dump, it is encoding the `/` as `\/`, so if you
use the secret key as `xxxxxx/xxxx` it will work fine.
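
For instance, a secret key containing a slash would show up in such a dump
along these lines (the field name and value here are purely illustrative):

    "SecretAccessKey": "xxxxxx\/xxxx"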
|
||||
|
||||
### Minio ###
|
||||
|
||||
[Minio](https://minio.io/) is an object storage server built for cloud application developers and devops.
|
||||
|
||||
It is very easy to install and provides an S3 compatible server which can be used by rclone.
|
||||
|
||||
To use it, install Minio following the instructions from the web site.
|
||||
|
||||
When it configures itself Minio will print something like this
|
||||
|
||||
```
|
||||
AccessKey: WLGDGYAQYIGI833EV05A SecretKey: BYvgJM101sHngl2uzjXS/OBF/aMxAN06JrJ3qJlF Region: us-east-1
|
||||
|
||||
Minio Object Storage:
|
||||
http://127.0.0.1:9000
|
||||
http://10.0.0.3:9000
|
||||
|
||||
Minio Browser:
|
||||
http://127.0.0.1:9000
|
||||
http://10.0.0.3:9000
|
||||
```
|
||||
|
||||
These details need to go into `rclone config` like this. Note that it
|
||||
is important to put the region in as stated above.
|
||||
|
||||
```
|
||||
env_auth> 1
|
||||
access_key_id> WLGDGYAQYIGI833EV05A
|
||||
secret_access_key> BYvgJM101sHngl2uzjXS/OBF/aMxAN06JrJ3qJlF
|
||||
region> us-east-1
|
||||
endpoint> http://10.0.0.3:9000
|
||||
location_constraint>
|
||||
server_side_encryption>
|
||||
```
|
||||
|
||||
Which makes the config file look like this
|
||||
|
||||
```
|
||||
[minio]
|
||||
env_auth = false
|
||||
access_key_id = WLGDGYAQYIGI833EV05A
|
||||
secret_access_key = BYvgJM101sHngl2uzjXS/OBF/aMxAN06JrJ3qJlF
|
||||
region = us-east-1
|
||||
endpoint = http://10.0.0.3:9000
|
||||
location_constraint =
|
||||
server_side_encryption =
|
||||
```
|
||||
|
||||
Minio doesn't support all the features of S3 yet. In particular it
doesn't support MD5 checksums (ETags) or metadata. This means rclone
can't check MD5SUMs or store the modified date. However, you can work
around this with the `--size-only` flag of rclone.
|
||||
|
||||
So once set up, for example to copy files into a bucket
|
||||
|
||||
rclone --size-only copy /path/to/files minio:bucket
|
||||
|
||||
@@ -25,42 +25,74 @@ This will guide you through an interactive setup process.
|
||||
```
|
||||
No remotes found - make a new one
|
||||
n) New remote
|
||||
q) Quit config
|
||||
n/q> n
|
||||
s) Set configuration password
|
||||
n/s> n
|
||||
name> remote
|
||||
What type of source is it?
|
||||
Choose a number from below
|
||||
1) swift
|
||||
2) s3
|
||||
3) local
|
||||
4) drive
|
||||
type> 1
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Amazon Drive
|
||||
\ "amazon cloud drive"
|
||||
2 / Amazon S3 (also Dreamhost, Ceph)
|
||||
\ "s3"
|
||||
3 / Backblaze B2
|
||||
\ "b2"
|
||||
4 / Dropbox
|
||||
\ "dropbox"
|
||||
5 / Google Cloud Storage (this is not Google Drive)
|
||||
\ "google cloud storage"
|
||||
6 / Google Drive
|
||||
\ "drive"
|
||||
7 / Hubic
|
||||
\ "hubic"
|
||||
8 / Local Disk
|
||||
\ "local"
|
||||
9 / Microsoft OneDrive
|
||||
\ "onedrive"
|
||||
10 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
|
||||
\ "swift"
|
||||
11 / Yandex Disk
|
||||
\ "yandex"
|
||||
Storage> 10
|
||||
User name to log in.
|
||||
user> user_name
|
||||
API key or password.
|
||||
key> password_or_api_key
|
||||
Authentication URL for server.
|
||||
Choose a number from below, or type in your own value
|
||||
* Rackspace US
|
||||
1) https://auth.api.rackspacecloud.com/v1.0
|
||||
* Rackspace UK
|
||||
2) https://lon.auth.api.rackspacecloud.com/v1.0
|
||||
* Rackspace v2
|
||||
3) https://identity.api.rackspacecloud.com/v2.0
|
||||
* Memset Memstore UK
|
||||
4) https://auth.storage.memset.com/v1.0
|
||||
* Memset Memstore UK v2
|
||||
5) https://auth.storage.memset.com/v2.0
|
||||
1 / Rackspace US
|
||||
\ "https://auth.api.rackspacecloud.com/v1.0"
|
||||
2 / Rackspace UK
|
||||
\ "https://lon.auth.api.rackspacecloud.com/v1.0"
|
||||
3 / Rackspace v2
|
||||
\ "https://identity.api.rackspacecloud.com/v2.0"
|
||||
4 / Memset Memstore UK
|
||||
\ "https://auth.storage.memset.com/v1.0"
|
||||
5 / Memset Memstore UK v2
|
||||
\ "https://auth.storage.memset.com/v2.0"
|
||||
6 / OVH
|
||||
\ "https://auth.cloud.ovh.net/v2.0"
|
||||
auth> 1
|
||||
User domain - optional (v3 auth)
|
||||
domain> Default
|
||||
Tenant name - optional
|
||||
tenant>
|
||||
tenant>
|
||||
Tenant domain - optional (v3 auth)
|
||||
tenant_domain>
|
||||
Region name - optional
|
||||
region>
|
||||
Storage URL - optional
|
||||
storage_url>
|
||||
Remote config
|
||||
AuthVersion - optional - set to (1,2,3) if your auth URL has no version
|
||||
auth_version>
|
||||
--------------------
|
||||
[remote]
|
||||
user = user_name
|
||||
key = password_or_api_key
|
||||
auth = https://auth.api.rackspacecloud.com/v1.0
|
||||
tenant =
|
||||
tenant =
|
||||
region =
|
||||
storage_url =
|
||||
--------------------
|
||||
y) Yes this is OK
|
||||
e) Edit this remote
|
||||
@@ -87,6 +119,16 @@ excess files in the container.
|
||||
|
||||
rclone sync /home/local/directory remote:container
|
||||
|
||||
### Specific options ###
|
||||
|
||||
Here are the command line options specific to this cloud storage
|
||||
system.
|
||||
|
||||
#### --swift-chunk-size=SIZE ####

Above this size files will be chunked into a _segments container. The
default for this is 5GB, which is also its maximum value.
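
For example, to copy with a smaller chunk size (the paths and remote name
are placeholders, and the size uses rclone's usual size suffixes):

    rclone --swift-chunk-size 1G copy /path/to/bigfiles remote:container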
|
||||
|
||||
### Modified time ###
|
||||
|
||||
The modified time is stored as metadata on the object as
|
||||
@@ -95,3 +137,25 @@ ns.
|
||||
|
||||
This is a de facto standard (used in the official python-swiftclient
amongst others) for storing the modification time for an object.
|
||||
|
||||
### Limitations ###
|
||||
|
||||
The Swift API doesn't return a correct MD5SUM for segmented files
|
||||
(Dynamic or Static Large Objects) so rclone won't check or use the
|
||||
MD5SUM for these.
|
||||
|
||||
### Troubleshooting ###
|
||||
|
||||
#### Rclone gives Failed to create file system for "remote:": Bad Request ####
|
||||
|
||||
Due to an oddity of the underlying swift library, it gives a "Bad
|
||||
Request" error rather than a more sensible error when the
|
||||
authentication fails for Swift.
|
||||
|
||||
So this most likely means your username / password is wrong. You can
|
||||
investigate further with the `--dump-bodies` flag.
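
For example (the remote name is a placeholder):

    rclone --dump-bodies lsd remote: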
|
||||
|
||||
#### Rclone gives Failed to create file system: Response didn't have storage storage url and auth token ####
|
||||
|
||||
This is most likely caused by forgetting to specify your tenant when
|
||||
setting up a swift remote.
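
As a sketch, a swift remote with the tenant filled in would have config
lines along these lines (all values are placeholders and which fields you
need depends on your provider):

```
[remote]
type = swift
user = user_name
key = password_or_api_key
auth = https://auth.api.rackspacecloud.com/v1.0
tenant = my_tenant
```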
|
||||
|
||||
113 docs/content/yandex.md (new file)
@@ -0,0 +1,113 @@
|
||||
---
|
||||
title: "Yandex"
|
||||
description: "Yandex Disk"
|
||||
date: "2015-12-30"
|
||||
---
|
||||
|
||||
<i class="fa fa-space-shuttle"></i>Yandex Disk
|
||||
----------------------------------------
|
||||
|
||||
[Yandex Disk](https://disk.yandex.com) is a cloud storage solution created by [Yandex](http://yandex.com).
|
||||
|
||||
Yandex paths may be as deep as required, eg `remote:directory/subdirectory`.
|
||||
|
||||
Here is an example of making a yandex configuration. First run
|
||||
|
||||
rclone config
|
||||
|
||||
This will guide you through an interactive setup process:
|
||||
|
||||
```
|
||||
No remotes found - make a new one
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
n/s> n
|
||||
name> remote
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Amazon Drive
|
||||
\ "amazon cloud drive"
|
||||
2 / Amazon S3 (also Dreamhost, Ceph)
|
||||
\ "s3"
|
||||
3 / Backblaze B2
|
||||
\ "b2"
|
||||
4 / Dropbox
|
||||
\ "dropbox"
|
||||
5 / Google Cloud Storage (this is not Google Drive)
|
||||
\ "google cloud storage"
|
||||
6 / Google Drive
|
||||
\ "drive"
|
||||
7 / Hubic
|
||||
\ "hubic"
|
||||
8 / Local Disk
|
||||
\ "local"
|
||||
9 / Microsoft OneDrive
|
||||
\ "onedrive"
|
||||
10 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
|
||||
\ "swift"
|
||||
11 / Yandex Disk
|
||||
\ "yandex"
|
||||
Storage> 11
|
||||
Yandex Client Id - leave blank normally.
|
||||
client_id>
|
||||
Yandex Client Secret - leave blank normally.
|
||||
client_secret>
|
||||
Remote config
|
||||
Use auto config?
|
||||
* Say Y if not sure
|
||||
* Say N if you are working on a remote or headless machine
|
||||
y) Yes
|
||||
n) No
|
||||
y/n> y
|
||||
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
|
||||
Log in and authorize rclone for access
|
||||
Waiting for code...
|
||||
Got code
|
||||
--------------------
|
||||
[remote]
|
||||
client_id =
|
||||
client_secret =
|
||||
token = {"access_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","token_type":"bearer","expiry":"2016-12-29T12:27:11.362788025Z"}
|
||||
--------------------
|
||||
y) Yes this is OK
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
|
||||
See the [remote setup docs](/remote_setup/) for how to set it up on a
|
||||
machine with no Internet browser available.
|
||||
|
||||
Note that rclone runs a webserver on your local machine to collect the
token as returned from Yandex Disk. This only runs from the moment it
opens your browser to the moment you get back the verification code.
This is on `http://127.0.0.1:53682/` and it may require you to
unblock it temporarily if you are running a host firewall.
|
||||
|
||||
Once configured you can then use `rclone` like this,
|
||||
|
||||
See top level directories
|
||||
|
||||
rclone lsd remote:
|
||||
|
||||
Make a new directory
|
||||
|
||||
rclone mkdir remote:directory
|
||||
|
||||
List the contents of a directory
|
||||
|
||||
rclone ls remote:directory
|
||||
|
||||
Sync `/home/local/directory` to the remote path, deleting any
|
||||
excess files in the path.
|
||||
|
||||
rclone sync /home/local/directory remote:directory
|
||||
|
||||
### Modified time ###
|
||||
|
||||
Modified times are supported and are stored accurate to 1 ns in custom
metadata called `rclone_modified`, in RFC3339 format with nanoseconds.
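
For illustration, the stored value would look something like this (the
timestamp is just an example):

    rclone_modified: 2016-01-02T15:04:05.123456789Z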
|
||||
|
||||
### MD5 checksums ###
|
||||
|
||||
MD5 checksums are natively supported by Yandex Disk.
|
||||
@@ -2,8 +2,8 @@
|
||||
<div class="row">
|
||||
<hr>
|
||||
<div class="col-sm-12">
|
||||
<p>© <a href="http://www.craig-wood.com/nick/">Nick Craig-Wood</a> 2014<br>
|
||||
Website hosted on <a href="http://www.memset.com/cloud/storage/">Memset Memstore™</a>,
|
||||
<p>© <a href="http://www.craig-wood.com/nick/">Nick Craig-Wood</a> 2014-2016<br>
|
||||
Website hosted on a <a href="http://www.memset.com/cloud/storage/">FREE <span style="font-weight: bold; font-family: arial black, arial, sans-serif; font-style: italic;">MEMSET MEMSTORE™</span></a>,
|
||||
uploaded with <a href="http://rclone.org">rclone</a>
|
||||
and built with <a href="https://github.com/spf13/hugo">Hugo</a></p>
|
||||
</div>
|
||||
|
||||
@@ -8,6 +8,6 @@
|
||||
<br><g:plusone size="medium"></g:plusone>
|
||||
<script type="text/javascript" src="https://apis.google.com/js/plusone.js"></script>
|
||||
<br><iframe src="http://ghbtns.com/github-btn.html?user=ncw&repo=rclone&type=watch&count=true" allowtransparency="true" frameborder="0" scrolling="0" width="110px" height="20"></iframe>
|
||||
<br><iframe src="//www.facebook.com/plugins/like.php?href=http%3A%2F%2Frclone.org%2Fdocs%2Fplugins%2F&width=110&layout=button_count&action=like&show_faces=true&share=true&height=21&appId=232073720158744" scrolling="no" frameborder="0" style="border:none; overflow:hidden; width:200px; height:21px;" allowTransparency="true"></iframe>
|
||||
<br><iframe src="//www.facebook.com/plugins/like.php?href=http%3A%2F%2Frclone.org%2F&width=110&layout=button_count&action=like&show_faces=true&share=true&height=21&appId=232073720158744" scrolling="no" frameborder="0" style="border:none; overflow:hidden; width:200px; height:21px;" allowTransparency="true"></iframe>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
<span class="icon-bar"></span>
|
||||
<span class="icon-bar"></span>
|
||||
</button>
|
||||
<a class="navbar-brand" href="{{ .Site.BaseUrl }}"><i class="fa fa-home"></i> {{ .Site.Title }}</a>
|
||||
<a class="navbar-brand" href="{{ .Site.BaseURL }}"><i class="fa fa-home"></i> {{ .Site.Title }}</a>
|
||||
</div>
|
||||
<div class="collapse navbar-collapse navbar-ex1-collapse">
|
||||
<ul class="nav navbar-nav">
|
||||
@@ -32,12 +32,16 @@
|
||||
<ul class="dropdown-menu">
|
||||
<li><a href="/overview/"><i class="fa fa-archive"></i> Overview</a></li>
|
||||
<li><a href="/drive/"><i class="fa fa-google"></i> Drive</a></li>
|
||||
<li><a href="/s3/"><i class="fa fa-archive"></i> S3</a></li>
|
||||
<li><a href="/s3/"><i class="fa fa-amazon"></i> S3</a></li>
|
||||
<li><a href="/swift/"><i class="fa fa-space-shuttle"></i> Swift</a></li>
|
||||
<li><a href="/dropbox/"><i class="fa fa-dropbox"></i> Dropbox</a></li>
|
||||
<li><a href="/googlecloudstorage/"><i class="fa fa-google"></i> Google Cloud Storage</a></li>
|
||||
<li><a href="/amazonclouddrive/"><i class="fa fa-archive"></i> Amazon Cloud Drive</a></li>
|
||||
<li><a href="/amazonclouddrive/"><i class="fa fa-amazon"></i> Amazon Drive</a></li>
|
||||
<li><a href="/onedrive/"><i class="fa fa-windows"></i> Microsoft One Drive</a></li>
|
||||
<li><a href="/hubic/"><i class="fa fa-space-shuttle"></i> Hubic</a></li>
|
||||
<li><a href="/b2/"><i class="fa fa-fire"></i> Backblaze B2</a></li>
|
||||
<li><a href="/local/"><i class="fa fa-file"></i> Local</a></li>
|
||||
<li><a href="/yandex/"><i class="fa fa-space-shuttle"></i> Yandex Disk</a></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li><a href="/contact/"><i class="fa fa-envelope"></i> Contact</a></li>
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
<div class="container">
|
||||
<div class="row">
|
||||
<div class="col-md-9">
|
||||
{{ range $key, $value := .Site.Indexes.groups.about.Pages }}
|
||||
{{ range $key, $value := .Site.Taxonomies.groups.about.Pages }}
|
||||
{{ $value.Content }}
|
||||
{{ end }}
|
||||
</div>
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
{{ range .Data.Pages }}
|
||||
<url>
|
||||
<loc>{{ .Permalink }}</loc>
|
||||
<lastmod>{{ safeHtml ( .Date.Format "2006-01-02T15:04:05-07:00" ) }}</lastmod>{{ with .Sitemap.ChangeFreq }}
|
||||
<lastmod>{{ safeHTML ( .Date.Format "2006-01-02T15:04:05-07:00" ) }}</lastmod>{{ with .Sitemap.ChangeFreq }}
|
||||
<changefreq>{{ . }}</changefreq>{{ end }}{{ if ge .Sitemap.Priority 0.0 }}
|
||||
<priority>{{ .Sitemap.Priority }}</priority>{{ end }}
|
||||
</url>
|
||||
|
||||
542 docs/static/css/font-awesome.css (vendored)
@@ -1,22 +1,21 @@
|
||||
/*!
|
||||
* Font Awesome 4.1.0 by @davegandy - http://fontawesome.io - @fontawesome
|
||||
* Font Awesome 4.4.0 by @davegandy - http://fontawesome.io - @fontawesome
|
||||
* License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
|
||||
*/
|
||||
/* FONT PATH
|
||||
* -------------------------- */
|
||||
@font-face {
|
||||
font-family: 'FontAwesome';
|
||||
src: url('../fonts/fontawesome-webfont.eot?v=4.1.0');
|
||||
src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.1.0') format('embedded-opentype'), url('../fonts/fontawesome-webfont.woff?v=4.1.0') format('woff'), url('../fonts/fontawesome-webfont.ttf?v=4.1.0') format('truetype'), url('../fonts/fontawesome-webfont.svg?v=4.1.0#fontawesomeregular') format('svg');
|
||||
src: url('../fonts/fontawesome-webfont.eot?v=4.4.0');
|
||||
src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.4.0') format('embedded-opentype'), url('../fonts/fontawesome-webfont.woff2?v=4.4.0') format('woff2'), url('../fonts/fontawesome-webfont.woff?v=4.4.0') format('woff'), url('../fonts/fontawesome-webfont.ttf?v=4.4.0') format('truetype'), url('../fonts/fontawesome-webfont.svg?v=4.4.0#fontawesomeregular') format('svg');
|
||||
font-weight: normal;
|
||||
font-style: normal;
|
||||
}
|
||||
.fa {
|
||||
display: inline-block;
|
||||
font-family: FontAwesome;
|
||||
font-style: normal;
|
||||
font-weight: normal;
|
||||
line-height: 1;
|
||||
font: normal normal normal 14px/1 FontAwesome;
|
||||
font-size: inherit;
|
||||
text-rendering: auto;
|
||||
-webkit-font-smoothing: antialiased;
|
||||
-moz-osx-font-smoothing: grayscale;
|
||||
}
|
||||
@@ -65,6 +64,19 @@
|
||||
border: solid 0.08em #eeeeee;
|
||||
border-radius: .1em;
|
||||
}
|
||||
.fa-pull-left {
|
||||
float: left;
|
||||
}
|
||||
.fa-pull-right {
|
||||
float: right;
|
||||
}
|
||||
.fa.fa-pull-left {
|
||||
margin-right: .3em;
|
||||
}
|
||||
.fa.fa-pull-right {
|
||||
margin-left: .3em;
|
||||
}
|
||||
/* Deprecated as of 4.4.0 */
|
||||
.pull-right {
|
||||
float: right;
|
||||
}
|
||||
@@ -78,36 +90,24 @@
|
||||
margin-left: .3em;
|
||||
}
|
||||
.fa-spin {
|
||||
-webkit-animation: spin 2s infinite linear;
|
||||
-moz-animation: spin 2s infinite linear;
|
||||
-o-animation: spin 2s infinite linear;
|
||||
animation: spin 2s infinite linear;
|
||||
-webkit-animation: fa-spin 2s infinite linear;
|
||||
animation: fa-spin 2s infinite linear;
|
||||
}
|
||||
@-moz-keyframes spin {
|
||||
0% {
|
||||
-moz-transform: rotate(0deg);
|
||||
}
|
||||
100% {
|
||||
-moz-transform: rotate(359deg);
|
||||
}
|
||||
.fa-pulse {
|
||||
-webkit-animation: fa-spin 1s infinite steps(8);
|
||||
animation: fa-spin 1s infinite steps(8);
|
||||
}
|
||||
@-webkit-keyframes spin {
|
||||
@-webkit-keyframes fa-spin {
|
||||
0% {
|
||||
-webkit-transform: rotate(0deg);
|
||||
transform: rotate(0deg);
|
||||
}
|
||||
100% {
|
||||
-webkit-transform: rotate(359deg);
|
||||
transform: rotate(359deg);
|
||||
}
|
||||
}
|
||||
@-o-keyframes spin {
|
||||
0% {
|
||||
-o-transform: rotate(0deg);
|
||||
}
|
||||
100% {
|
||||
-o-transform: rotate(359deg);
|
||||
}
|
||||
}
|
||||
@keyframes spin {
|
||||
@keyframes fa-spin {
|
||||
0% {
|
||||
-webkit-transform: rotate(0deg);
|
||||
transform: rotate(0deg);
|
||||
@@ -120,43 +120,40 @@
|
||||
.fa-rotate-90 {
|
||||
filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=1);
|
||||
-webkit-transform: rotate(90deg);
|
||||
-moz-transform: rotate(90deg);
|
||||
-ms-transform: rotate(90deg);
|
||||
-o-transform: rotate(90deg);
|
||||
transform: rotate(90deg);
|
||||
}
|
||||
.fa-rotate-180 {
|
||||
filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2);
|
||||
-webkit-transform: rotate(180deg);
|
||||
-moz-transform: rotate(180deg);
|
||||
-ms-transform: rotate(180deg);
|
||||
-o-transform: rotate(180deg);
|
||||
transform: rotate(180deg);
|
||||
}
|
||||
.fa-rotate-270 {
|
||||
filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=3);
|
||||
-webkit-transform: rotate(270deg);
|
||||
-moz-transform: rotate(270deg);
|
||||
-ms-transform: rotate(270deg);
|
||||
-o-transform: rotate(270deg);
|
||||
transform: rotate(270deg);
|
||||
}
|
||||
.fa-flip-horizontal {
|
||||
filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);
|
||||
-webkit-transform: scale(-1, 1);
|
||||
-moz-transform: scale(-1, 1);
|
||||
-ms-transform: scale(-1, 1);
|
||||
-o-transform: scale(-1, 1);
|
||||
transform: scale(-1, 1);
|
||||
}
|
||||
.fa-flip-vertical {
|
||||
filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);
|
||||
-webkit-transform: scale(1, -1);
|
||||
-moz-transform: scale(1, -1);
|
||||
-ms-transform: scale(1, -1);
|
||||
-o-transform: scale(1, -1);
|
||||
transform: scale(1, -1);
|
||||
}
|
||||
:root .fa-rotate-90,
|
||||
:root .fa-rotate-180,
|
||||
:root .fa-rotate-270,
|
||||
:root .fa-flip-horizontal,
|
||||
:root .fa-flip-vertical {
|
||||
filter: none;
|
||||
}
|
||||
.fa-stack {
|
||||
position: relative;
|
||||
display: inline-block;
|
||||
@@ -222,6 +219,8 @@
|
||||
.fa-check:before {
|
||||
content: "\f00c";
|
||||
}
|
||||
.fa-remove:before,
|
||||
.fa-close:before,
|
||||
.fa-times:before {
|
||||
content: "\f00d";
|
||||
}
|
||||
@@ -551,7 +550,8 @@
|
||||
.fa-arrows-h:before {
|
||||
content: "\f07e";
|
||||
}
|
||||
.fa-bar-chart-o:before {
|
||||
.fa-bar-chart-o:before,
|
||||
.fa-bar-chart:before {
|
||||
content: "\f080";
|
||||
}
|
||||
.fa-twitter-square:before {
|
||||
@@ -627,6 +627,7 @@
|
||||
.fa-twitter:before {
|
||||
content: "\f099";
|
||||
}
|
||||
.fa-facebook-f:before,
|
||||
.fa-facebook:before {
|
||||
content: "\f09a";
|
||||
}
|
||||
@@ -639,6 +640,7 @@
|
||||
.fa-credit-card:before {
|
||||
content: "\f09d";
|
||||
}
|
||||
.fa-feed:before,
|
||||
.fa-rss:before {
|
||||
content: "\f09e";
|
||||
}
|
||||
@@ -1276,7 +1278,8 @@
|
||||
.fa-male:before {
|
||||
content: "\f183";
|
||||
}
|
||||
.fa-gittip:before {
|
||||
.fa-gittip:before,
|
||||
.fa-gratipay:before {
|
||||
content: "\f184";
|
||||
}
|
||||
.fa-sun-o:before {
|
||||
@@ -1380,7 +1383,6 @@
|
||||
.fa-digg:before {
|
||||
content: "\f1a6";
|
||||
}
|
||||
.fa-pied-piper-square:before,
|
||||
.fa-pied-piper:before {
|
||||
content: "\f1a7";
|
||||
}
|
||||
@@ -1497,6 +1499,7 @@
|
||||
content: "\f1cc";
|
||||
}
|
||||
.fa-life-bouy:before,
|
||||
.fa-life-buoy:before,
|
||||
.fa-life-saver:before,
|
||||
.fa-support:before,
|
||||
.fa-life-ring:before {
|
||||
@@ -1519,6 +1522,8 @@
|
||||
.fa-git:before {
|
||||
content: "\f1d3";
|
||||
}
|
||||
.fa-y-combinator-square:before,
|
||||
.fa-yc-square:before,
|
||||
.fa-hacker-news:before {
|
||||
content: "\f1d4";
|
||||
}
|
||||
@@ -1564,3 +1569,458 @@
|
||||
.fa-bomb:before {
|
||||
content: "\f1e2";
|
||||
}
|
||||
.fa-soccer-ball-o:before,
|
||||
.fa-futbol-o:before {
|
||||
content: "\f1e3";
|
||||
}
|
||||
.fa-tty:before {
|
||||
content: "\f1e4";
|
||||
}
|
||||
.fa-binoculars:before {
|
||||
content: "\f1e5";
|
||||
}
|
||||
.fa-plug:before {
|
||||
content: "\f1e6";
|
||||
}
|
||||
.fa-slideshare:before {
|
||||
content: "\f1e7";
|
||||
}
|
||||
.fa-twitch:before {
|
||||
content: "\f1e8";
|
||||
}
|
||||
.fa-yelp:before {
|
||||
content: "\f1e9";
|
||||
}
|
||||
.fa-newspaper-o:before {
|
||||
content: "\f1ea";
|
||||
}
|
||||
.fa-wifi:before {
|
||||
content: "\f1eb";
|
||||
}
|
||||
.fa-calculator:before {
|
||||
content: "\f1ec";
|
||||
}
|
||||
.fa-paypal:before {
|
||||
content: "\f1ed";
|
||||
}
|
||||
.fa-google-wallet:before {
|
||||
content: "\f1ee";
|
||||
}
|
||||
.fa-cc-visa:before {
|
||||
content: "\f1f0";
|
||||
}
|
||||
.fa-cc-mastercard:before {
|
||||
content: "\f1f1";
|
||||
}
|
||||
.fa-cc-discover:before {
|
||||
content: "\f1f2";
|
||||
}
|
||||
.fa-cc-amex:before {
|
||||
content: "\f1f3";
|
||||
}
|
||||
.fa-cc-paypal:before {
|
||||
content: "\f1f4";
|
||||
}
|
||||
.fa-cc-stripe:before {
|
||||
content: "\f1f5";
|
||||
}
|
||||
.fa-bell-slash:before {
|
||||
content: "\f1f6";
|
||||
}
|
||||
.fa-bell-slash-o:before {
|
||||
content: "\f1f7";
|
||||
}
|
||||
.fa-trash:before {
|
||||
content: "\f1f8";
|
||||
}
|
||||
.fa-copyright:before {
|
||||
content: "\f1f9";
|
||||
}
|
||||
.fa-at:before {
|
||||
content: "\f1fa";
|
||||
}
|
||||
.fa-eyedropper:before {
|
||||
content: "\f1fb";
|
||||
}
|
||||
.fa-paint-brush:before {
|
||||
content: "\f1fc";
|
||||
}
|
||||
.fa-birthday-cake:before {
|
||||
content: "\f1fd";
|
||||
}
|
||||
.fa-area-chart:before {
|
||||
content: "\f1fe";
|
||||
}
|
||||
.fa-pie-chart:before {
|
||||
content: "\f200";
|
||||
}
|
||||
.fa-line-chart:before {
|
||||
content: "\f201";
|
||||
}
|
||||
.fa-lastfm:before {
|
||||
content: "\f202";
|
||||
}
|
||||
.fa-lastfm-square:before {
|
||||
content: "\f203";
|
||||
}
|
||||
.fa-toggle-off:before {
|
||||
content: "\f204";
|
||||
}
|
||||
.fa-toggle-on:before {
|
||||
content: "\f205";
|
||||
}
|
||||
.fa-bicycle:before {
|
||||
content: "\f206";
|
||||
}
|
||||
.fa-bus:before {
|
||||
content: "\f207";
|
||||
}
|
||||
.fa-ioxhost:before {
|
||||
content: "\f208";
|
||||
}
|
||||
.fa-angellist:before {
|
||||
content: "\f209";
|
||||
}
|
||||
.fa-cc:before {
|
||||
content: "\f20a";
|
||||
}
|
||||
.fa-shekel:before,
|
||||
.fa-sheqel:before,
|
||||
.fa-ils:before {
|
||||
content: "\f20b";
|
||||
}
|
||||
.fa-meanpath:before {
|
||||
content: "\f20c";
|
||||
}
|
||||
.fa-buysellads:before {
|
||||
content: "\f20d";
|
||||
}
|
||||
.fa-connectdevelop:before {
|
||||
content: "\f20e";
|
||||
}
|
||||
.fa-dashcube:before {
|
||||
content: "\f210";
|
||||
}
|
||||
.fa-forumbee:before {
|
||||
content: "\f211";
|
||||
}
|
||||
.fa-leanpub:before {
|
||||
content: "\f212";
|
||||
}
|
||||
.fa-sellsy:before {
|
||||
content: "\f213";
|
||||
}
|
||||
.fa-shirtsinbulk:before {
|
||||
content: "\f214";
|
||||
}
|
||||
.fa-simplybuilt:before {
|
||||
content: "\f215";
|
||||
}
|
||||
.fa-skyatlas:before {
|
||||
content: "\f216";
|
||||
}
|
||||
.fa-cart-plus:before {
|
||||
content: "\f217";
|
||||
}
|
||||
.fa-cart-arrow-down:before {
|
||||
content: "\f218";
|
||||
}
|
||||
.fa-diamond:before {
|
||||
content: "\f219";
|
||||
}
|
||||
.fa-ship:before {
|
||||
content: "\f21a";
|
||||
}
|
||||
.fa-user-secret:before {
|
||||
content: "\f21b";
|
||||
}
|
||||
.fa-motorcycle:before {
|
||||
content: "\f21c";
|
||||
}
|
||||
.fa-street-view:before {
|
||||
content: "\f21d";
|
||||
}
|
||||
.fa-heartbeat:before {
|
||||
content: "\f21e";
|
||||
}
|
||||
.fa-venus:before {
|
||||
content: "\f221";
|
||||
}
|
||||
.fa-mars:before {
|
||||
content: "\f222";
|
||||
}
|
||||
.fa-mercury:before {
|
||||
content: "\f223";
|
||||
}
|
||||
.fa-intersex:before,
|
||||
.fa-transgender:before {
|
||||
content: "\f224";
|
||||
}
|
||||
.fa-transgender-alt:before {
|
||||
content: "\f225";
|
||||
}
|
||||
.fa-venus-double:before {
|
||||
content: "\f226";
|
||||
}
|
||||
.fa-mars-double:before {
|
||||
content: "\f227";
|
||||
}
|
||||
.fa-venus-mars:before {
|
||||
content: "\f228";
|
||||
}
|
||||
.fa-mars-stroke:before {
|
||||
content: "\f229";
|
||||
}
|
||||
.fa-mars-stroke-v:before {
|
||||
content: "\f22a";
|
||||
}
|
||||
.fa-mars-stroke-h:before {
|
||||
content: "\f22b";
|
||||
}
|
||||
.fa-neuter:before {
|
||||
content: "\f22c";
|
||||
}
|
||||
.fa-genderless:before {
|
||||
content: "\f22d";
|
||||
}
|
||||
.fa-facebook-official:before {
|
||||
content: "\f230";
|
||||
}
|
||||
.fa-pinterest-p:before {
|
||||
content: "\f231";
|
||||
}
|
||||
.fa-whatsapp:before {
|
||||
content: "\f232";
|
||||
}
|
||||
.fa-server:before {
|
||||
content: "\f233";
|
||||
}
|
||||
.fa-user-plus:before {
|
||||
content: "\f234";
|
||||
}
|
||||
.fa-user-times:before {
|
||||
content: "\f235";
|
||||
}
|
||||
.fa-hotel:before,
|
||||
.fa-bed:before {
|
||||
content: "\f236";
|
||||
}
|
||||
.fa-viacoin:before {
|
||||
content: "\f237";
|
||||
}
|
||||
.fa-train:before {
|
||||
content: "\f238";
|
||||
}
|
||||
.fa-subway:before {
|
||||
content: "\f239";
|
||||
}
|
||||
.fa-medium:before {
|
||||
content: "\f23a";
|
||||
}
|
||||
.fa-yc:before,
|
||||
.fa-y-combinator:before {
|
||||
content: "\f23b";
|
||||
}
|
||||
.fa-optin-monster:before {
|
||||
content: "\f23c";
|
||||
}
|
||||
.fa-opencart:before {
|
||||
content: "\f23d";
|
||||
}
|
||||
.fa-expeditedssl:before {
|
||||
content: "\f23e";
|
||||
}
|
||||
.fa-battery-4:before,
|
||||
.fa-battery-full:before {
|
||||
content: "\f240";
|
||||
}
|
||||
.fa-battery-3:before,
|
||||
.fa-battery-three-quarters:before {
|
||||
content: "\f241";
|
||||
}
|
||||
.fa-battery-2:before,
|
||||
.fa-battery-half:before {
|
||||
content: "\f242";
|
||||
}
|
||||
.fa-battery-1:before,
|
||||
.fa-battery-quarter:before {
|
||||
content: "\f243";
|
||||
}
|
||||
.fa-battery-0:before,
|
||||
.fa-battery-empty:before {
|
||||
content: "\f244";
|
||||
}
|
||||
.fa-mouse-pointer:before {
|
||||
content: "\f245";
|
||||
}
|
||||
.fa-i-cursor:before {
|
||||
content: "\f246";
|
||||
}
|
||||
.fa-object-group:before {
|
||||
content: "\f247";
|
||||
}
|
||||
.fa-object-ungroup:before {
|
||||
content: "\f248";
|
||||
}
|
||||
.fa-sticky-note:before {
|
||||
content: "\f249";
|
||||
}
|
||||
.fa-sticky-note-o:before {
|
||||
content: "\f24a";
|
||||
}
|
||||
.fa-cc-jcb:before {
|
||||
content: "\f24b";
|
||||
}
|
||||
.fa-cc-diners-club:before {
|
||||
content: "\f24c";
|
||||
}
|
||||
.fa-clone:before {
|
||||
content: "\f24d";
|
||||
}
|
||||
.fa-balance-scale:before {
|
||||
content: "\f24e";
|
||||
}
|
||||
.fa-hourglass-o:before {
|
||||
content: "\f250";
|
||||
}
|
||||
.fa-hourglass-1:before,
|
||||
.fa-hourglass-start:before {
|
||||
content: "\f251";
|
||||
}
|
||||
.fa-hourglass-2:before,
|
||||
.fa-hourglass-half:before {
|
||||
content: "\f252";
|
||||
}
|
||||
.fa-hourglass-3:before,
|
||||
.fa-hourglass-end:before {
|
||||
content: "\f253";
|
||||
}
|
||||
.fa-hourglass:before {
|
||||
content: "\f254";
|
||||
}
|
||||
.fa-hand-grab-o:before,
|
||||
.fa-hand-rock-o:before {
|
||||
content: "\f255";
|
||||
}
|
||||
.fa-hand-stop-o:before,
|
||||
.fa-hand-paper-o:before {
|
||||
content: "\f256";
|
||||
}
|
||||
.fa-hand-scissors-o:before {
|
||||
content: "\f257";
|
||||
}
|
||||
.fa-hand-lizard-o:before {
|
||||
content: "\f258";
|
||||
}
|
||||
.fa-hand-spock-o:before {
|
||||
content: "\f259";
|
||||
}
|
||||
.fa-hand-pointer-o:before {
|
||||
content: "\f25a";
|
||||
}
|
||||
.fa-hand-peace-o:before {
|
||||
content: "\f25b";
|
||||
}
|
||||
.fa-trademark:before {
|
||||
content: "\f25c";
|
||||
}
|
||||
.fa-registered:before {
|
||||
content: "\f25d";
|
||||
}
|
||||
.fa-creative-commons:before {
|
||||
content: "\f25e";
|
||||
}
|
||||
.fa-gg:before {
|
||||
content: "\f260";
|
||||
}
|
||||
.fa-gg-circle:before {
|
||||
content: "\f261";
|
||||
}
|
||||
.fa-tripadvisor:before {
|
||||
content: "\f262";
|
||||
}
|
||||
.fa-odnoklassniki:before {
|
||||
content: "\f263";
|
||||
}
|
||||
.fa-odnoklassniki-square:before {
|
||||
content: "\f264";
|
||||
}
|
||||
.fa-get-pocket:before {
|
||||
content: "\f265";
|
||||
}
|
||||
.fa-wikipedia-w:before {
|
||||
content: "\f266";
|
||||
}
|
||||
.fa-safari:before {
|
||||
content: "\f267";
|
||||
}
|
||||
.fa-chrome:before {
|
||||
content: "\f268";
|
||||
}
|
||||
.fa-firefox:before {
|
||||
content: "\f269";
|
||||
}
|
||||
.fa-opera:before {
|
||||
content: "\f26a";
|
||||
}
|
||||
.fa-internet-explorer:before {
|
||||
content: "\f26b";
|
||||
}
|
||||
.fa-tv:before,
|
||||
.fa-television:before {
|
||||
content: "\f26c";
|
||||
}
|
||||
.fa-contao:before {
|
||||
content: "\f26d";
|
||||
}
|
||||
.fa-500px:before {
|
||||
content: "\f26e";
|
||||
}
|
||||
.fa-amazon:before {
|
||||
content: "\f270";
|
||||
}
|
||||
.fa-calendar-plus-o:before {
|
||||
content: "\f271";
|
||||
}
|
||||
.fa-calendar-minus-o:before {
|
||||
content: "\f272";
|
||||
}
|
||||
.fa-calendar-times-o:before {
|
||||
content: "\f273";
|
||||
}
|
||||
.fa-calendar-check-o:before {
|
||||
content: "\f274";
|
||||
}
|
||||
.fa-industry:before {
|
||||
content: "\f275";
|
||||
}
|
||||
.fa-map-pin:before {
|
||||
content: "\f276";
|
||||
}
|
||||
.fa-map-signs:before {
|
||||
content: "\f277";
|
||||
}
|
||||
.fa-map-o:before {
|
||||
content: "\f278";
|
||||
}
|
||||
.fa-map:before {
|
||||
content: "\f279";
|
||||
}
|
||||
.fa-commenting:before {
|
||||
content: "\f27a";
|
||||
}
|
||||
.fa-commenting-o:before {
|
||||
content: "\f27b";
|
||||
}
|
||||
.fa-houzz:before {
|
||||
content: "\f27c";
|
||||
}
|
||||
.fa-vimeo:before {
|
||||
content: "\f27d";
|
||||
}
|
||||
.fa-black-tie:before {
|
||||
content: "\f27e";
|
||||
}
|
||||
.fa-fonticons:before {
|
||||
content: "\f280";
|
||||
}
|
||||
|
||||
BIN  docs/static/fonts/FontAwesome.otf (vendored) - binary file not shown
BIN  docs/static/fonts/fontawesome-webfont.eot (vendored, executable file → normal file) - binary file not shown
1064 docs/static/fonts/fontawesome-webfont.svg (vendored, executable file → normal file) - diff suppressed because it is too large (before: 248 KiB, after: 348 KiB)
BIN  docs/static/fonts/fontawesome-webfont.ttf (vendored, executable file → normal file) - binary file not shown
BIN  docs/static/fonts/fontawesome-webfont.woff (vendored, executable file → normal file) - binary file not shown
BIN  docs/static/fonts/fontawesome-webfont.woff2 (vendored, new file) - binary file not shown
734  drive/drive.go - diff suppressed because it is too large
63   drive/drive_internal_test.go (new file)
@@ -0,0 +1,63 @@
|
||||
package drive
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"google.golang.org/api/drive/v2"
|
||||
)
|
||||
|
||||
func TestInternalParseExtensions(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
want []string
|
||||
wantErr error
|
||||
}{
|
||||
{"doc", []string{"doc"}, nil},
|
||||
{" docx ,XLSX, pptx,svg", []string{"docx", "xlsx", "pptx", "svg"}, nil},
|
||||
{"docx,svg,Docx", []string{"docx", "svg"}, nil},
|
||||
{"docx,potato,docx", []string{"docx"}, errors.New(`couldn't find mime type for extension "potato"`)},
|
||||
} {
|
||||
f := new(Fs)
|
||||
gotErr := f.parseExtensions(test.in)
|
||||
if test.wantErr == nil {
|
||||
assert.NoError(t, gotErr)
|
||||
} else {
|
||||
assert.EqualError(t, gotErr, test.wantErr.Error())
|
||||
}
|
||||
assert.Equal(t, test.want, f.extensions)
|
||||
}
|
||||
|
||||
// Test it is appending
|
||||
f := new(Fs)
|
||||
assert.Nil(t, f.parseExtensions("docx,svg"))
|
||||
assert.Nil(t, f.parseExtensions("docx,svg,xlsx"))
|
||||
assert.Equal(t, []string{"docx", "svg", "xlsx"}, f.extensions)
|
||||
|
||||
}
|
||||
|
||||
func TestInternalFindExportFormat(t *testing.T) {
|
||||
item := new(drive.File)
|
||||
item.ExportLinks = map[string]string{
|
||||
"application/pdf": "http://pdf",
|
||||
"application/rtf": "http://rtf",
|
||||
}
|
||||
for _, test := range []struct {
|
||||
extensions []string
|
||||
wantExtension string
|
||||
wantLink string
|
||||
}{
|
||||
{[]string{}, "", ""},
|
||||
{[]string{"pdf"}, "pdf", "http://pdf"},
|
||||
{[]string{"pdf", "rtf", "xls"}, "pdf", "http://pdf"},
|
||||
{[]string{"xls", "rtf", "pdf"}, "rtf", "http://rtf"},
|
||||
{[]string{"xls", "csv", "svg"}, "", ""},
|
||||
} {
|
||||
f := new(Fs)
|
||||
f.extensions = test.extensions
|
||||
gotExtension, gotLink := f.findExportFormat("file", item)
|
||||
assert.Equal(t, test.wantExtension, gotExtension)
|
||||
assert.Equal(t, test.wantLink, gotLink)
|
||||
}
|
||||
}
|
||||
@@ -13,44 +13,46 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
fstests.NilObject = fs.Object((*drive.FsObjectDrive)(nil))
|
||||
fstests.NilObject = fs.Object((*drive.Object)(nil))
|
||||
fstests.RemoteName = "TestDrive:"
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
|
||||
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"strconv"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/api/drive/v2"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
@@ -36,7 +37,7 @@ const (
|
||||
// resumableUpload is used by the generated APIs to provide resumable uploads.
|
||||
// It is not used by developers directly.
|
||||
type resumableUpload struct {
|
||||
f *FsDrive
|
||||
f *Fs
|
||||
remote string
|
||||
// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
|
||||
URI string
|
||||
@@ -51,13 +52,8 @@ type resumableUpload struct {
|
||||
}
|
||||
|
||||
// Upload the io.Reader in of size bytes with contentType and info
|
||||
func (f *FsDrive) Upload(in io.Reader, size int64, contentType string, info *drive.File, remote string) (*drive.File, error) {
|
||||
func (f *Fs) Upload(in io.Reader, size int64, contentType string, info *drive.File, remote string) (*drive.File, error) {
|
||||
fileID := info.Id
|
||||
var body io.Reader
|
||||
body, err := googleapi.WithoutDataWrapper.JSONReader(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
params := make(url.Values)
|
||||
params.Set("alt", "json")
|
||||
params.Set("uploadType", "resumable")
|
||||
@@ -69,16 +65,26 @@ func (f *FsDrive) Upload(in io.Reader, size int64, contentType string, info *dri
|
||||
method = "PUT"
|
||||
}
|
||||
urls += "?" + params.Encode()
|
||||
req, _ := http.NewRequest(method, urls, body)
|
||||
googleapi.Expand(req.URL, map[string]string{
|
||||
"fileId": fileID,
|
||||
})
|
||||
req.Header.Set("Content-Type", "application/json; charset=UTF-8")
|
||||
req.Header.Set("X-Upload-Content-Type", contentType)
|
||||
req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
|
||||
req.Header.Set("User-Agent", fs.UserAgent)
|
||||
var res *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var body io.Reader
|
||||
body, err = googleapi.WithoutDataWrapper.JSONReader(info)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
var req *http.Request
|
||||
req, err = http.NewRequest(method, urls, body)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
googleapi.Expand(req.URL, map[string]string{
|
||||
"fileId": fileID,
|
||||
})
|
||||
req.Header.Set("Content-Type", "application/json; charset=UTF-8")
|
||||
req.Header.Set("X-Upload-Content-Type", contentType)
|
||||
req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
|
||||
req.Header.Set("User-Agent", fs.UserAgent)
|
||||
res, err = f.client.Do(req)
|
||||
if err == nil {
|
||||
defer googleapi.CloseBody(res)
|
||||
@@ -138,7 +144,7 @@ func (rx *resumableUpload) transferStatus() (start int64, err error) {
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return 0, fmt.Errorf("unexpected http return code %v", res.StatusCode)
|
||||
return 0, errors.Errorf("unexpected http return code %v", res.StatusCode)
|
||||
}
|
||||
Range := res.Header.Get("Range")
|
||||
if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
|
||||
@@ -147,7 +153,7 @@ func (rx *resumableUpload) transferStatus() (start int64, err error) {
|
||||
return start, nil
|
||||
}
|
||||
}
|
||||
return 0, fmt.Errorf("unable to parse range %q", Range)
|
||||
return 0, errors.Errorf("unable to parse range %q", Range)
|
||||
}
|
||||
|
||||
// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
|
||||
|
||||
@@ -9,11 +9,11 @@ File system is case insensitive
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
@@ -21,15 +21,16 @@ import (
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/oauthutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/stacktic/dropbox"
|
||||
)
|
||||
|
||||
// Constants
|
||||
const (
|
||||
rcloneAppKey = "5jcck7diasz0rqy"
|
||||
rcloneAppSecret = "m8WRxJ6b1Z/Y25fDwJWS"
|
||||
metadataLimit = dropbox.MetadataLimitDefault // max items to fetch at once
|
||||
rcloneAppKey = "5jcck7diasz0rqy"
|
||||
rcloneEncryptedAppSecret = "m8WRxJ6b1Z/Y25fDwJWS"
|
||||
metadataLimit = dropbox.MetadataLimitDefault // max items to fetch at once
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -44,10 +45,11 @@ var (
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.Info{
|
||||
Name: "dropbox",
|
||||
NewFs: NewFs,
|
||||
Config: configHelper,
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "dropbox",
|
||||
Description: "Dropbox",
|
||||
NewFs: NewFs,
|
||||
Config: configHelper,
|
||||
Options: []fs.Option{{
|
||||
Name: "app_key",
|
||||
Help: "Dropbox App Key - leave blank normally.",
|
||||
@@ -92,8 +94,8 @@ func configHelper(name string) {
|
||||
}
|
||||
}
|
||||
|
||||
// FsDropbox represents a remote dropbox server
|
||||
type FsDropbox struct {
|
||||
// Fs represents a remote dropbox server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
db *dropbox.Dropbox // the connection to the dropbox server
|
||||
root string // the path we are working on
|
||||
@@ -101,29 +103,29 @@ type FsDropbox struct {
|
||||
slashRootSlash string // root with "/" prefix and postfix, lowercase
|
||||
}
|
||||
|
||||
// FsObjectDropbox describes a dropbox object
|
||||
type FsObjectDropbox struct {
|
||||
dropbox *FsDropbox // what this object is part of
|
||||
remote string // The remote path
|
||||
bytes int64 // size of the object
|
||||
modTime time.Time // time it was last modified
|
||||
hasMetadata bool // metadata is valid
|
||||
// Object describes a dropbox object
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
bytes int64 // size of the object
|
||||
modTime time.Time // time it was last modified
|
||||
hasMetadata bool // metadata is valid
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *FsDropbox) Name() string {
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *FsDropbox) Root() string {
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String converts this FsDropbox to a string
|
||||
func (f *FsDropbox) String() string {
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("Dropbox root '%s'", f.root)
|
||||
}
|
||||
|
||||
@@ -137,23 +139,23 @@ func newDropbox(name string) (*dropbox.Dropbox, error) {
|
||||
}
|
||||
appSecret := fs.ConfigFile.MustValue(name, "app_secret")
|
||||
if appSecret == "" {
|
||||
appSecret = fs.Reveal(rcloneAppSecret)
|
||||
appSecret = fs.Reveal(rcloneEncryptedAppSecret)
|
||||
}
|
||||
|
||||
err := db.SetAppInfo(appKey, appSecret)
|
||||
return db, err
|
||||
}
|
||||
|
||||
// NewFs contstructs an FsDropbox from the path, container:path
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
if uploadChunkSize > maxUploadChunkSize {
|
||||
return nil, fmt.Errorf("Chunk size too big, must be < %v", maxUploadChunkSize)
|
||||
return nil, errors.Errorf("chunk size too big, must be < %v", maxUploadChunkSize)
|
||||
}
|
||||
db, err := newDropbox(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f := &FsDropbox{
|
||||
f := &Fs{
|
||||
name: name,
|
||||
db: db,
|
||||
}
|
||||
@@ -171,22 +173,20 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
// See if the root is actually an object
|
||||
entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit)
|
||||
if err == nil && !entry.IsDir {
|
||||
remote := path.Base(f.root)
|
||||
newRoot := path.Dir(f.root)
|
||||
if newRoot == "." {
|
||||
newRoot = ""
|
||||
}
|
||||
f.setRoot(newRoot)
|
||||
obj := f.NewFsObject(remote)
|
||||
// return a Fs Limited to this object
|
||||
return fs.NewLimited(f, obj), nil
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Sets root in f
|
||||
func (f *FsDropbox) setRoot(root string) {
|
||||
func (f *Fs) setRoot(root string) {
|
||||
f.root = strings.Trim(root, "/")
|
||||
lowerCaseRoot := strings.ToLower(f.root)
|
||||
|
||||
@@ -197,166 +197,224 @@ func (f *FsDropbox) setRoot(root string) {
|
||||
}
|
||||
}
|
||||
|
||||
// Return an FsObject from a path
|
||||
// Return an Object from a path
|
||||
//
|
||||
// May return nil if an error occurred
|
||||
func (f *FsDropbox) newFsObjectWithInfo(remote string, info *dropbox.Entry) fs.Object {
|
||||
o := &FsObjectDropbox{
|
||||
dropbox: f,
|
||||
remote: remote,
|
||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) newObjectWithInfo(remote string, info *dropbox.Entry) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
if info != nil {
|
||||
o.setMetadataFromEntry(info)
|
||||
} else {
|
||||
err := o.readEntryAndSetMetadata()
|
||||
if err != nil {
|
||||
// logged already fs.Debug("Failed to read info: %s", err)
|
||||
return nil
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return o
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// NewFsObject returns an FsObject from a path
|
||||
//
|
||||
// May return nil if an error occurred
|
||||
func (f *FsDropbox) NewFsObject(remote string) fs.Object {
|
||||
return f.newFsObjectWithInfo(remote, nil)
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
return f.newObjectWithInfo(remote, nil)
|
||||
}
|
||||
|
||||
// Strips the root off path and returns it
|
||||
func (f *FsDropbox) stripRoot(path string) *string {
|
||||
lowercase := strings.ToLower(path)
|
||||
|
||||
if !strings.HasPrefix(lowercase, f.slashRootSlash) {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(f, "Path '%s' is not under root '%s'", path, f.slashRootSlash)
|
||||
return nil
|
||||
func strip(path, root string) (string, error) {
|
||||
if len(root) > 0 {
|
||||
if root[0] != '/' {
|
||||
root = "/" + root
|
||||
}
|
||||
if root[len(root)-1] != '/' {
|
||||
root += "/"
|
||||
}
|
||||
} else if len(root) == 0 {
|
||||
root = "/"
|
||||
}
|
||||
|
||||
stripped := path[len(f.slashRootSlash):]
|
||||
return &stripped
|
||||
lowercase := strings.ToLower(path)
|
||||
if !strings.HasPrefix(lowercase, root) {
|
||||
return "", errors.Errorf("path %q is not under root %q", path, root)
|
||||
}
|
||||
return path[len(root):], nil
|
||||
}
|
||||
|
||||
// Walk the root returning a channel of FsObjects
|
||||
func (f *FsDropbox) list(out fs.ObjectsChan) {
|
||||
// Strips the root off path and returns it
|
||||
func (f *Fs) stripRoot(path string) (string, error) {
|
||||
return strip(path, f.slashRootSlash)
|
||||
}
|
||||
|
||||
// Walk the root returning a channel of Objects
|
||||
func (f *Fs) list(out fs.ListOpts, dir string) {
|
||||
// Track path component case, it could be different for entries coming from DropBox API
|
||||
// See https://www.dropboxforum.com/hc/communities/public/questions/201665409-Wrong-character-case-of-folder-name-when-calling-listFolder-using-Sync-API?locale=en-us
|
||||
// and https://github.com/ncw/rclone/issues/53
|
||||
nameTree := newNameTree()
|
||||
cursor := ""
|
||||
for {
|
||||
deltaPage, err := f.db.Delta(cursor, f.slashRoot)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(f, "Couldn't list: %s", err)
|
||||
break
|
||||
} else {
|
||||
if deltaPage.Reset && cursor != "" {
|
||||
fs.ErrorLog(f, "Unexpected reset during listing - try again")
|
||||
fs.Stats.Error()
|
||||
break
|
||||
}
|
||||
fs.Debug(f, "%d delta entries received", len(deltaPage.Entries))
|
||||
for i := range deltaPage.Entries {
|
||||
deltaEntry := &deltaPage.Entries[i]
|
||||
entry := deltaEntry.Entry
|
||||
if entry == nil {
|
||||
// This notifies of a deleted object
|
||||
} else {
|
||||
if len(entry.Path) <= 1 || entry.Path[0] != '/' {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(f, "dropbox API inconsistency: a path should always start with a slash and be at least 2 characters: %s", entry.Path)
|
||||
continue
|
||||
}
|
||||
|
||||
lastSlashIndex := strings.LastIndex(entry.Path, "/")
|
||||
|
||||
var parentPath string
|
||||
if lastSlashIndex == 0 {
|
||||
parentPath = ""
|
||||
} else {
|
||||
parentPath = entry.Path[1:lastSlashIndex]
|
||||
}
|
||||
lastComponent := entry.Path[lastSlashIndex+1:]
|
||||
|
||||
if entry.IsDir {
|
||||
nameTree.PutCaseCorrectDirectoryName(parentPath, lastComponent)
|
||||
} else {
|
||||
parentPathCorrectCase := nameTree.GetPathWithCorrectCase(parentPath)
|
||||
if parentPathCorrectCase != nil {
|
||||
path := f.stripRoot(*parentPathCorrectCase + "/" + lastComponent)
|
||||
if path == nil {
|
||||
// an error occurred and logged by stripRoot
|
||||
continue
|
||||
}
|
||||
|
||||
out <- f.newFsObjectWithInfo(*path, entry)
|
||||
} else {
|
||||
nameTree.PutFile(parentPath, lastComponent, entry)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if !deltaPage.HasMore {
|
||||
break
|
||||
}
|
||||
cursor = deltaPage.Cursor.Cursor
|
||||
}
|
||||
root := f.slashRoot
|
||||
if dir != "" {
|
||||
root += "/" + dir
|
||||
// We assume that dir is entered in the correct case
|
||||
// here which is likely since it probably came from a
|
||||
// directory listing
|
||||
nameTree.PutCaseCorrectPath(strings.Trim(root, "/"))
|
||||
}
|
||||
|
||||
walkFunc := func(caseCorrectFilePath string, entry *dropbox.Entry) {
|
||||
path := f.stripRoot("/" + caseCorrectFilePath)
|
||||
if path == nil {
|
||||
// an error occurred and logged by stripRoot
|
||||
for {
|
||||
deltaPage, err := f.db.Delta(cursor, root)
|
||||
if err != nil {
|
||||
out.SetError(errors.Wrap(err, "couldn't list"))
|
||||
return
|
||||
}
|
||||
if deltaPage.Reset && cursor != "" {
|
||||
err = errors.New("unexpected reset during listing")
|
||||
out.SetError(err)
|
||||
break
|
||||
}
|
||||
fs.Debug(f, "%d delta entries received", len(deltaPage.Entries))
|
||||
for i := range deltaPage.Entries {
|
||||
deltaEntry := &deltaPage.Entries[i]
|
||||
entry := deltaEntry.Entry
|
||||
if entry == nil {
|
||||
// This notifies of a deleted object
|
||||
} else {
|
||||
if len(entry.Path) <= 1 || entry.Path[0] != '/' {
|
||||
fs.Log(f, "dropbox API inconsistency: a path should always start with a slash and be at least 2 characters: %s", entry.Path)
|
||||
continue
|
||||
}
|
||||
|
||||
out <- f.newFsObjectWithInfo(*path, entry)
|
||||
}
|
||||
nameTree.WalkFiles(f.root, walkFunc)
|
||||
}
|
||||
lastSlashIndex := strings.LastIndex(entry.Path, "/")
|
||||
|
||||
// List walks the path returning a channel of FsObjects
|
||||
func (f *FsDropbox) List() fs.ObjectsChan {
|
||||
out := make(fs.ObjectsChan, fs.Config.Checkers)
|
||||
go func() {
|
||||
defer close(out)
|
||||
f.list(out)
|
||||
}()
|
||||
return out
|
||||
}
|
||||
var parentPath string
|
||||
if lastSlashIndex == 0 {
|
||||
parentPath = ""
|
||||
} else {
|
||||
parentPath = entry.Path[1:lastSlashIndex]
|
||||
}
|
||||
lastComponent := entry.Path[lastSlashIndex+1:]
|
||||
|
||||
// ListDir walks the path returning a channel of FsObjects
|
||||
func (f *FsDropbox) ListDir() fs.DirChan {
|
||||
out := make(fs.DirChan, fs.Config.Checkers)
|
||||
go func() {
|
||||
defer close(out)
|
||||
entry, err := f.db.Metadata(f.root, true, false, "", "", metadataLimit)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(f, "Couldn't list directories in root: %s", err)
|
||||
} else {
|
||||
for i := range entry.Contents {
|
||||
entry := &entry.Contents[i]
|
||||
if entry.IsDir {
|
||||
name := f.stripRoot(entry.Path)
|
||||
if name == nil {
|
||||
// an error occurred and was logged by stripRoot
|
||||
continue
|
||||
nameTree.PutCaseCorrectDirectoryName(parentPath, lastComponent)
|
||||
name, err := f.stripRoot(entry.Path + "/")
|
||||
if err != nil {
|
||||
out.SetError(err)
|
||||
return
|
||||
}
|
||||
|
||||
out <- &fs.Dir{
|
||||
Name: *name,
|
||||
When: time.Time(entry.ClientMtime),
|
||||
Bytes: entry.Bytes,
|
||||
Count: -1,
|
||||
name = strings.Trim(name, "/")
|
||||
if name != "" && name != dir {
|
||||
dir := &fs.Dir{
|
||||
Name: name,
|
||||
When: time.Time(entry.ClientMtime),
|
||||
Bytes: entry.Bytes,
|
||||
Count: -1,
|
||||
}
|
||||
if out.AddDir(dir) {
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
parentPathCorrectCase := nameTree.GetPathWithCorrectCase(parentPath)
|
||||
if parentPathCorrectCase != nil {
|
||||
path, err := f.stripRoot(*parentPathCorrectCase + "/" + lastComponent)
|
||||
if err != nil {
|
||||
out.SetError(err)
|
||||
return
|
||||
}
|
||||
o, err := f.newObjectWithInfo(path, entry)
|
||||
if err != nil {
|
||||
out.SetError(err)
|
||||
return
|
||||
}
|
||||
if out.Add(o) {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
nameTree.PutFile(parentPath, lastComponent, entry)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
return out
|
||||
if !deltaPage.HasMore {
|
||||
break
|
||||
}
|
||||
cursor = deltaPage.Cursor.Cursor
|
||||
|
||||
}
|
||||
|
||||
walkFunc := func(caseCorrectFilePath string, entry *dropbox.Entry) error {
|
||||
path, err := f.stripRoot("/" + caseCorrectFilePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o, err := f.newObjectWithInfo(path, entry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if out.Add(o) {
|
||||
return fs.ErrorListAborted
|
||||
}
|
||||
return nil
|
||||
}
|
||||
err := nameTree.WalkFiles(f.root, walkFunc)
|
||||
if err != nil {
|
||||
out.SetError(err)
|
||||
}
|
||||
}
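
For orientation, the paging pattern used by the new list method above, reduced to a skeleton (error handling and the name-tree bookkeeping omitted; identifiers as in the surrounding code, so f, out and root are in scope):

cursor := ""
for {
	deltaPage, err := f.db.Delta(cursor, root)
	if err != nil {
		out.SetError(errors.Wrap(err, "couldn't list"))
		return
	}
	// ... record directories and emit files via the name tree ...
	if !deltaPage.HasMore {
		break
	}
	cursor = deltaPage.Cursor.Cursor
}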
|
||||
|
||||
// listOneLevel walks the path one level deep
|
||||
func (f *Fs) listOneLevel(out fs.ListOpts, dir string) {
|
||||
root := f.root
|
||||
if dir != "" {
|
||||
root += "/" + dir
|
||||
}
|
||||
dirEntry, err := f.db.Metadata(root, true, false, "", "", metadataLimit)
|
||||
if err != nil {
|
||||
out.SetError(errors.Wrap(err, "couldn't list single level"))
|
||||
return
|
||||
}
|
||||
for i := range dirEntry.Contents {
|
||||
entry := &dirEntry.Contents[i]
|
||||
remote, err := strip(entry.Path, root)
|
||||
if err != nil {
|
||||
out.SetError(err)
|
||||
return
|
||||
}
|
||||
if entry.IsDir {
|
||||
dir := &fs.Dir{
|
||||
Name: remote,
|
||||
When: time.Time(entry.ClientMtime),
|
||||
Bytes: entry.Bytes,
|
||||
Count: -1,
|
||||
}
|
||||
if out.AddDir(dir) {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
o, err := f.newObjectWithInfo(remote, entry)
|
||||
if err != nil {
|
||||
out.SetError(err)
|
||||
return
|
||||
}
|
||||
if out.Add(o) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// List walks the path returning a channel of Objects
|
||||
func (f *Fs) List(out fs.ListOpts, dir string) {
|
||||
defer out.Finished()
|
||||
level := out.Level()
|
||||
switch level {
|
||||
case 1:
|
||||
f.listOneLevel(out, dir)
|
||||
case fs.MaxLevel:
|
||||
f.list(out, dir)
|
||||
default:
|
||||
out.SetError(fs.ErrorLevelNotSupported)
|
||||
}
|
||||
}
|
||||
|
||||
// A read closer which doesn't close the input
|
||||
@@ -379,20 +437,23 @@ func (rc *readCloser) Close() error {
|
||||
// Copy the reader in to the new object which is returned
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *FsDropbox) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
|
||||
// Temporary FsObject under construction
|
||||
o := &FsObjectDropbox{dropbox: f, remote: remote}
|
||||
return o, o.Update(in, modTime, size)
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
|
||||
// Temporary Object under construction
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: src.Remote(),
|
||||
}
|
||||
return o, o.Update(in, src)
|
||||
}
|
||||
|
||||
// Mkdir creates the container if it doesn't exist
|
||||
func (f *FsDropbox) Mkdir() error {
|
||||
func (f *Fs) Mkdir() error {
|
||||
entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit)
|
||||
if err == nil {
|
||||
if entry.IsDir {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("%q already exists as file", f.root)
|
||||
return errors.Errorf("%q already exists as file", f.root)
|
||||
}
|
||||
_, err = f.db.CreateFolder(f.slashRoot)
|
||||
return err
|
||||
@@ -401,19 +462,19 @@ func (f *FsDropbox) Mkdir() error {
|
||||
// Rmdir deletes the container
|
||||
//
|
||||
// Returns an error if it isn't empty
|
||||
func (f *FsDropbox) Rmdir() error {
|
||||
func (f *Fs) Rmdir() error {
|
||||
entry, err := f.db.Metadata(f.slashRoot, true, false, "", "", 16)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(entry.Contents) != 0 {
|
||||
return errors.New("Directory not empty")
|
||||
return errors.New("directory not empty")
|
||||
}
|
||||
return f.Purge()
|
||||
}
|
||||
|
||||
// Precision returns the precision
|
||||
func (f *FsDropbox) Precision() time.Duration {
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return fs.ModTimeNotSupported
|
||||
}
|
||||
|
||||
@@ -426,21 +487,24 @@ func (f *FsDropbox) Precision() time.Duration {
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *FsDropbox) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*FsObjectDropbox)
|
||||
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debug(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
|
||||
// Temporary FsObject under construction
|
||||
dstObj := &FsObjectDropbox{dropbox: f, remote: remote}
|
||||
// Temporary Object under construction
|
||||
dstObj := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
|
||||
srcPath := srcObj.remotePath()
|
||||
dstPath := dstObj.remotePath()
|
||||
entry, err := f.db.Copy(srcPath, dstPath, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Copy failed: %s", err)
|
||||
return nil, errors.Wrap(err, "copy failed")
|
||||
}
|
||||
dstObj.setMetadataFromEntry(entry)
|
||||
return dstObj, nil
|
||||
@@ -451,7 +515,7 @@ func (f *FsDropbox) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
// Optional interface: Only implement this if you have a way of
|
||||
// deleting all the files quicker than just running Remove() on the
|
||||
// result of List()
|
||||
func (f *FsDropbox) Purge() error {
|
||||
func (f *Fs) Purge() error {
|
||||
// Let dropbox delete the filesystem tree
|
||||
_, err := f.db.Delete(f.slashRoot)
|
||||
return err
|
||||
@@ -466,21 +530,24 @@ func (f *FsDropbox) Purge() error {
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *FsDropbox) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*FsObjectDropbox)
|
||||
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debug(src, "Can't move - not same remote type")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
// Temporary FsObject under construction
|
||||
dstObj := &FsObjectDropbox{dropbox: f, remote: remote}
|
||||
// Temporary Object under construction
|
||||
dstObj := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
|
||||
srcPath := srcObj.remotePath()
|
||||
dstPath := dstObj.remotePath()
|
||||
entry, err := f.db.Move(srcPath, dstPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Move failed: %s", err)
|
||||
return nil, errors.Wrap(err, "move failed")
|
||||
}
|
||||
dstObj.setMetadataFromEntry(entry)
|
||||
return dstObj, nil
|
||||
@@ -493,8 +560,8 @@ func (f *FsDropbox) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
// If it isn't possible then return fs.ErrorCantDirMove
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *FsDropbox) DirMove(src fs.Fs) error {
|
||||
srcFs, ok := src.(*FsDropbox)
|
||||
func (f *Fs) DirMove(src fs.Fs) error {
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
fs.Debug(srcFs, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
@@ -509,20 +576,25 @@ func (f *FsDropbox) DirMove(src fs.Fs) error {
|
||||
// Do the move
|
||||
_, err = f.db.Move(srcFs.slashRoot, f.slashRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("MoveDir failed: %v", err)
|
||||
return errors.Wrap(err, "MoveDir failed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() fs.HashSet {
|
||||
return fs.HashSet(fs.HashNone)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *FsObjectDropbox) Fs() fs.Fs {
|
||||
return o.dropbox
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *FsObjectDropbox) String() string {
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
@@ -530,41 +602,45 @@ func (o *FsObjectDropbox) String() string {
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *FsObjectDropbox) Remote() string {
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Md5sum returns the Md5sum of an object returning a lowercase hex string
|
||||
func (o *FsObjectDropbox) Md5sum() (string, error) {
|
||||
return "", nil
|
||||
// Hash is unsupported on Dropbox
|
||||
func (o *Object) Hash(t fs.HashType) (string, error) {
|
||||
return "", fs.ErrHashUnsupported
|
||||
}
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *FsObjectDropbox) Size() int64 {
|
||||
func (o *Object) Size() int64 {
|
||||
return o.bytes
|
||||
}
|
||||
|
||||
// setMetadataFromEntry sets the fs data from a dropbox.Entry
|
||||
//
|
||||
// This isn't a complete set of metadata and has an inaccurate date
|
||||
func (o *FsObjectDropbox) setMetadataFromEntry(info *dropbox.Entry) {
|
||||
func (o *Object) setMetadataFromEntry(info *dropbox.Entry) {
|
||||
o.bytes = info.Bytes
|
||||
o.modTime = time.Time(info.ClientMtime)
|
||||
o.hasMetadata = true
|
||||
}
|
||||
|
||||
// Reads the entry from dropbox
|
||||
func (o *FsObjectDropbox) readEntry() (*dropbox.Entry, error) {
|
||||
entry, err := o.dropbox.db.Metadata(o.remotePath(), false, false, "", "", metadataLimit)
|
||||
func (o *Object) readEntry() (*dropbox.Entry, error) {
|
||||
entry, err := o.fs.db.Metadata(o.remotePath(), false, false, "", "", metadataLimit)
|
||||
if err != nil {
|
||||
fs.Debug(o, "Error reading file: %s", err)
|
||||
return nil, fmt.Errorf("Error reading file: %s", err)
|
||||
if dropboxErr, ok := err.(*dropbox.Error); ok {
|
||||
if dropboxErr.StatusCode == http.StatusNotFound {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return entry, nil
|
||||
}
|
||||
|
||||
// Read entry if not set and set metadata from it
|
||||
func (o *FsObjectDropbox) readEntryAndSetMetadata() error {
|
||||
func (o *Object) readEntryAndSetMetadata() error {
|
||||
// Last resort set time from client
|
||||
if !o.modTime.IsZero() {
|
||||
return nil
|
||||
@@ -578,8 +654,8 @@ func (o *FsObjectDropbox) readEntryAndSetMetadata() error {
|
||||
}
|
||||
|
||||
// Returns the remote path for the object
|
||||
func (o *FsObjectDropbox) remotePath() string {
|
||||
return o.dropbox.slashRootSlash + o.remote
|
||||
func (o *Object) remotePath() string {
|
||||
return o.fs.slashRootSlash + o.remote
|
||||
}
|
||||
|
||||
// Returns the key for the metadata database for a given path
|
||||
@@ -592,12 +668,12 @@ func metadataKey(path string) string {
|
||||
}
|
||||
|
||||
// Returns the key for the metadata database
|
||||
func (o *FsObjectDropbox) metadataKey() string {
|
||||
func (o *Object) metadataKey() string {
|
||||
return metadataKey(o.remotePath())
|
||||
}
|
||||
|
||||
// readMetaData gets the info if it hasn't already been fetched
|
||||
func (o *FsObjectDropbox) readMetaData() (err error) {
|
||||
func (o *Object) readMetaData() (err error) {
|
||||
if o.hasMetadata {
|
||||
return nil
|
||||
}
|
||||
@@ -609,10 +685,10 @@ func (o *FsObjectDropbox) readMetaData() (err error) {
|
||||
//
|
||||
// It attempts to read the object's mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *FsObjectDropbox) ModTime() time.Time {
|
||||
func (o *Object) ModTime() time.Time {
|
||||
err := o.readMetaData()
|
||||
if err != nil {
|
||||
fs.Log(o, "Failed to read metadata: %s", err)
|
||||
fs.Log(o, "Failed to read metadata: %v", err)
|
||||
return time.Now()
|
||||
}
|
||||
return o.modTime
|
||||
@@ -621,19 +697,26 @@ func (o *FsObjectDropbox) ModTime() time.Time {
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
//
|
||||
// Commits the datastore
|
||||
func (o *FsObjectDropbox) SetModTime(modTime time.Time) {
|
||||
func (o *Object) SetModTime(modTime time.Time) error {
|
||||
// FIXME not implemented
|
||||
return
|
||||
return fs.ErrorCantSetModTime
|
||||
}
|
||||
|
||||
// Storable returns whether this object is storable
|
||||
func (o *FsObjectDropbox) Storable() bool {
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *FsObjectDropbox) Open() (in io.ReadCloser, err error) {
|
||||
in, _, err = o.dropbox.db.Download(o.remotePath(), "", 0)
|
||||
func (o *Object) Open() (in io.ReadCloser, err error) {
|
||||
in, _, err = o.fs.db.Download(o.remotePath(), "", 0)
|
||||
if dropboxErr, ok := err.(*dropbox.Error); ok {
|
||||
// Dropbox return 461 for copyright violation so don't
|
||||
// attempt to retry this error
|
||||
if dropboxErr.StatusCode == 461 {
|
||||
return nil, fs.NoRetryError(err)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -642,32 +725,32 @@ func (o *FsObjectDropbox) Open() (in io.ReadCloser, err error) {
|
||||
// Copy the reader into the object updating modTime and size
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *FsObjectDropbox) Update(in io.Reader, modTime time.Time, size int64) error {
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
|
||||
remote := o.remotePath()
|
||||
if ignoredFiles.MatchString(remote) {
|
||||
fs.ErrorLog(o, "File name disallowed - not uploading")
|
||||
fs.Log(o, "File name disallowed - not uploading")
|
||||
return nil
|
||||
}
|
||||
entry, err := o.dropbox.db.UploadByChunk(ioutil.NopCloser(in), int(uploadChunkSize), remote, true, "")
|
||||
entry, err := o.fs.db.UploadByChunk(ioutil.NopCloser(in), int(uploadChunkSize), remote, true, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("Upload failed: %s", err)
|
||||
return errors.Wrap(err, "upload failed")
|
||||
}
|
||||
o.setMetadataFromEntry(entry)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *FsObjectDropbox) Remove() error {
|
||||
_, err := o.dropbox.db.Delete(o.remotePath())
|
||||
func (o *Object) Remove() error {
|
||||
_, err := o.fs.db.Delete(o.remotePath())
|
||||
return err
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*FsDropbox)(nil)
|
||||
_ fs.Copier = (*FsDropbox)(nil)
|
||||
_ fs.Purger = (*FsDropbox)(nil)
|
||||
_ fs.Mover = (*FsDropbox)(nil)
|
||||
_ fs.DirMover = (*FsDropbox)(nil)
|
||||
_ fs.Object = (*FsObjectDropbox)(nil)
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
)
|
||||
|
||||
@@ -13,44 +13,46 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
fstests.NilObject = fs.Object((*dropbox.FsObjectDropbox)(nil))
|
||||
fstests.NilObject = fs.Object((*dropbox.Object)(nil))
|
||||
fstests.RemoteName = "TestDropbox:"
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
|
||||
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
|
||||
@@ -9,6 +9,8 @@ import (
|
||||
"github.com/stacktic/dropbox"
|
||||
)
|
||||
|
||||
// FIXME Get rid of Stats.Error() counting and return errors
|
||||
|
||||
type nameTreeNode struct {
|
||||
// Map from lowercase directory name to tree node
|
||||
Directories map[string]*nameTreeNode
|
||||
@@ -46,7 +48,6 @@ func (tree *nameTreeNode) getTreeNode(path string) *nameTreeNode {
|
||||
// no lookup required, just return root
|
||||
return tree
|
||||
}
|
||||
|
||||
current := tree
|
||||
for _, component := range strings.Split(path, "/") {
|
||||
if len(component) == 0 {
|
||||
@@ -69,6 +70,29 @@ func (tree *nameTreeNode) getTreeNode(path string) *nameTreeNode {
|
||||
return current
|
||||
}
|
||||
|
||||
// PutCaseCorrectPath puts a known good path into the nameTree
|
||||
func (tree *nameTreeNode) PutCaseCorrectPath(caseCorrectPath string) {
|
||||
if len(caseCorrectPath) == 0 {
|
||||
return
|
||||
}
|
||||
current := tree
|
||||
for _, component := range strings.Split(caseCorrectPath, "/") {
|
||||
if len(component) == 0 {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(tree, "PutCaseCorrectPath: path component is empty (full path %q)", caseCorrectPath)
|
||||
return
|
||||
}
|
||||
lowercase := strings.ToLower(component)
|
||||
lookup := current.Directories[lowercase]
|
||||
if lookup == nil {
|
||||
lookup = newNameTreeNode(component)
|
||||
current.Directories[lowercase] = lookup
|
||||
}
|
||||
current = lookup
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (tree *nameTreeNode) PutCaseCorrectDirectoryName(parentPath string, caseCorrectDirectoryName string) {
|
||||
if len(caseCorrectDirectoryName) == 0 {
|
||||
fs.Stats.Error()
|
||||
@@ -143,9 +167,9 @@ func (tree *nameTreeNode) GetPathWithCorrectCase(path string) *string {
|
||||
return &resultString
|
||||
}
|
||||
|
||||
type nameTreeFileWalkFunc func(caseCorrectFilePath string, entry *dropbox.Entry)
|
||||
type nameTreeFileWalkFunc func(caseCorrectFilePath string, entry *dropbox.Entry) error
|
||||
|
||||
func (tree *nameTreeNode) walkFilesRec(currentPath string, walkFunc nameTreeFileWalkFunc) {
|
||||
func (tree *nameTreeNode) walkFilesRec(currentPath string, walkFunc nameTreeFileWalkFunc) error {
|
||||
var prefix string
|
||||
if currentPath == "" {
|
||||
prefix = ""
|
||||
@@ -154,7 +178,10 @@ func (tree *nameTreeNode) walkFilesRec(currentPath string, walkFunc nameTreeFile
|
||||
}
|
||||
|
||||
for name, entry := range tree.Files {
|
||||
walkFunc(prefix+name, entry)
|
||||
err := walkFunc(prefix+name, entry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for lowerCaseName, directory := range tree.Directories {
|
||||
@@ -165,15 +192,20 @@ func (tree *nameTreeNode) walkFilesRec(currentPath string, walkFunc nameTreeFile
|
||||
continue
|
||||
}
|
||||
|
||||
directory.walkFilesRec(prefix+caseCorrectName, walkFunc)
|
||||
err := directory.walkFilesRec(prefix+caseCorrectName, walkFunc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tree *nameTreeNode) WalkFiles(rootPath string, walkFunc nameTreeFileWalkFunc) {
|
||||
func (tree *nameTreeNode) WalkFiles(rootPath string, walkFunc nameTreeFileWalkFunc) error {
|
||||
node := tree.getTreeNode(rootPath)
|
||||
if node == nil {
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
node.walkFilesRec(rootPath, walkFunc)
|
||||
return node.walkFilesRec(rootPath, walkFunc)
|
||||
}
|
||||
|
||||
@@ -5,32 +5,47 @@ import (
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
dropboxapi "github.com/stacktic/dropbox"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func assert(t *testing.T, shouldBeTrue bool, failMessage string) {
|
||||
if !shouldBeTrue {
|
||||
t.Fatal(failMessage)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPutCaseCorrectDirectoryName(t *testing.T) {
|
||||
errors := fs.Stats.GetErrors()
|
||||
|
||||
tree := newNameTree()
|
||||
tree.PutCaseCorrectDirectoryName("a/b", "C")
|
||||
|
||||
assert(t, tree.CaseCorrectName == "", "Root CaseCorrectName should be empty")
|
||||
assert.Equal(t, "", tree.CaseCorrectName, "Root CaseCorrectName should be empty")
|
||||
|
||||
a := tree.Directories["a"]
|
||||
assert(t, a.CaseCorrectName == "", "CaseCorrectName at 'a' should be empty")
|
||||
assert.Equal(t, "", a.CaseCorrectName, "CaseCorrectName at 'a' should be empty")
|
||||
|
||||
b := a.Directories["b"]
|
||||
assert(t, b.CaseCorrectName == "", "CaseCorrectName at 'a/b' should be empty")
|
||||
assert.Equal(t, "", b.CaseCorrectName, "CaseCorrectName at 'a/b' should be empty")
|
||||
|
||||
c := b.Directories["c"]
|
||||
assert(t, c.CaseCorrectName == "C", "CaseCorrectName at 'a/b/c' should be 'C'")
|
||||
assert.Equal(t, "C", c.CaseCorrectName, "CaseCorrectName at 'a/b/c' should be 'C'")
|
||||
|
||||
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
|
||||
assert.Equal(t, errors, fs.Stats.GetErrors(), "No errors should be reported")
|
||||
}
|
||||
|
||||
func TestPutCaseCorrectPath(t *testing.T) {
|
||||
errors := fs.Stats.GetErrors()
|
||||
|
||||
tree := newNameTree()
|
||||
tree.PutCaseCorrectPath("A/b/C")
|
||||
|
||||
assert.Equal(t, "", tree.CaseCorrectName, "Root CaseCorrectName should be empty")
|
||||
|
||||
a := tree.Directories["a"]
|
||||
assert.Equal(t, "A", a.CaseCorrectName, "CaseCorrectName at 'a' should be 'A'")
|
||||
|
||||
b := a.Directories["b"]
|
||||
assert.Equal(t, "b", b.CaseCorrectName, "CaseCorrectName at 'a/b' should be 'b'")
|
||||
|
||||
c := b.Directories["c"]
|
||||
assert.Equal(t, "C", c.CaseCorrectName, "CaseCorrectName at 'a/b/c' should be 'C'")
|
||||
|
||||
assert.Equal(t, errors, fs.Stats.GetErrors(), "No errors should be reported")
|
||||
}
|
||||
|
||||
func TestPutCaseCorrectDirectoryNameEmptyComponent(t *testing.T) {
|
||||
@@ -41,7 +56,7 @@ func TestPutCaseCorrectDirectoryNameEmptyComponent(t *testing.T) {
|
||||
tree.PutCaseCorrectDirectoryName("b/", "C")
|
||||
tree.PutCaseCorrectDirectoryName("a//b", "C")
|
||||
|
||||
assert(t, fs.Stats.GetErrors() == errors+3, "3 errors should be reported")
|
||||
assert.True(t, fs.Stats.GetErrors() == errors+3, "3 errors should be reported")
|
||||
}
|
||||
|
||||
func TestPutCaseCorrectDirectoryNameEmptyParent(t *testing.T) {
|
||||
@@ -51,9 +66,9 @@ func TestPutCaseCorrectDirectoryNameEmptyParent(t *testing.T) {
|
||||
tree.PutCaseCorrectDirectoryName("", "C")
|
||||
|
||||
c := tree.Directories["c"]
|
||||
assert(t, c.CaseCorrectName == "C", "CaseCorrectName at 'c' should be 'C'")
|
||||
assert.True(t, c.CaseCorrectName == "C", "CaseCorrectName at 'c' should be 'C'")
|
||||
|
||||
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
|
||||
assert.True(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
|
||||
}
|
||||
|
||||
func TestGetPathWithCorrectCase(t *testing.T) {
|
||||
@@ -61,12 +76,12 @@ func TestGetPathWithCorrectCase(t *testing.T) {
|
||||
|
||||
tree := newNameTree()
|
||||
tree.PutCaseCorrectDirectoryName("a", "C")
|
||||
assert(t, tree.GetPathWithCorrectCase("a/c") == nil, "Path for 'a' should not be available")
|
||||
assert.True(t, tree.GetPathWithCorrectCase("a/c") == nil, "Path for 'a' should not be available")
|
||||
|
||||
tree.PutCaseCorrectDirectoryName("", "A")
|
||||
assert(t, *tree.GetPathWithCorrectCase("a/c") == "/A/C", "Path for 'a/c' should be '/A/C'")
|
||||
assert.True(t, *tree.GetPathWithCorrectCase("a/c") == "/A/C", "Path for 'a/c' should be '/A/C'")
|
||||
|
||||
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
|
||||
assert.True(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
|
||||
}
|
||||
|
||||
func TestPutAndWalk(t *testing.T) {
|
||||
@@ -77,16 +92,16 @@ func TestPutAndWalk(t *testing.T) {
|
||||
tree.PutCaseCorrectDirectoryName("", "A")
|
||||
|
||||
numCalled := 0
|
||||
walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) {
|
||||
assert(t, caseCorrectFilePath == "A/F", "caseCorrectFilePath should be A/F, not "+caseCorrectFilePath)
|
||||
assert(t, entry.Path == "xxx", "entry.Path should be xxx")
|
||||
walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) error {
|
||||
assert.True(t, caseCorrectFilePath == "A/F", "caseCorrectFilePath should be A/F, not "+caseCorrectFilePath)
|
||||
assert.True(t, entry.Path == "xxx", "entry.Path should be xxx")
|
||||
numCalled++
|
||||
return nil
|
||||
}
|
||||
tree.WalkFiles("", walkFunc)
|
||||
|
||||
assert(t, numCalled == 1, "walk func should be called only once")
|
||||
|
||||
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
|
||||
err := tree.WalkFiles("", walkFunc)
|
||||
assert.True(t, err == nil, "No error should be returned")
|
||||
assert.True(t, numCalled == 1, "walk func should be called only once")
|
||||
assert.True(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
|
||||
}
|
||||
|
||||
func TestPutAndWalkWithPrefix(t *testing.T) {
|
||||
@@ -97,16 +112,16 @@ func TestPutAndWalkWithPrefix(t *testing.T) {
|
||||
tree.PutCaseCorrectDirectoryName("", "A")
|
||||
|
||||
numCalled := 0
|
||||
walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) {
|
||||
assert(t, caseCorrectFilePath == "A/F", "caseCorrectFilePath should be A/F, not "+caseCorrectFilePath)
|
||||
assert(t, entry.Path == "xxx", "entry.Path should be xxx")
|
||||
walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) error {
|
||||
assert.True(t, caseCorrectFilePath == "A/F", "caseCorrectFilePath should be A/F, not "+caseCorrectFilePath)
|
||||
assert.True(t, entry.Path == "xxx", "entry.Path should be xxx")
|
||||
numCalled++
|
||||
return nil
|
||||
}
|
||||
tree.WalkFiles("A", walkFunc)
|
||||
|
||||
assert(t, numCalled == 1, "walk func should be called only once")
|
||||
|
||||
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
|
||||
err := tree.WalkFiles("A", walkFunc)
|
||||
assert.True(t, err == nil, "No error should be returned")
|
||||
assert.True(t, numCalled == 1, "walk func should be called only once")
|
||||
assert.True(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
|
||||
}
|
||||
|
||||
func TestPutAndWalkIncompleteTree(t *testing.T) {
|
||||
@@ -115,10 +130,11 @@ func TestPutAndWalkIncompleteTree(t *testing.T) {
|
||||
tree := newNameTree()
|
||||
tree.PutFile("a", "F", &dropboxapi.Entry{Path: "xxx"})
|
||||
|
||||
walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) {
|
||||
walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) error {
|
||||
t.Fatal("Should not be called")
|
||||
return nil
|
||||
}
|
||||
tree.WalkFiles("", walkFunc)
|
||||
|
||||
assert(t, fs.Stats.GetErrors() == errors+1, "One error should be reported")
|
||||
err := tree.WalkFiles("", walkFunc)
|
||||
assert.True(t, err == nil, "No error should be returned")
|
||||
assert.True(t, fs.Stats.GetErrors() == errors+1, "One error should be reported")
|
||||
}
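
In the same style as the tests above, a hedged sketch of the intended use of the name tree: record the case-correct spelling of each directory first, then resolve file paths against it (the names below are purely illustrative).

func TestCaseCorrectionSketch(t *testing.T) {
	tree := newNameTree()
	tree.PutCaseCorrectDirectoryName("", "Photos")
	tree.PutCaseCorrectDirectoryName("photos", "2016")

	// Lookups use lower case; the tree restores the recorded case.
	p := tree.GetPathWithCorrectCase("photos/2016")
	assert.True(t, p != nil && *p == "/Photos/2016", "case should be restored from the tree")
}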
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -120,18 +119,18 @@ func (s *StatsInfo) String() string {
|
||||
dtSeconds := dt.Seconds()
|
||||
speed := 0.0
|
||||
if dt > 0 {
|
||||
speed = float64(s.bytes) / 1024 / dtSeconds
|
||||
speed = float64(s.bytes) / dtSeconds
|
||||
}
|
||||
dtRounded := dt - (dt % (time.Second / 10))
|
||||
buf := &bytes.Buffer{}
|
||||
fmt.Fprintf(buf, `
|
||||
Transferred: %10d Bytes (%7.2f kByte/s)
|
||||
Transferred: %10s (%s)
|
||||
Errors: %10d
|
||||
Checks: %10d
|
||||
Transferred: %10d
|
||||
Elapsed time: %10v
|
||||
`,
|
||||
s.bytes, speed,
|
||||
SizeSuffix(s.bytes).Unit("Bytes"), SizeSuffix(speed).Unit("Bytes/s"),
|
||||
s.errors,
|
||||
s.checks,
|
||||
s.transfers,
|
||||
@@ -147,7 +146,7 @@ Elapsed time: %10v
|
||||
|
||||
// Log outputs the StatsInfo to the log
|
||||
func (s *StatsInfo) Log() {
|
||||
log.Printf("%v\n", s)
|
||||
Log(nil, "%v\n", s)
|
||||
}
|
||||
|
||||
// Bytes updates the stats for bytes bytes
|
||||
@@ -203,17 +202,17 @@ func (s *StatsInfo) Error() {
|
||||
}
|
||||
|
||||
// Checking adds a check into the stats
|
||||
func (s *StatsInfo) Checking(o Object) {
|
||||
func (s *StatsInfo) Checking(remote string) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
s.checking[o.Remote()] = struct{}{}
|
||||
s.checking[remote] = struct{}{}
|
||||
}
|
||||
|
||||
// DoneChecking removes a check from the stats
|
||||
func (s *StatsInfo) DoneChecking(o Object) {
|
||||
func (s *StatsInfo) DoneChecking(remote string) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
delete(s.checking, o.Remote())
|
||||
delete(s.checking, remote)
|
||||
s.checks++
|
||||
}
|
||||
|
||||
@@ -225,17 +224,17 @@ func (s *StatsInfo) GetTransfers() int64 {
|
||||
}
|
||||
|
||||
// Transferring adds a transfer into the stats
|
||||
func (s *StatsInfo) Transferring(o Object) {
|
||||
func (s *StatsInfo) Transferring(remote string) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
s.transferring[o.Remote()] = struct{}{}
|
||||
s.transferring[remote] = struct{}{}
|
||||
}
|
||||
|
||||
// DoneTransferring removes a transfer from the stats
|
||||
func (s *StatsInfo) DoneTransferring(o Object) {
|
||||
func (s *StatsInfo) DoneTransferring(remote string) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
delete(s.transferring, o.Remote())
|
||||
delete(s.transferring, remote)
|
||||
s.transfers++
|
||||
}
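
The change from Object to a plain remote string suggests the following calling pattern; this is a hypothetical caller, not code from this change:

// Inside package fs, around a transfer of src (an Object):
remote := src.Remote()
Stats.Transferring(remote)
defer Stats.DoneTransferring(remote)
// ... perform the copy ...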
|
||||
|
||||
@@ -256,6 +255,7 @@ type Account struct {
|
||||
lpTime time.Time // Time of last average measurement
|
||||
lpBytes int // Number of bytes read since last measurement
|
||||
avg ewma.MovingAverage // Moving average of last few measurements
|
||||
closed bool // set if the file is closed
|
||||
exit chan struct{} // channel that will be closed when transfer is finished
|
||||
}
|
||||
|
||||
@@ -408,9 +408,13 @@ func (file *Account) String() string {
|
||||
|
||||
// Close the object
|
||||
func (file *Account) Close() error {
|
||||
close(file.exit)
|
||||
file.mu.Lock()
|
||||
defer file.mu.Unlock()
|
||||
if file.closed {
|
||||
return nil
|
||||
}
|
||||
file.closed = true
|
||||
close(file.exit)
|
||||
Stats.inProgress.clear(file.name)
|
||||
return file.in.Close()
|
||||
}
|
||||
|
||||
16
fs/all/all.go
Normal file
@@ -0,0 +1,16 @@
package all

import (
	// Active file systems
	_ "github.com/ncw/rclone/amazonclouddrive"
	_ "github.com/ncw/rclone/b2"
	_ "github.com/ncw/rclone/drive"
	_ "github.com/ncw/rclone/dropbox"
	_ "github.com/ncw/rclone/googlecloudstorage"
	_ "github.com/ncw/rclone/hubic"
	_ "github.com/ncw/rclone/local"
	_ "github.com/ncw/rclone/onedrive"
	_ "github.com/ncw/rclone/s3"
	_ "github.com/ncw/rclone/swift"
	_ "github.com/ncw/rclone/yandex"
)
200
fs/buffer.go
Normal file
@@ -0,0 +1,200 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// asyncReader will do async read-ahead from the input reader
|
||||
// and make the data available as an io.Reader.
|
||||
// This should be fully transparent, except that once an error
|
||||
// has been returned from the Reader, it will not recover.
|
||||
type asyncReader struct {
|
||||
in io.ReadCloser // Input reader
|
||||
ready chan *buffer // Buffers ready to be handed to the reader
|
||||
reuse chan *buffer // Buffers to reuse for input reading
|
||||
exit chan struct{} // Closes when finished
|
||||
buffers int // Number of buffers
|
||||
err error // If an error has occurred it is here
|
||||
cur *buffer // Current buffer being served
|
||||
exited chan struct{} // Channel is closed when the async reader shuts down
|
||||
closed bool // Has the parent reader been closed?
|
||||
}
|
||||
|
||||
// newAsyncReader returns a reader that will asynchronously read from
|
||||
// the supplied Reader into a number of buffers each with a given size.
|
||||
// It will start reading from the input at once, maybe even before this
|
||||
// function has returned.
|
||||
// The input can be read from the returned reader.
|
||||
// When done use Close to release the buffers and close the supplied input.
|
||||
func newAsyncReader(rd io.ReadCloser, buffers, size int) (io.ReadCloser, error) {
|
||||
if size <= 0 {
|
||||
return nil, errors.New("buffer size too small")
|
||||
}
|
||||
if buffers <= 0 {
|
||||
return nil, errors.New("number of buffers too small")
|
||||
}
|
||||
if rd == nil {
|
||||
return nil, errors.New("nil reader supplied")
|
||||
}
|
||||
a := &asyncReader{}
|
||||
a.init(rd, buffers, size)
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func (a *asyncReader) init(rd io.ReadCloser, buffers, size int) {
|
||||
a.in = rd
|
||||
a.ready = make(chan *buffer, buffers)
|
||||
a.reuse = make(chan *buffer, buffers)
|
||||
a.exit = make(chan struct{}, 0)
|
||||
a.exited = make(chan struct{}, 0)
|
||||
a.buffers = buffers
|
||||
a.cur = nil
|
||||
|
||||
// Create buffers
|
||||
for i := 0; i < buffers; i++ {
|
||||
a.reuse <- newBuffer(size)
|
||||
}
|
||||
|
||||
// Start async reader
|
||||
go func() {
|
||||
// Ensure that when we exit this is signalled.
|
||||
defer close(a.exited)
|
||||
for {
|
||||
select {
|
||||
case b := <-a.reuse:
|
||||
err := b.read(a.in)
|
||||
a.ready <- b
|
||||
if err != nil {
|
||||
close(a.ready)
|
||||
return
|
||||
}
|
||||
case <-a.exit:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Read will return the next available data.
|
||||
func (a *asyncReader) fill() (err error) {
|
||||
if a.cur.isEmpty() {
|
||||
if a.cur != nil {
|
||||
a.reuse <- a.cur
|
||||
a.cur = nil
|
||||
}
|
||||
b, ok := <-a.ready
|
||||
if !ok {
|
||||
return a.err
|
||||
}
|
||||
a.cur = b
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read will return the next available data.
|
||||
func (a *asyncReader) Read(p []byte) (n int, err error) {
|
||||
// Swap buffer and maybe return error
|
||||
err = a.fill()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Copy what we can
|
||||
n = copy(p, a.cur.buffer())
|
||||
a.cur.increment(n)
|
||||
|
||||
// If at end of buffer, return any error, if present
|
||||
if a.cur.isEmpty() {
|
||||
a.err = a.cur.err
|
||||
return n, a.err
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// WriteTo writes data to w until there's no more data to write or when an error occurs.
|
||||
// The return value n is the number of bytes written.
|
||||
// Any error encountered during the write is also returned.
|
||||
func (a *asyncReader) WriteTo(w io.Writer) (n int64, err error) {
|
||||
n = 0
|
||||
for {
|
||||
err = a.fill()
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
n2, err := w.Write(a.cur.buffer())
|
||||
a.cur.increment(n2)
|
||||
n += int64(n2)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
if a.cur.err != nil {
|
||||
a.err = a.cur.err
|
||||
return n, a.cur.err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Close will ensure that the underlying async reader is shut down.
|
||||
// It will also close the input supplied on newAsyncReader.
|
||||
func (a *asyncReader) Close() (err error) {
|
||||
select {
|
||||
case <-a.exited:
|
||||
default:
|
||||
close(a.exit)
|
||||
<-a.exited
|
||||
}
|
||||
if !a.closed {
|
||||
a.closed = true
|
||||
return a.in.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Internal buffer
|
||||
// If an error is present, it must be returned
|
||||
// once all buffer content has been served.
|
||||
type buffer struct {
|
||||
buf []byte
|
||||
err error
|
||||
offset int
|
||||
size int
|
||||
}
|
||||
|
||||
func newBuffer(size int) *buffer {
|
||||
return &buffer{buf: make([]byte, size), err: nil, size: size}
|
||||
}
|
||||
|
||||
// isEmpty returns true if the offset is at the end of the
// buffer, or if the buffer is nil
|
||||
func (b *buffer) isEmpty() bool {
|
||||
if b == nil {
|
||||
return true
|
||||
}
|
||||
if len(b.buf)-b.offset <= 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// read into start of the buffer from the supplied reader,
|
||||
// resets the offset and updates the size of the buffer.
|
||||
// Any error encountered during the read is returned.
|
||||
func (b *buffer) read(rd io.Reader) error {
|
||||
var n int
|
||||
n, b.err = rd.Read(b.buf[0:b.size])
|
||||
b.buf = b.buf[0:n]
|
||||
b.offset = 0
|
||||
return b.err
|
||||
}
|
||||
|
||||
// Return the buffer at current offset
|
||||
func (b *buffer) buffer() []byte {
|
||||
return b.buf[b.offset:]
|
||||
}
|
||||
|
||||
// increment the offset
|
||||
func (b *buffer) increment(n int) {
|
||||
b.offset += n
|
||||
}
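
A brief usage sketch for the read-ahead reader above; this is a hypothetical in-package helper (newAsyncReader is unexported), the buffer count and size are illustrative, and io is assumed to be imported.

// copyWithReadAhead wraps src with 4 read-ahead buffers of 1 MB each
// and streams the data to dst, closing the reader when done.
func copyWithReadAhead(dst io.Writer, src io.ReadCloser) (int64, error) {
	ar, err := newAsyncReader(src, 4, 1024*1024)
	if err != nil {
		return 0, err
	}
	n, err := io.Copy(dst, ar)
	if err == io.EOF {
		// asyncReader's WriteTo reports io.EOF at the end of the stream
		// (see TestAsyncWriteTo below), so treat it as success.
		err = nil
	}
	// Close stops the reader goroutine and closes src as well.
	if cerr := ar.Close(); cerr != nil && err == nil {
		err = cerr
	}
	return n, err
}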
|
||||
223
fs/buffer_test.go
Normal file
@@ -0,0 +1,223 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
"testing/iotest"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAsyncReader(t *testing.T) {
|
||||
buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
|
||||
ar, err := newAsyncReader(buf, 4, 10000)
|
||||
require.NoError(t, err)
|
||||
|
||||
var dst = make([]byte, 100)
|
||||
n, err := ar.Read(dst)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 10, n)
|
||||
|
||||
n, err = ar.Read(dst)
|
||||
assert.Equal(t, io.EOF, err)
|
||||
assert.Equal(t, 0, n)
|
||||
|
||||
// Test read after error
|
||||
n, err = ar.Read(dst)
|
||||
assert.Equal(t, io.EOF, err)
|
||||
assert.Equal(t, 0, n)
|
||||
|
||||
err = ar.Close()
|
||||
require.NoError(t, err)
|
||||
// Test double close
|
||||
err = ar.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test Close without reading everything
|
||||
buf = ioutil.NopCloser(bytes.NewBuffer(make([]byte, 50000)))
|
||||
ar, err = newAsyncReader(buf, 4, 100)
|
||||
require.NoError(t, err)
|
||||
err = ar.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
}
|
||||
|
||||
func TestAsyncWriteTo(t *testing.T) {
|
||||
buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
|
||||
ar, err := newAsyncReader(buf, 4, 10000)
|
||||
require.NoError(t, err)
|
||||
|
||||
var dst = &bytes.Buffer{}
|
||||
n, err := io.Copy(dst, ar)
|
||||
assert.Equal(t, io.EOF, err)
|
||||
assert.Equal(t, int64(10), n)
|
||||
|
||||
// Should still return EOF
|
||||
n, err = io.Copy(dst, ar)
|
||||
assert.Equal(t, io.EOF, err)
|
||||
assert.Equal(t, int64(0), n)
|
||||
|
||||
err = ar.Close()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestAsyncReaderErrors(t *testing.T) {
|
||||
// test nil reader
|
||||
_, err := newAsyncReader(nil, 4, 10000)
|
||||
require.Error(t, err)
|
||||
|
||||
// invalid buffer number
|
||||
buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
|
||||
_, err = newAsyncReader(buf, 0, 10000)
|
||||
require.Error(t, err)
|
||||
_, err = newAsyncReader(buf, -1, 10000)
|
||||
require.Error(t, err)
|
||||
|
||||
// invalid buffer size
|
||||
_, err = newAsyncReader(buf, 4, 0)
|
||||
require.Error(t, err)
|
||||
_, err = newAsyncReader(buf, 4, -1)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
// Complex read tests, leveraged from "bufio".
|
||||
|
||||
type readMaker struct {
|
||||
name string
|
||||
fn func(io.Reader) io.Reader
|
||||
}
|
||||
|
||||
var readMakers = []readMaker{
|
||||
{"full", func(r io.Reader) io.Reader { return r }},
|
||||
{"byte", iotest.OneByteReader},
|
||||
{"half", iotest.HalfReader},
|
||||
{"data+err", iotest.DataErrReader},
|
||||
{"timeout", iotest.TimeoutReader},
|
||||
}
|
||||
|
||||
// Call Read to accumulate the text of a file
|
||||
func reads(buf io.Reader, m int) string {
|
||||
var b [1000]byte
|
||||
nb := 0
|
||||
for {
|
||||
n, err := buf.Read(b[nb : nb+m])
|
||||
nb += n
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil && err != iotest.ErrTimeout {
|
||||
panic("Data: " + err.Error())
|
||||
} else if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return string(b[0:nb])
|
||||
}
|
||||
|
||||
type bufReader struct {
|
||||
name string
|
||||
fn func(io.Reader) string
|
||||
}
|
||||
|
||||
var bufreaders = []bufReader{
|
||||
{"1", func(b io.Reader) string { return reads(b, 1) }},
|
||||
{"2", func(b io.Reader) string { return reads(b, 2) }},
|
||||
{"3", func(b io.Reader) string { return reads(b, 3) }},
|
||||
{"4", func(b io.Reader) string { return reads(b, 4) }},
|
||||
{"5", func(b io.Reader) string { return reads(b, 5) }},
|
||||
{"7", func(b io.Reader) string { return reads(b, 7) }},
|
||||
}
|
||||
|
||||
const minReadBufferSize = 16
|
||||
|
||||
var bufsizes = []int{
|
||||
0, minReadBufferSize, 23, 32, 46, 64, 93, 128, 1024, 4096,
|
||||
}
|
||||
|
||||
// Test various input buffer sizes, number of buffers and read sizes.
|
||||
func TestAsyncReaderSizes(t *testing.T) {
|
||||
var texts [31]string
|
||||
str := ""
|
||||
all := ""
|
||||
for i := 0; i < len(texts)-1; i++ {
|
||||
texts[i] = str + "\n"
|
||||
all += texts[i]
|
||||
str += string(i%26 + 'a')
|
||||
}
|
||||
texts[len(texts)-1] = all
|
||||
|
||||
for h := 0; h < len(texts); h++ {
|
||||
text := texts[h]
|
||||
for i := 0; i < len(readMakers); i++ {
|
||||
for j := 0; j < len(bufreaders); j++ {
|
||||
for k := 0; k < len(bufsizes); k++ {
|
||||
for l := 1; l < 10; l++ {
|
||||
readmaker := readMakers[i]
|
||||
bufreader := bufreaders[j]
|
||||
bufsize := bufsizes[k]
|
||||
read := readmaker.fn(strings.NewReader(text))
|
||||
buf := bufio.NewReaderSize(read, bufsize)
|
||||
ar, _ := newAsyncReader(ioutil.NopCloser(buf), l, 100)
|
||||
s := bufreader.fn(ar)
|
||||
// "timeout" expects the Reader to recover, asyncReader does not.
|
||||
if s != text && readmaker.name != "timeout" {
|
||||
t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
|
||||
readmaker.name, bufreader.name, bufsize, text, s)
|
||||
}
|
||||
err := ar.Close()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test various input buffer sizes, number of buffers and read sizes.
|
||||
func TestAsyncReaderWriteTo(t *testing.T) {
|
||||
var texts [31]string
|
||||
str := ""
|
||||
all := ""
|
||||
for i := 0; i < len(texts)-1; i++ {
|
||||
texts[i] = str + "\n"
|
||||
all += texts[i]
|
||||
str += string(i%26 + 'a')
|
||||
}
|
||||
texts[len(texts)-1] = all
|
||||
|
||||
for h := 0; h < len(texts); h++ {
|
||||
text := texts[h]
|
||||
for i := 0; i < len(readMakers); i++ {
|
||||
for j := 0; j < len(bufreaders); j++ {
|
||||
for k := 0; k < len(bufsizes); k++ {
|
||||
for l := 1; l < 10; l++ {
|
||||
readmaker := readMakers[i]
|
||||
bufreader := bufreaders[j]
|
||||
bufsize := bufsizes[k]
|
||||
read := readmaker.fn(strings.NewReader(text))
|
||||
buf := bufio.NewReaderSize(read, bufsize)
|
||||
ar, _ := newAsyncReader(ioutil.NopCloser(buf), l, 100)
|
||||
dst := &bytes.Buffer{}
|
||||
wt := ar.(io.WriterTo)
|
||||
_, err := wt.WriteTo(dst)
|
||||
if err != nil && err != io.EOF && err != iotest.ErrTimeout {
|
||||
t.Fatal("Copy:", err)
|
||||
}
|
||||
s := dst.String()
|
||||
// "timeout" expects the Reader to recover, asyncReader does not.
|
||||
if s != text && readmaker.name != "timeout" {
|
||||
t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
|
||||
readmaker.name, bufreader.name, bufsize, text, s)
|
||||
}
|
||||
err = ar.Close()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
9
fs/closed_conn.go
Normal file
@@ -0,0 +1,9 @@
// +build !windows

package fs

// isClosedConnErrorPlatform reports whether err is an error from use
// of a closed network connection using platform specific error codes.
func isClosedConnErrorPlatform(err error) bool {
	return false
}
34
fs/closed_conn_win.go
Normal file
@@ -0,0 +1,34 @@
|
||||
// +build windows
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"net"
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// isClosedConnErrorPlatform reports whether err is an error from use
|
||||
// of a closed network connection using platform specific error codes.
|
||||
//
|
||||
// Code adapted from net/http
|
||||
func isClosedConnErrorPlatform(err error) bool {
|
||||
if oe, ok := err.(*net.OpError); ok {
|
||||
if se, ok := oe.Err.(*os.SyscallError); ok {
|
||||
if errno, ok := se.Err.(syscall.Errno); ok {
|
||||
const (
|
||||
WSAECONNABORTED syscall.Errno = 10053
|
||||
WSAHOST_NOT_FOUND syscall.Errno = 11001
|
||||
WSATRY_AGAIN syscall.Errno = 11002
|
||||
WSAENETRESET syscall.Errno = 10052
|
||||
WSAETIMEDOUT syscall.Errno = 10060
|
||||
)
|
||||
switch errno {
|
||||
case syscall.WSAECONNRESET, WSAECONNABORTED, WSAHOST_NOT_FOUND, WSATRY_AGAIN, WSAENETRESET, WSAETIMEDOUT:
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
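
A hypothetical cross-platform wrapper, not part of this change, showing how the platform hook above might be consumed; the text match is an assumption borrowed from the net/http approach mentioned in the comment, and strings is assumed to be imported.

// isClosedConnError is illustrative only: it combines a generic check on
// the error text with the platform specific check defined above.
func isClosedConnError(err error) bool {
	if err == nil {
		return false
	}
	if strings.Contains(err.Error(), "use of closed network connection") {
		return true
	}
	return isClosedConnErrorPlatform(err)
}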
|
||||
585
fs/config.go
@@ -4,8 +4,14 @@ package fs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"crypto/tls"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math"
|
||||
"net/http"
|
||||
@@ -16,14 +22,30 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/Unknwon/goconfig"
|
||||
"github.com/mreiferson/go-httpclient"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/pflag"
|
||||
"golang.org/x/crypto/nacl/secretbox"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
const (
|
||||
configFileName = ".rclone.conf"
|
||||
|
||||
// ConfigToken is the key used to store the token under
|
||||
ConfigToken = "token"
|
||||
|
||||
// ConfigClientID is the config key used to store the client id
|
||||
ConfigClientID = "client_id"
|
||||
|
||||
// ConfigClientSecret is the config key used to store the client secret
|
||||
ConfigClientSecret = "client_secret"
|
||||
|
||||
// ConfigAutomatic indicates that we want non-interactive configuration
|
||||
ConfigAutomatic = "config_automatic"
|
||||
)
|
||||
|
||||
// SizeSuffix is parsed by flag with k/M/G suffixes
|
||||
@@ -40,33 +62,57 @@ var (
|
||||
// Config is the global config
|
||||
Config = &ConfigInfo{}
|
||||
// Flags
|
||||
verbose = pflag.BoolP("verbose", "v", false, "Print lots more stuff")
|
||||
quiet = pflag.BoolP("quiet", "q", false, "Print as little stuff as possible")
|
||||
modifyWindow = pflag.DurationP("modify-window", "", time.Nanosecond, "Max time diff to be considered the same")
|
||||
checkers = pflag.IntP("checkers", "", 8, "Number of checkers to run in parallel.")
|
||||
transfers = pflag.IntP("transfers", "", 4, "Number of file transfers to run in parallel.")
|
||||
configFile = pflag.StringP("config", "", ConfigPath, "Config file.")
|
||||
checkSum = pflag.BoolP("checksum", "c", false, "Skip based on checksum & size, not mod-time & size")
|
||||
sizeOnly = pflag.BoolP("size-only", "", false, "Skip based on size only, not mod-time or checksum")
|
||||
dryRun = pflag.BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
|
||||
connectTimeout = pflag.DurationP("contimeout", "", 60*time.Second, "Connect timeout")
|
||||
timeout = pflag.DurationP("timeout", "", 5*60*time.Second, "IO idle timeout")
|
||||
dumpHeaders = pflag.BoolP("dump-headers", "", false, "Dump HTTP headers - may contain sensitive info")
|
||||
dumpBodies = pflag.BoolP("dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
|
||||
bwLimit SizeSuffix
|
||||
verbose = pflag.BoolP("verbose", "v", false, "Print lots more stuff")
|
||||
quiet = pflag.BoolP("quiet", "q", false, "Print as little stuff as possible")
|
||||
modifyWindow = pflag.DurationP("modify-window", "", time.Nanosecond, "Max time diff to be considered the same")
|
||||
checkers = pflag.IntP("checkers", "", 8, "Number of checkers to run in parallel.")
|
||||
transfers = pflag.IntP("transfers", "", 4, "Number of file transfers to run in parallel.")
|
||||
configFile = pflag.StringP("config", "", ConfigPath, "Config file.")
|
||||
checkSum = pflag.BoolP("checksum", "c", false, "Skip based on checksum & size, not mod-time & size")
|
||||
sizeOnly = pflag.BoolP("size-only", "", false, "Skip based on size only, not mod-time or checksum")
|
||||
ignoreTimes = pflag.BoolP("ignore-times", "I", false, "Don't skip files that match size and time - transfer all files")
|
||||
ignoreExisting = pflag.BoolP("ignore-existing", "", false, "Skip all files that exist on destination")
|
||||
dryRun = pflag.BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
|
||||
connectTimeout = pflag.DurationP("contimeout", "", 60*time.Second, "Connect timeout")
|
||||
timeout = pflag.DurationP("timeout", "", 5*60*time.Second, "IO idle timeout")
|
||||
dumpHeaders = pflag.BoolP("dump-headers", "", false, "Dump HTTP headers - may contain sensitive info")
|
||||
dumpBodies = pflag.BoolP("dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
|
||||
skipVerify = pflag.BoolP("no-check-certificate", "", false, "Do not verify the server SSL certificate. Insecure.")
|
||||
AskPassword = pflag.BoolP("ask-password", "", true, "Allow prompt for password for encrypted configuration.")
|
||||
deleteBefore = pflag.BoolP("delete-before", "", false, "When synchronizing, delete files on destination before transfering")
|
||||
deleteDuring = pflag.BoolP("delete-during", "", false, "When synchronizing, delete files during transfer (default)")
|
||||
deleteAfter = pflag.BoolP("delete-after", "", false, "When synchronizing, delete files on destination after transfering")
|
||||
lowLevelRetries = pflag.IntP("low-level-retries", "", 10, "Number of low level retries to do.")
|
||||
updateOlder = pflag.BoolP("update", "u", false, "Skip files that are newer on the destination.")
|
||||
noGzip = pflag.BoolP("no-gzip-encoding", "", false, "Don't set Accept-Encoding: gzip.")
|
||||
dedupeMode = pflag.StringP("dedupe-mode", "", "interactive", "Dedupe mode interactive|skip|first|newest|oldest|rename.")
|
||||
maxDepth = pflag.IntP("max-depth", "", -1, "If set limits the recursion depth to this.")
|
||||
ignoreSize = pflag.BoolP("ignore-size", "", false, "Ignore size when skipping use mod-time or checksum.")
|
||||
noTraverse = pflag.BoolP("no-traverse", "", false, "Don't traverse destination file system on copy.")
|
||||
noUpdateModTime = pflag.BoolP("no-update-modtime", "", false, "Don't update destination mod-time if files identical.")
|
||||
bwLimit SizeSuffix
|
||||
|
||||
// Key to use for password en/decryption.
|
||||
// When nil, no encryption will be used for saving.
|
||||
configKey []byte
|
||||
)

func init() {
pflag.VarP(&bwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix k|M|G")
pflag.VarP(&bwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix b|k|M|G")
}

// Turn SizeSuffix into a string
|
||||
func (x SizeSuffix) String() string {
|
||||
// Turn SizeSuffix into a string and a suffix
|
||||
func (x SizeSuffix) string() (string, string) {
|
||||
scaled := float64(0)
|
||||
suffix := ""
|
||||
switch {
|
||||
case x < 0:
|
||||
return "off", ""
|
||||
case x == 0:
|
||||
return "0"
|
||||
return "0", ""
|
||||
case x < 1024:
|
||||
scaled = float64(x)
|
||||
suffix = ""
|
||||
case x < 1024*1024:
|
||||
scaled = float64(x) / 1024
|
||||
suffix = "k"
|
||||
@@ -78,15 +124,34 @@ func (x SizeSuffix) String() string {
|
||||
suffix = "G"
|
||||
}
|
||||
if math.Floor(scaled) == scaled {
|
||||
return fmt.Sprintf("%.0f%s", scaled, suffix)
|
||||
return fmt.Sprintf("%.0f", scaled), suffix
|
||||
}
|
||||
return fmt.Sprintf("%.3f%s", scaled, suffix)
|
||||
return fmt.Sprintf("%.3f", scaled), suffix
|
||||
}
|
||||
|
||||
// String turns SizeSuffix into a string
|
||||
func (x SizeSuffix) String() string {
|
||||
val, suffix := x.string()
|
||||
return val + suffix
|
||||
}
|
||||
|
||||
// Unit turns SizeSuffix into a string with a unit
|
||||
func (x SizeSuffix) Unit(unit string) string {
|
||||
val, suffix := x.string()
|
||||
if val == "off" {
|
||||
return val
|
||||
}
|
||||
return val + " " + suffix + unit
|
||||
}
|
||||
|
||||
// Set a SizeSuffix
|
||||
func (x *SizeSuffix) Set(s string) error {
|
||||
if len(s) == 0 {
|
||||
return fmt.Errorf("Empty string")
|
||||
return errors.New("empty string")
|
||||
}
|
||||
if strings.ToLower(s) == "off" {
|
||||
*x = -1
|
||||
return nil
|
||||
}
|
||||
suffix := s[len(s)-1]
|
||||
suffixLen := 1
|
||||
@@ -95,6 +160,8 @@ func (x *SizeSuffix) Set(s string) error {
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
|
||||
suffixLen = 0
|
||||
multiplier = 1 << 10
|
||||
case 'b', 'B':
|
||||
multiplier = 1
|
||||
case 'k', 'K':
|
||||
multiplier = 1 << 10
|
||||
case 'm', 'M':
|
||||
@@ -102,7 +169,7 @@ func (x *SizeSuffix) Set(s string) error {
|
||||
case 'g', 'G':
|
||||
multiplier = 1 << 30
|
||||
default:
|
||||
return fmt.Errorf("Bad suffix %q", suffix)
|
||||
return errors.Errorf("bad suffix %q", suffix)
|
||||
}
|
||||
s = s[:len(s)-suffixLen]
|
||||
value, err := strconv.ParseFloat(s, 64)
|
||||
@@ -110,7 +177,7 @@ func (x *SizeSuffix) Set(s string) error {
|
||||
return err
|
||||
}
|
||||
if value < 0 {
|
||||
return fmt.Errorf("Size can't be negative %q", s)
|
||||
return errors.Errorf("size can't be negative %q", s)
|
||||
}
|
||||
value *= multiplier
|
||||
*x = SizeSuffix(value)
|
||||
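The parsing rules above (a bare number defaults to kBytes, a trailing b|k|M|G selects the multiplier, and "off" maps to -1) can be tried in isolation. The following is a minimal standalone sketch of those rules, not rclone's own implementation; the parseSize name is invented for illustration.

package main

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
)

// parseSize mimics the SizeSuffix.Set rules sketched above.
func parseSize(s string) (int64, error) {
	if s == "" {
		return 0, errors.New("empty string")
	}
	if strings.EqualFold(s, "off") {
		return -1, nil
	}
	multiplier := float64(1 << 10) // a bare number is taken as kBytes
	switch last := s[len(s)-1]; {
	case last == 'b' || last == 'B':
		multiplier, s = 1, s[:len(s)-1]
	case last == 'k' || last == 'K':
		multiplier, s = 1<<10, s[:len(s)-1]
	case last == 'm' || last == 'M':
		multiplier, s = 1<<20, s[:len(s)-1]
	case last == 'g' || last == 'G':
		multiplier, s = 1<<30, s[:len(s)-1]
	case (last >= '0' && last <= '9') || last == '.':
		// no suffix, keep the default multiplier
	default:
		return 0, fmt.Errorf("bad suffix %q", last)
	}
	value, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return 0, err
	}
	if value < 0 {
		return 0, fmt.Errorf("size can't be negative %q", s)
	}
	return int64(value * multiplier), nil
}

func main() {
	for _, in := range []string{"100", "1b", "0.1k", "10G", "off"} {
		n, err := parseSize(in)
		fmt.Println(in, "->", n, err)
	}
}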
@@ -148,19 +215,33 @@ func Reveal(y string) string {
|
||||
|
||||
// ConfigInfo is filesystem config options
|
||||
type ConfigInfo struct {
|
||||
Verbose bool
|
||||
Quiet bool
|
||||
DryRun bool
|
||||
CheckSum bool
|
||||
SizeOnly bool
|
||||
ModifyWindow time.Duration
|
||||
Checkers int
|
||||
Transfers int
|
||||
ConnectTimeout time.Duration // Connect timeout
|
||||
Timeout time.Duration // Data channel timeout
|
||||
DumpHeaders bool
|
||||
DumpBodies bool
|
||||
Filter *Filter
|
||||
Verbose bool
|
||||
Quiet bool
|
||||
DryRun bool
|
||||
CheckSum bool
|
||||
SizeOnly bool
|
||||
IgnoreTimes bool
|
||||
IgnoreExisting bool
|
||||
ModifyWindow time.Duration
|
||||
Checkers int
|
||||
Transfers int
|
||||
ConnectTimeout time.Duration // Connect timeout
|
||||
Timeout time.Duration // Data channel timeout
|
||||
DumpHeaders bool
|
||||
DumpBodies bool
|
||||
Filter *Filter
|
||||
InsecureSkipVerify bool // Skip server certificate verification
|
||||
DeleteBefore bool // Delete before checking
|
||||
DeleteDuring bool // Delete during checking/transfer
|
||||
DeleteAfter bool // Delete after successful transfer.
|
||||
LowLevelRetries int
|
||||
UpdateOlder bool // Skip files that are newer on the destination
|
||||
NoGzip bool // Disable compression
|
||||
DedupeMode DeduplicateMode
|
||||
MaxDepth int
|
||||
IgnoreSize bool
|
||||
NoTraverse bool
|
||||
NoUpdateModTime bool
|
||||
}
|
||||
|
||||
// Transport returns an http.RoundTripper with the correct timeouts
|
||||
@@ -187,6 +268,24 @@ func (ci *ConfigInfo) Transport() http.RoundTripper {
|
||||
// ReadWriteTimeout, if non-zero, will set a deadline for every Read and
|
||||
// Write operation on the request connection.
|
||||
ReadWriteTimeout: ci.Timeout,
|
||||
|
||||
// InsecureSkipVerify controls whether a client verifies the
|
||||
// server's certificate chain and host name.
|
||||
// If InsecureSkipVerify is true, TLS accepts any certificate
|
||||
// presented by the server and any host name in that certificate.
|
||||
// In this mode, TLS is susceptible to man-in-the-middle attacks.
|
||||
// This should be used only for testing.
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: ci.InsecureSkipVerify},
|
||||
|
||||
// DisableCompression, if true, prevents the Transport from
|
||||
// requesting compression with an "Accept-Encoding: gzip"
|
||||
// request header when the Request contains no existing
|
||||
// Accept-Encoding value. If the Transport requests gzip on
|
||||
// its own and gets a gzipped response, it's transparently
|
||||
// decoded in the Response.Body. However, if the user
|
||||
// explicitly requested gzip it is not automatically
|
||||
// uncompressed.
|
||||
DisableCompression: *noGzip,
|
||||
}
|
||||
if ci.DumpHeaders || ci.DumpBodies {
|
||||
return NewLoggedTransport(t, ci.DumpBodies)
|
||||
@@ -215,10 +314,10 @@ func configHome() string {
|
||||
if home != "" {
|
||||
return home
|
||||
}
|
||||
log.Printf("Couldn't find home directory or read HOME environment variable.")
|
||||
log.Printf("Defaulting to storing config in current directory.")
|
||||
log.Printf("Use -config flag to workaround.")
|
||||
log.Printf("Error was: %v", err)
|
||||
ErrorLog(nil, "Couldn't find home directory or read HOME environment variable.")
|
||||
ErrorLog(nil, "Defaulting to storing config in current directory.")
|
||||
ErrorLog(nil, "Use -config flag to workaround.")
|
||||
ErrorLog(nil, "Error was: %v", err)
|
||||
return ""
|
||||
}
|
||||
|
||||
@@ -237,20 +336,61 @@ func LoadConfig() {
|
||||
Config.ConnectTimeout = *connectTimeout
|
||||
Config.CheckSum = *checkSum
|
||||
Config.SizeOnly = *sizeOnly
|
||||
Config.IgnoreTimes = *ignoreTimes
|
||||
Config.IgnoreExisting = *ignoreExisting
|
||||
Config.DumpHeaders = *dumpHeaders
|
||||
Config.DumpBodies = *dumpBodies
|
||||
Config.InsecureSkipVerify = *skipVerify
|
||||
Config.LowLevelRetries = *lowLevelRetries
|
||||
Config.UpdateOlder = *updateOlder
|
||||
Config.NoGzip = *noGzip
|
||||
Config.MaxDepth = *maxDepth
|
||||
Config.IgnoreSize = *ignoreSize
|
||||
Config.NoTraverse = *noTraverse
|
||||
Config.NoUpdateModTime = *noUpdateModTime
|
||||
|
||||
ConfigPath = *configFile
|
||||
|
||||
Config.DeleteBefore = *deleteBefore
|
||||
Config.DeleteDuring = *deleteDuring
|
||||
Config.DeleteAfter = *deleteAfter
|
||||
|
||||
switch strings.ToLower(*dedupeMode) {
|
||||
case "interactive":
|
||||
Config.DedupeMode = DeduplicateInteractive
|
||||
case "skip":
|
||||
Config.DedupeMode = DeduplicateSkip
|
||||
case "first":
|
||||
Config.DedupeMode = DeduplicateFirst
|
||||
case "newest":
|
||||
Config.DedupeMode = DeduplicateNewest
|
||||
case "oldest":
|
||||
Config.DedupeMode = DeduplicateOldest
|
||||
case "rename":
|
||||
Config.DedupeMode = DeduplicateRename
|
||||
default:
|
||||
log.Fatalf(`Unknown mode for --dedupe-mode %q.`, *dedupeMode)
|
||||
}
|
||||
|
||||
switch {
|
||||
case *deleteBefore && (*deleteDuring || *deleteAfter),
|
||||
*deleteDuring && *deleteAfter:
|
||||
log.Fatalf(`Only one of --delete-before, --delete-during or --delete-after can be used.`)
|
||||
|
||||
// If none are specified, use "during".
|
||||
case !*deleteBefore && !*deleteDuring && !*deleteAfter:
|
||||
Config.DeleteDuring = true
|
||||
}
|
||||
|
||||
if Config.IgnoreSize && Config.SizeOnly {
|
||||
log.Fatalf(`Can't use --size-only and --ignore-size together.`)
|
||||
}
|
||||
|
||||
// Load configuration file.
|
||||
var err error
|
||||
ConfigFile, err = goconfig.LoadConfigFile(ConfigPath)
|
||||
ConfigFile, err = loadConfigFile()
|
||||
if err != nil {
|
||||
log.Printf("Failed to load config file %v - using defaults: %v", ConfigPath, err)
|
||||
ConfigFile, err = goconfig.LoadConfigFile(os.DevNull)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to read null config file: %v", err)
|
||||
}
|
||||
log.Fatalf("Failed to config file \"%s\": %v", ConfigPath, err)
|
||||
}
|
||||
|
||||
// Load filters
|
||||
@@ -263,15 +403,189 @@ func LoadConfig() {
|
||||
startTokenBucket()
|
||||
}
|
||||
|
||||
// loadConfigFile will load a config file, and
|
||||
// automatically decrypt it.
|
||||
func loadConfigFile() (*goconfig.ConfigFile, error) {
|
||||
b, err := ioutil.ReadFile(ConfigPath)
|
||||
if err != nil {
|
||||
Log(nil, "Failed to load config file \"%v\" - using defaults: %v", ConfigPath, err)
|
||||
return goconfig.LoadFromReader(&bytes.Buffer{})
|
||||
}
|
||||
|
||||
// Find first non-empty line
|
||||
r := bufio.NewReader(bytes.NewBuffer(b))
|
||||
for {
|
||||
line, _, err := r.ReadLine()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
return goconfig.LoadFromReader(bytes.NewBuffer(b))
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
l := strings.TrimSpace(string(line))
|
||||
if len(l) == 0 || strings.HasPrefix(l, ";") || strings.HasPrefix(l, "#") {
|
||||
continue
|
||||
}
|
||||
// First non-empty or non-comment must be ENCRYPT_V0
|
||||
if l == "RCLONE_ENCRYPT_V0:" {
|
||||
break
|
||||
}
|
||||
if strings.HasPrefix(l, "RCLONE_ENCRYPT_V") {
|
||||
return nil, errors.New("unsupported configuration encryption - update rclone for support")
|
||||
}
|
||||
return goconfig.LoadFromReader(bytes.NewBuffer(b))
|
||||
}
|
||||
|
||||
// Encrypted content is base64 encoded.
|
||||
dec := base64.NewDecoder(base64.StdEncoding, r)
|
||||
box, err := ioutil.ReadAll(dec)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to load base64 encoded data")
|
||||
}
|
||||
if len(box) < 24+secretbox.Overhead {
|
||||
return nil, errors.New("Configuration data too short")
|
||||
}
|
||||
envpw := os.Getenv("RCLONE_CONFIG_PASS")
|
||||
|
||||
var out []byte
|
||||
for {
|
||||
if len(configKey) == 0 && envpw != "" {
|
||||
err := setPassword(envpw)
|
||||
if err != nil {
|
||||
fmt.Println("Using RCLONE_CONFIG_PASS returned:", err)
|
||||
envpw = ""
|
||||
} else {
|
||||
Debug(nil, "Using RCLONE_CONFIG_PASS password.")
|
||||
}
|
||||
}
|
||||
if len(configKey) == 0 {
|
||||
if !*AskPassword {
|
||||
return nil, errors.New("unable to decrypt configuration and not allowed to ask for password - set RCLONE_CONFIG_PASS to your configuration password")
|
||||
}
|
||||
getPassword("Enter configuration password:")
|
||||
}
|
||||
|
||||
// Nonce is first 24 bytes of the ciphertext
|
||||
var nonce [24]byte
|
||||
copy(nonce[:], box[:24])
|
||||
var key [32]byte
|
||||
copy(key[:], configKey[:32])
|
||||
|
||||
// Attempt to decrypt
|
||||
var ok bool
|
||||
out, ok = secretbox.Open(nil, box[24:], &nonce, &key)
|
||||
if ok {
|
||||
break
|
||||
}
|
||||
|
||||
// Retry
|
||||
ErrorLog(nil, "Couldn't decrypt configuration, most likely wrong password.")
|
||||
configKey = nil
|
||||
envpw = ""
|
||||
}
|
||||
return goconfig.LoadFromReader(bytes.NewBuffer(out))
|
||||
}
|
||||
|
||||
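As the loader above shows, an encrypted config is the RCLONE_ENCRYPT_V0: header followed by base64 of a 24-byte nonce and a NaCl secretbox ciphertext. Below is a minimal self-contained sketch of opening such a blob, assuming a 32-byte key is already in hand (rclone derives it from the password); it is illustrative only and not rclone's code.

package main

import (
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

// open splits the nonce off the front of the blob and decrypts the rest.
func open(blob []byte, key *[32]byte) ([]byte, bool) {
	if len(blob) < 24+secretbox.Overhead {
		return nil, false // too short to hold nonce + MAC
	}
	var nonce [24]byte
	copy(nonce[:], blob[:24]) // nonce is stored in front of the ciphertext
	return secretbox.Open(nil, blob[24:], &nonce, key)
}

func main() {
	var key [32]byte
	var nonce [24]byte
	// Seal a message in the same layout, then open it again as the loader would.
	blob := append(nonce[:], secretbox.Seal(nil, []byte("[remote]\ntype = local\n"), &nonce, &key)...)
	plain, ok := open(blob, &key)
	fmt.Println(ok, string(plain))
}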
// getPassword will query the user for a password the
|
||||
// first time it is required.
|
||||
func getPassword(q string) {
|
||||
if len(configKey) != 0 {
|
||||
return
|
||||
}
|
||||
for {
|
||||
fmt.Println(q)
|
||||
fmt.Print("password:")
|
||||
err := setPassword(ReadPassword())
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
fmt.Println("Error:", err)
|
||||
}
|
||||
}
|
||||
|
||||
// setPassword will set the configKey to the hash of
|
||||
// the password. If the length of the password is
|
||||
// zero after trimming+normalization, an error is returned.
|
||||
func setPassword(password string) error {
|
||||
if !utf8.ValidString(password) {
|
||||
return errors.New("password contains invalid utf8 characters")
|
||||
}
|
||||
// Remove leading+trailing whitespace
|
||||
password = strings.TrimSpace(password)
|
||||
|
||||
// Normalize to reduce weird variations.
|
||||
password = norm.NFKC.String(password)
|
||||
if len(password) == 0 {
|
||||
return errors.New("no characters in password")
|
||||
}
|
||||
// Create SHA256 hash of the password
|
||||
sha := sha256.New()
|
||||
_, err := sha.Write([]byte("[" + password + "][rclone-config]"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
configKey = sha.Sum(nil)
|
||||
return nil
|
||||
}
|
||||
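The key derivation above is deliberately simple: SHA-256 over the password wrapped in fixed markers yields the 32-byte secretbox key. A standalone sketch of the same idea follows; the deriveKey name is invented, not rclone's API.

package main

import (
	"crypto/sha256"
	"fmt"
)

// deriveKey hashes the password between fixed markers, as setPassword does,
// producing exactly the 32 bytes a NaCl secretbox key needs.
func deriveKey(password string) [32]byte {
	return sha256.Sum256([]byte("[" + password + "][rclone-config]"))
}

func main() {
	k := deriveKey("asdf")
	fmt.Printf("%x\n", k[:8]) // first bytes of the derived key
}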
|
||||
// SaveConfig saves configuration file.
|
||||
// if configKey has been set, the file will be encrypted.
|
||||
func SaveConfig() {
|
||||
err := goconfig.SaveConfigFile(ConfigFile, ConfigPath)
|
||||
if len(configKey) == 0 {
|
||||
err := goconfig.SaveConfigFile(ConfigFile, ConfigPath)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to save config file: %v", err)
|
||||
}
|
||||
err = os.Chmod(ConfigPath, 0600)
|
||||
if err != nil {
|
||||
ErrorLog(nil, "Failed to set permissions on config file: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
err := goconfig.SaveConfigData(ConfigFile, &buf)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to save config file: %v", err)
|
||||
}
|
||||
|
||||
f, err := os.Create(ConfigPath)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to save config file: %v", err)
|
||||
}
|
||||
|
||||
fmt.Fprintln(f, "# Encrypted rclone configuration File")
|
||||
fmt.Fprintln(f, "")
|
||||
fmt.Fprintln(f, "RCLONE_ENCRYPT_V0:")
|
||||
|
||||
// Generate new nonce and write it to the start of the ciphertext
|
||||
var nonce [24]byte
|
||||
n, _ := rand.Read(nonce[:])
|
||||
if n != 24 {
|
||||
log.Fatalf("nonce short read: %d", n)
|
||||
}
|
||||
enc := base64.NewEncoder(base64.StdEncoding, f)
|
||||
_, err = enc.Write(nonce[:])
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to write config file: %v", err)
|
||||
}
|
||||
|
||||
var key [32]byte
|
||||
copy(key[:], configKey[:32])
|
||||
|
||||
b := secretbox.Seal(nil, buf.Bytes(), &nonce, &key)
|
||||
_, err = enc.Write(b)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to write config file: %v", err)
|
||||
}
|
||||
_ = enc.Close()
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to close config file: %v", err)
|
||||
}
|
||||
|
||||
err = os.Chmod(ConfigPath, 0600)
|
||||
if err != nil {
|
||||
log.Printf("Failed to set permissions on config file: %v", err)
|
||||
ErrorLog(nil, "Failed to set permissions on config file: %v", err)
|
||||
}
|
||||
}
|
||||
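A compact sketch of the on-disk framing SaveConfig produces: a comment line, a blank line, the version header, then one base64 stream containing the random nonce followed by the sealed data. This writes to a bytes.Buffer rather than the config file and is illustrative only, assuming a key already derived from the password.

package main

import (
	"bytes"
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"log"

	"golang.org/x/crypto/nacl/secretbox"
)

func main() {
	var key [32]byte // derived from the password in the real flow
	plaintext := []byte("[remote]\ntype = local\n")

	var out bytes.Buffer
	fmt.Fprintln(&out, "# Encrypted rclone configuration File")
	fmt.Fprintln(&out, "")
	fmt.Fprintln(&out, "RCLONE_ENCRYPT_V0:")

	// Fresh random nonce, written first so the loader can find it.
	var nonce [24]byte
	if _, err := rand.Read(nonce[:]); err != nil {
		log.Fatal(err)
	}
	enc := base64.NewEncoder(base64.StdEncoding, &out)
	_, _ = enc.Write(nonce[:])
	_, _ = enc.Write(secretbox.Seal(nil, plaintext, &nonce, &key))
	_ = enc.Close() // flush any buffered base64 bytes

	fmt.Println(out.String())
}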
|
||||
@@ -341,13 +655,34 @@ func Choose(what string, defaults, help []string, newOk bool) string {
|
||||
}
|
||||
fmt.Println()
|
||||
for i, text := range defaults {
|
||||
var lines []string
|
||||
if help != nil {
|
||||
parts := strings.Split(help[i], "\n")
|
||||
for _, part := range parts {
|
||||
fmt.Printf(" * %s\n", part)
|
||||
lines = append(lines, parts...)
|
||||
}
|
||||
lines = append(lines, fmt.Sprintf("%q", text))
|
||||
pos := i + 1
|
||||
if len(lines) == 1 {
|
||||
fmt.Printf("%2d > %s\n", pos, text)
|
||||
} else {
|
||||
mid := (len(lines) - 1) / 2
|
||||
for i, line := range lines {
|
||||
var sep rune
|
||||
switch i {
|
||||
case 0:
|
||||
sep = '/'
|
||||
case len(lines) - 1:
|
||||
sep = '\\'
|
||||
default:
|
||||
sep = '|'
|
||||
}
|
||||
number := " "
|
||||
if i == mid {
|
||||
number = fmt.Sprintf("%2d", pos)
|
||||
}
|
||||
fmt.Printf("%s %c %s\n", number, sep, line)
|
||||
}
|
||||
}
|
||||
fmt.Printf("%2d) %s\n", i+1, text)
|
||||
}
|
||||
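The rendering introduced above draws a choice whose help spans several lines with '/', '|' and '\' brackets and puts the option number on the middle line. A standalone sketch of that rendering follows; the sample option strings are invented.

package main

import "fmt"

// printChoice draws one menu entry in the bracketed style shown above.
func printChoice(pos int, lines []string) {
	if len(lines) == 1 {
		fmt.Printf("%2d > %s\n", pos, lines[0])
		return
	}
	mid := (len(lines) - 1) / 2
	for i, line := range lines {
		sep := '|'
		switch i {
		case 0:
			sep = '/'
		case len(lines) - 1:
			sep = '\\'
		}
		number := "  "
		if i == mid {
			number = fmt.Sprintf("%2d", pos)
		}
		fmt.Printf("%s %c %s\n", number, sep, line)
	}
}

func main() {
	printChoice(1, []string{"Help line one", "Help line two", `"value"`})
	printChoice(2, []string{"single line entry"})
}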
for {
|
||||
fmt.Printf("%s> ", what)
|
||||
@@ -365,6 +700,25 @@ func Choose(what string, defaults, help []string, newOk bool) string {
|
||||
}
|
||||
}
|
||||
|
||||
// ChooseNumber asks the user to enter a number between min and max
|
||||
// inclusive prompting them with what.
|
||||
func ChooseNumber(what string, min, max int) int {
|
||||
for {
|
||||
fmt.Printf("%s> ", what)
|
||||
result := ReadLine()
|
||||
i, err := strconv.Atoi(result)
|
||||
if err != nil {
|
||||
fmt.Printf("Bad number: %v\n", err)
|
||||
continue
|
||||
}
|
||||
if i < min || i > max {
|
||||
fmt.Printf("Out of range - %d to %d inclusive\n", min, max)
|
||||
continue
|
||||
}
|
||||
return i
|
||||
}
|
||||
}
|
||||
|
||||
// ShowRemote shows the contents of the remote
|
||||
func ShowRemote(name string) {
|
||||
fmt.Printf("--------------------\n")
|
||||
@@ -387,7 +741,7 @@ func OkRemote(name string) bool {
|
||||
ConfigFile.DeleteSection(name)
|
||||
return true
|
||||
default:
|
||||
log.Printf("Bad choice %d", i)
|
||||
ErrorLog(nil, "Bad choice %d", i)
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -424,14 +778,26 @@ func ChooseOption(o *Option) string {
|
||||
return ReadLine()
|
||||
}
|
||||
|
||||
// fsOption returns an Option describing the possible remotes
|
||||
func fsOption() *Option {
|
||||
o := &Option{
|
||||
Name: "Storage",
|
||||
Help: "Type of storage to configure.",
|
||||
}
|
||||
for _, item := range fsRegistry {
|
||||
example := OptionExample{
|
||||
Value: item.Name,
|
||||
Help: item.Description,
|
||||
}
|
||||
o.Examples = append(o.Examples, example)
|
||||
}
|
||||
o.Examples.Sort()
|
||||
return o
|
||||
}
|
||||
|
||||
// NewRemote make a new remote from its name
|
||||
func NewRemote(name string) {
|
||||
fmt.Printf("What type of source is it?\n")
|
||||
types := []string{}
|
||||
for _, item := range fsRegistry {
|
||||
types = append(types, item.Name)
|
||||
}
|
||||
newType := Choose("type", types, nil, false)
|
||||
newType := ChooseOption(fsOption())
|
||||
ConfigFile.SetValue(name, "type", newType)
|
||||
fs, err := Find(newType)
|
||||
if err != nil {
|
||||
@@ -480,14 +846,14 @@ func DeleteRemote(name string) {
|
||||
func EditConfig() {
|
||||
for {
|
||||
haveRemotes := len(ConfigFile.GetSectionList()) != 0
|
||||
what := []string{"eEdit existing remote", "nNew remote", "dDelete remote", "qQuit config"}
|
||||
what := []string{"eEdit existing remote", "nNew remote", "dDelete remote", "sSet configuration password", "qQuit config"}
|
||||
if haveRemotes {
|
||||
fmt.Printf("Current remotes:\n\n")
|
||||
ShowRemotes()
|
||||
fmt.Printf("\n")
|
||||
} else {
|
||||
fmt.Printf("No remotes found - make a new one\n")
|
||||
what = append(what[1:2], what[3])
|
||||
what = append(what[1:2], what[3:]...)
|
||||
}
|
||||
switch i := Command(what); i {
|
||||
case 'e':
|
||||
@@ -498,11 +864,14 @@ func EditConfig() {
|
||||
for {
|
||||
fmt.Printf("name> ")
|
||||
name := ReadLine()
|
||||
parts := matcher.FindStringSubmatch(name + ":")
|
||||
switch {
|
||||
case name == "":
|
||||
fmt.Printf("Can't use empty name\n")
|
||||
case isDriveLetter(name):
|
||||
fmt.Printf("Can't use %q as it can be confused a drive letter\n", name)
|
||||
case len(parts) != 3 || parts[2] != "":
|
||||
fmt.Printf("Can't use %q as it has invalid characters in it %v\n", name, parts)
|
||||
default:
|
||||
NewRemote(name)
|
||||
break nameLoop
|
||||
@@ -511,8 +880,104 @@ func EditConfig() {
|
||||
case 'd':
|
||||
name := ChooseRemote()
|
||||
DeleteRemote(name)
|
||||
case 's':
|
||||
SetPassword()
|
||||
case 'q':
|
||||
return
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetPassword will allow the user to modify the current
|
||||
// configuration encryption settings.
|
||||
func SetPassword() {
|
||||
for {
|
||||
if len(configKey) > 0 {
|
||||
fmt.Println("Your configuration is encrypted.")
|
||||
what := []string{"cChange Password", "uUnencrypt configuration", "qQuit to main menu"}
|
||||
switch i := Command(what); i {
|
||||
case 'c':
|
||||
changePassword()
|
||||
SaveConfig()
|
||||
fmt.Println("Password changed")
|
||||
continue
|
||||
case 'u':
|
||||
configKey = nil
|
||||
SaveConfig()
|
||||
continue
|
||||
case 'q':
|
||||
return
|
||||
}
|
||||
|
||||
} else {
|
||||
fmt.Println("Your configuration is not encrypted.")
|
||||
fmt.Println("If you add a password, you will protect your login information to cloud services.")
|
||||
what := []string{"aAdd Password", "qQuit to main menu"}
|
||||
switch i := Command(what); i {
|
||||
case 'a':
|
||||
changePassword()
|
||||
SaveConfig()
|
||||
fmt.Println("Password set")
|
||||
continue
|
||||
case 'q':
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// changePassword will query the user twice
|
||||
// for a password. If the same password is entered
|
||||
// twice the key is updated.
|
||||
func changePassword() {
|
||||
for {
|
||||
configKey = nil
|
||||
getPassword("Enter NEW configuration password:")
|
||||
a := configKey
|
||||
// re-enter password
|
||||
configKey = nil
|
||||
getPassword("Confirm NEW password:")
|
||||
b := configKey
|
||||
if bytes.Equal(a, b) {
|
||||
return
|
||||
}
|
||||
fmt.Println("Passwords does not match!")
|
||||
}
|
||||
}
|
||||
|
||||
// Authorize is for remote authorization of headless machines.
|
||||
//
|
||||
// It expects 1 or 3 arguments
|
||||
//
|
||||
// rclone authorize "fs name"
|
||||
// rclone authorize "fs name" "client id" "client secret"
|
||||
func Authorize(args []string) {
|
||||
switch len(args) {
|
||||
case 1, 3:
|
||||
default:
|
||||
log.Fatalf("Invalid number of arguments: %d", len(args))
|
||||
}
|
||||
newType := args[0]
|
||||
fs, err := Find(newType)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to find fs: %v", err)
|
||||
}
|
||||
|
||||
if fs.Config == nil {
|
||||
log.Fatalf("Can't authorize fs %q", newType)
|
||||
}
|
||||
// Name used for temporary fs
|
||||
name := "**temp-fs**"
|
||||
|
||||
// Make sure we delete it
|
||||
defer DeleteRemote(name)
|
||||
|
||||
// Indicate that we want fully automatic configuration.
|
||||
ConfigFile.SetValue(name, ConfigAutomatic, "yes")
|
||||
if len(args) == 3 {
|
||||
ConfigFile.SetValue(name, ConfigClientID, args[1])
|
||||
ConfigFile.SetValue(name, ConfigClientSecret, args[2])
|
||||
}
|
||||
fs.Config(name)
|
||||
}
|
||||
|
||||
26
fs/config_read_password.go
Normal file
@@ -0,0 +1,26 @@
// ReadPassword for OSes which are supported by golang.org/x/crypto/ssh/terminal
// See https://github.com/golang/go/issues/14441 - plan9
// https://github.com/golang/go/issues/13085 - solaris

// +build !solaris,!plan9

package fs

import (
"fmt"
"log"
"os"
"strings"

"golang.org/x/crypto/ssh/terminal"
)

// ReadPassword reads a password without echoing it to the terminal.
func ReadPassword() string {
line, err := terminal.ReadPassword(int(os.Stdin.Fd()))
fmt.Println("")
if err != nil {
log.Fatalf("Failed to read password: %v", err)
}
return strings.TrimSpace(string(line))
}
12
fs/config_read_password_unsupported.go
Normal file
@@ -0,0 +1,12 @@
// ReadPassword for OSes which are not supported by golang.org/x/crypto/ssh/terminal
// See https://github.com/golang/go/issues/14441 - plan9
// https://github.com/golang/go/issues/13085 - solaris

// +build solaris plan9

package fs

// ReadPassword reads a password, echoing it to the terminal.
func ReadPassword() string {
return ReadLine()
}
@@ -1,6 +1,11 @@
package fs

import "testing"
import (
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestSizeSuffixString(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
@@ -8,28 +13,51 @@ func TestSizeSuffixString(t *testing.T) {
|
||||
want string
|
||||
}{
|
||||
{0, "0"},
|
||||
{102, "0.100k"},
|
||||
{102, "102"},
|
||||
{1024, "1k"},
|
||||
{1024 * 1024, "1M"},
|
||||
{1024 * 1024 * 1024, "1G"},
|
||||
{10 * 1024 * 1024 * 1024, "10G"},
|
||||
{10.1 * 1024 * 1024 * 1024, "10.100G"},
|
||||
{-1, "off"},
|
||||
{-100, "off"},
|
||||
} {
|
||||
ss := SizeSuffix(test.in)
|
||||
got := ss.String()
|
||||
if test.want != got {
|
||||
t.Errorf("Want %v got %v", test.want, got)
|
||||
}
|
||||
assert.Equal(t, test.want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSizeSuffixUnit(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in float64
|
||||
want string
|
||||
}{
|
||||
{0, "0 Bytes"},
|
||||
{102, "102 Bytes"},
|
||||
{1024, "1 kBytes"},
|
||||
{1024 * 1024, "1 MBytes"},
|
||||
{1024 * 1024 * 1024, "1 GBytes"},
|
||||
{10 * 1024 * 1024 * 1024, "10 GBytes"},
|
||||
{10.1 * 1024 * 1024 * 1024, "10.100 GBytes"},
|
||||
{-1, "off"},
|
||||
{-100, "off"},
|
||||
} {
|
||||
ss := SizeSuffix(test.in)
|
||||
got := ss.Unit("Bytes")
|
||||
assert.Equal(t, test.want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSizeSuffixSet(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
want int64
|
||||
err bool
|
||||
}{
|
||||
{"0", 0, false},
|
||||
{"1b", 1, false},
|
||||
{"102B", 102, false},
|
||||
{"0.1k", 102, false},
|
||||
{"0.1", 102, false},
|
||||
{"1K", 1024, false},
|
||||
@@ -38,6 +66,8 @@ func TestSizeSuffixSet(t *testing.T) {
|
||||
{"1M", 1024 * 1024, false},
|
||||
{"1.g", 1024 * 1024 * 1024, false},
|
||||
{"10G", 10 * 1024 * 1024 * 1024, false},
|
||||
{"off", -1, false},
|
||||
{"OFF", -1, false},
|
||||
{"", 0, true},
|
||||
{"1p", 0, true},
|
||||
{"1.p", 0, true},
|
||||
@@ -46,13 +76,12 @@ func TestSizeSuffixSet(t *testing.T) {
|
||||
} {
|
||||
ss := SizeSuffix(0)
|
||||
err := ss.Set(test.in)
|
||||
if (err != nil) != test.err {
|
||||
t.Errorf("%d: Expecting error %v but got error %v", i, test.err, err)
|
||||
}
|
||||
got := int64(ss)
|
||||
if test.want != got {
|
||||
t.Errorf("%d: Want %v got %v", i, test.want, got)
|
||||
if test.err {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.Equal(t, test.want, int64(ss))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -65,11 +94,120 @@ func TestReveal(t *testing.T) {
|
||||
{"2sTcyNrA", "potato"},
|
||||
} {
|
||||
got := Reveal(test.in)
|
||||
if got != test.want {
|
||||
t.Errorf("%q: want %q got %q", test.in, test.want, got)
|
||||
}
|
||||
if Obscure(got) != test.in {
|
||||
t.Errorf("%q: wasn't bidirectional", test.in)
|
||||
}
|
||||
assert.Equal(t, test.want, got)
|
||||
assert.Equal(t, test.in, Obscure(got), "not bidirectional")
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigLoad(t *testing.T) {
|
||||
oldConfigPath := ConfigPath
|
||||
ConfigPath = "./testdata/plain.conf"
|
||||
defer func() {
|
||||
ConfigPath = oldConfigPath
|
||||
}()
|
||||
configKey = nil // reset password
|
||||
c, err := loadConfigFile()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sections := c.GetSectionList()
|
||||
var expect = []string{"RCLONE_ENCRYPT_V0", "nounc", "unc"}
|
||||
assert.Equal(t, expect, sections)
|
||||
|
||||
keys := c.GetKeyList("nounc")
|
||||
expect = []string{"type", "nounc"}
|
||||
assert.Equal(t, expect, keys)
|
||||
}
|
||||
|
||||
func TestConfigLoadEncrypted(t *testing.T) {
|
||||
var err error
|
||||
oldConfigPath := ConfigPath
|
||||
ConfigPath = "./testdata/encrypted.conf"
|
||||
defer func() {
|
||||
ConfigPath = oldConfigPath
|
||||
configKey = nil // reset password
|
||||
}()
|
||||
|
||||
// Set correct password
|
||||
err = setPassword("asdf")
|
||||
require.NoError(t, err)
|
||||
c, err := loadConfigFile()
|
||||
require.NoError(t, err)
|
||||
sections := c.GetSectionList()
|
||||
var expect = []string{"nounc", "unc"}
|
||||
assert.Equal(t, expect, sections)
|
||||
|
||||
keys := c.GetKeyList("nounc")
|
||||
expect = []string{"type", "nounc"}
|
||||
assert.Equal(t, expect, keys)
|
||||
}
|
||||
|
||||
func TestConfigLoadEncryptedFailures(t *testing.T) {
|
||||
var err error
|
||||
|
||||
// This file should be too short to be decoded.
|
||||
oldConfigPath := ConfigPath
|
||||
ConfigPath = "./testdata/enc-short.conf"
|
||||
defer func() { ConfigPath = oldConfigPath }()
|
||||
_, err = loadConfigFile()
|
||||
require.Error(t, err)
|
||||
|
||||
// This file contains invalid base64 characters.
|
||||
ConfigPath = "./testdata/enc-invalid.conf"
|
||||
_, err = loadConfigFile()
|
||||
require.Error(t, err)
|
||||
|
||||
// This file claims a newer encryption version than we support.
|
||||
ConfigPath = "./testdata/enc-too-new.conf"
|
||||
_, err = loadConfigFile()
|
||||
require.Error(t, err)
|
||||
|
||||
// This file does not exist, so defaults should be returned.
|
||||
ConfigPath = "./testdata/filenotfound.conf"
|
||||
c, err := loadConfigFile()
|
||||
require.NoError(t, err)
|
||||
require.Len(t, c.GetSectionList(), 0, "Expected 0-length section")
|
||||
}
|
||||
|
||||
func TestPassword(t *testing.T) {
|
||||
defer func() {
|
||||
configKey = nil // reset password
|
||||
}()
|
||||
var err error
|
||||
// Empty password should give error
|
||||
err = setPassword(" \t ")
|
||||
require.Error(t, err)
|
||||
|
||||
// Test invalid utf8 sequence
|
||||
err = setPassword(string([]byte{0xff, 0xfe, 0xfd}) + "abc")
|
||||
require.Error(t, err)
|
||||
|
||||
// Simple check of wrong passwords
|
||||
hashedKeyCompare(t, "mis", "match", false)
|
||||
|
||||
// Check that passwords match with trimmed whitespace
|
||||
hashedKeyCompare(t, " abcdef \t", "abcdef", true)
|
||||
|
||||
// Check that passwords match after unicode normalization
|
||||
hashedKeyCompare(t, "ff\u0041\u030A", "ffÅ", true)
|
||||
|
||||
// Check that passwords preserves case
|
||||
hashedKeyCompare(t, "abcdef", "ABCDEF", false)
|
||||
|
||||
}
|
||||
|
||||
func hashedKeyCompare(t *testing.T, a, b string, shouldMatch bool) {
|
||||
err := setPassword(a)
|
||||
require.NoError(t, err)
|
||||
k1 := configKey
|
||||
|
||||
err = setPassword(b)
|
||||
require.NoError(t, err)
|
||||
k2 := configKey
|
||||
|
||||
if shouldMatch {
|
||||
assert.Equal(t, k1, k2)
|
||||
} else {
|
||||
assert.NotEqual(t, k1, k2)
|
||||
}
|
||||
}
|
||||
|
||||
235
fs/error.go
Normal file
@@ -0,0 +1,235 @@
|
||||
// Errors and error handling
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Retrier is an optional interface for error as to whether the
|
||||
// operation should be retried at a high level.
|
||||
//
|
||||
// This should be returned from Update or Put methods as required
|
||||
type Retrier interface {
|
||||
error
|
||||
Retry() bool
|
||||
}
|
||||
|
||||
// retryError is a type of error
|
||||
type retryError string
|
||||
|
||||
// Error interface
|
||||
func (r retryError) Error() string {
|
||||
return string(r)
|
||||
}
|
||||
|
||||
// Retry interface
|
||||
func (r retryError) Retry() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check interface
|
||||
var _ Retrier = retryError("")
|
||||
|
||||
// RetryErrorf makes an error which indicates it would like to be retried
|
||||
func RetryErrorf(format string, a ...interface{}) error {
|
||||
return retryError(fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
// wrappedRetryError is an error wrapped so it will satisfy the
|
||||
// Retrier interface and return true
|
||||
type wrappedRetryError struct {
|
||||
error
|
||||
}
|
||||
|
||||
// Retry interface
|
||||
func (err wrappedRetryError) Retry() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check interface
|
||||
var _ Retrier = wrappedRetryError{(error)(nil)}
|
||||
|
||||
// RetryError makes an error which indicates it would like to be retried
|
||||
func RetryError(err error) error {
|
||||
return wrappedRetryError{err}
|
||||
}
|
||||
|
||||
// IsRetryError returns true if err conforms to the Retry interface
|
||||
// and calling the Retry method returns true.
|
||||
func IsRetryError(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
err = errors.Cause(err)
|
||||
if r, ok := err.(Retrier); ok {
|
||||
return r.Retry()
|
||||
}
|
||||
return false
|
||||
}
|
||||
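The Retrier machinery above is an optional-interface pattern: an error is wrapped to mark it retryable, and the flag is recovered later by unwrapping with github.com/pkg/errors.Cause. The following is a self-contained miniature of the same pattern, not rclone's fs package itself.

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// retrier is the optional interface an error may implement.
type retrier interface {
	error
	Retry() bool
}

// retryErr wraps an error so it reports itself as retryable.
type retryErr struct{ error }

func (retryErr) Retry() bool { return true }

// isRetry unwraps to the root cause and asks it whether to retry.
func isRetry(err error) bool {
	if err == nil {
		return false
	}
	if r, ok := errors.Cause(err).(retrier); ok {
		return r.Retry()
	}
	return false
}

func main() {
	base := retryErr{errors.New("temporary glitch")}
	wrapped := errors.Wrap(base, "uploading chunk")
	fmt.Println(isRetry(wrapped)) // true: Cause unwraps back to retryErr
}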
|
||||
// Fataler is an optional interface for error as to whether the
|
||||
// operation should cause the entire operation to finish immediately.
|
||||
//
|
||||
// This should be returned from Update or Put methods as required
|
||||
type Fataler interface {
|
||||
error
|
||||
Fatal() bool
|
||||
}
|
||||
|
||||
// wrappedFatalError is an error wrapped so it will satisfy the
|
||||
// Retrier interface and return true
|
||||
type wrappedFatalError struct {
|
||||
error
|
||||
}
|
||||
|
||||
// Fatal interface
|
||||
func (err wrappedFatalError) Fatal() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check interface
|
||||
var _ Fataler = wrappedFatalError{(error)(nil)}
|
||||
|
||||
// FatalError makes an error which indicates it is a fatal error and
|
||||
// the sync should stop.
|
||||
func FatalError(err error) error {
|
||||
return wrappedFatalError{err}
|
||||
}
|
||||
|
||||
// IsFatalError returns true if err conforms to the Fatal interface
|
||||
// and calling the Fatal method returns true.
|
||||
func IsFatalError(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
err = errors.Cause(err)
|
||||
if r, ok := err.(Fataler); ok {
|
||||
return r.Fatal()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// NoRetrier is an optional interface for error as to whether the
|
||||
// operation should not be retried at a high level.
|
||||
//
|
||||
// If only NoRetry errors are returned in a sync then the sync won't
|
||||
// be retried.
|
||||
//
|
||||
// This should be returned from Update or Put methods as required
|
||||
type NoRetrier interface {
|
||||
error
|
||||
NoRetry() bool
|
||||
}
|
||||
|
||||
// wrappedNoRetryError is an error wrapped so it will satisfy the
|
||||
// Retrier interface and return true
|
||||
type wrappedNoRetryError struct {
|
||||
error
|
||||
}
|
||||
|
||||
// NoRetry interface
|
||||
func (err wrappedNoRetryError) NoRetry() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check interface
|
||||
var _ NoRetrier = wrappedNoRetryError{(error)(nil)}
|
||||
|
||||
// NoRetryError makes an error which indicates the sync shouldn't be
|
||||
// retried.
|
||||
func NoRetryError(err error) error {
|
||||
return wrappedNoRetryError{err}
|
||||
}
|
||||
|
||||
// IsNoRetryError returns true if err conforms to the NoRetry
|
||||
// interface and calling the NoRetry method returns true.
|
||||
func IsNoRetryError(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
err = errors.Cause(err)
|
||||
if r, ok := err.(NoRetrier); ok {
|
||||
return r.NoRetry()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isClosedConnError reports whether err is an error from use of a closed
|
||||
// network connection.
|
||||
//
|
||||
// Code adapted from net/http
|
||||
func isClosedConnError(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Note that this error isn't exported so we have to do a
|
||||
// string comparison :-(
|
||||
str := err.Error()
|
||||
if strings.Contains(str, "use of closed network connection") {
|
||||
return true
|
||||
}
|
||||
|
||||
return isClosedConnErrorPlatform(err)
|
||||
}
|
||||
|
||||
// ShouldRetry looks at an error and tries to work out if retrying the
|
||||
// operation that caused it would be a good idea. It returns true if
|
||||
// the error implements Timeout() or Temporary() or if the error
|
||||
// indicates a premature closing of the connection.
|
||||
func ShouldRetry(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Find root cause if available
|
||||
err = errors.Cause(err)
|
||||
|
||||
// Unwrap url.Error
|
||||
if urlErr, ok := err.(*url.Error); ok {
|
||||
err = urlErr.Err
|
||||
}
|
||||
|
||||
// Look for premature closing of connection
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check for net error Timeout()
|
||||
if x, ok := err.(interface {
|
||||
Timeout() bool
|
||||
}); ok && x.Timeout() {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check for net error Temporary()
|
||||
if x, ok := err.(interface {
|
||||
Temporary() bool
|
||||
}); ok && x.Temporary() {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// ShouldRetryHTTP returns a boolean as to whether this resp deserves to be retried.
|
||||
// It checks to see if the HTTP response code is in the slice
|
||||
// retryErrorCodes.
|
||||
func ShouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool {
|
||||
if resp == nil {
|
||||
return false
|
||||
}
|
||||
for _, e := range retryErrorCodes {
|
||||
if resp.StatusCode == e {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
290
fs/filter.go
@@ -6,9 +6,13 @@ import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
@@ -23,15 +27,17 @@ var (
|
||||
includeRule = pflag.StringP("include", "", "", "Include files matching pattern")
|
||||
includeFrom = pflag.StringP("include-from", "", "", "Read include patterns from file")
|
||||
filesFrom = pflag.StringP("files-from", "", "", "Read list of source-file names from file")
|
||||
minSize SizeSuffix
|
||||
maxSize SizeSuffix
|
||||
minAge = pflag.StringP("min-age", "", "", "Don't transfer any file younger than this in s or suffix ms|s|m|h|d|w|M|y")
|
||||
maxAge = pflag.StringP("max-age", "", "", "Don't transfer any file older than this in s or suffix ms|s|m|h|d|w|M|y")
|
||||
minSize = SizeSuffix(-1)
|
||||
maxSize = SizeSuffix(-1)
|
||||
dumpFilters = pflag.BoolP("dump-filters", "", false, "Dump the filters to the output")
|
||||
//cvsExclude = pflag.BoolP("cvs-exclude", "C", false, "Exclude files in the same way CVS does")
|
||||
)
|
||||
|
||||
func init() {
|
||||
pflag.VarP(&minSize, "min-size", "", "Don't transfer any file smaller than this in k or suffix k|M|G")
|
||||
pflag.VarP(&maxSize, "max-size", "", "Don't transfer any file larger than this in k or suffix k|M|G")
|
||||
pflag.VarP(&minSize, "min-size", "", "Don't transfer any file smaller than this in k or suffix b|k|M|G")
|
||||
pflag.VarP(&maxSize, "max-size", "", "Don't transfer any file larger than this in k or suffix b|k|M|G")
|
||||
}
|
||||
|
||||
// rule is one filter rule
|
||||
@@ -54,6 +60,40 @@ func (r *rule) String() string {
|
||||
return fmt.Sprintf("%s %s", c, r.Regexp.String())
|
||||
}
|
||||
|
||||
// rules is a slice of rules
|
||||
type rules struct {
|
||||
rules []rule
|
||||
existing map[string]struct{}
|
||||
}
|
||||
|
||||
// add adds a rule if it doesn't exist already
|
||||
func (rs *rules) add(Include bool, re *regexp.Regexp) {
|
||||
if rs.existing == nil {
|
||||
rs.existing = make(map[string]struct{})
|
||||
}
|
||||
newRule := rule{
|
||||
Include: Include,
|
||||
Regexp: re,
|
||||
}
|
||||
newRuleString := newRule.String()
|
||||
if _, ok := rs.existing[newRuleString]; ok {
|
||||
return // rule already exists
|
||||
}
|
||||
rs.rules = append(rs.rules, newRule)
|
||||
rs.existing[newRuleString] = struct{}{}
|
||||
}
|
||||
|
||||
// clear clears all the rules
|
||||
func (rs *rules) clear() {
|
||||
rs.rules = nil
|
||||
rs.existing = nil
|
||||
}
|
||||
|
||||
// len returns the number of rules
|
||||
func (rs *rules) len() int {
|
||||
return len(rs.rules)
|
||||
}
|
||||
|
||||
// filesMap describes the map of files to transfer
|
||||
type filesMap map[string]struct{}
|
||||
|
||||
@@ -62,8 +102,50 @@ type Filter struct {
|
||||
DeleteExcluded bool
|
||||
MinSize int64
|
||||
MaxSize int64
|
||||
rules []rule
|
||||
files filesMap
|
||||
ModTimeFrom time.Time
|
||||
ModTimeTo time.Time
|
||||
fileRules rules
|
||||
dirRules rules
|
||||
files filesMap // files if filesFrom
|
||||
dirs filesMap // dirs from filesFrom
|
||||
}
|
||||
|
||||
// We use time conventions
|
||||
var ageSuffixes = []struct {
|
||||
Suffix string
|
||||
Multiplier time.Duration
|
||||
}{
|
||||
{Suffix: "ms", Multiplier: time.Millisecond},
|
||||
{Suffix: "s", Multiplier: time.Second},
|
||||
{Suffix: "m", Multiplier: time.Minute},
|
||||
{Suffix: "h", Multiplier: time.Hour},
|
||||
{Suffix: "d", Multiplier: time.Hour * 24},
|
||||
{Suffix: "w", Multiplier: time.Hour * 24 * 7},
|
||||
{Suffix: "M", Multiplier: time.Hour * 24 * 30},
|
||||
{Suffix: "y", Multiplier: time.Hour * 24 * 365},
|
||||
|
||||
// Default to second
|
||||
{Suffix: "", Multiplier: time.Second},
|
||||
}
|
||||
|
||||
// ParseDuration parses a duration string. Accept ms|s|m|h|d|w|M|y suffixes. Defaults to second if not provided
|
||||
func ParseDuration(age string) (time.Duration, error) {
|
||||
var period float64
|
||||
|
||||
for _, ageSuffix := range ageSuffixes {
|
||||
if strings.HasSuffix(age, ageSuffix.Suffix) {
|
||||
numberString := age[:len(age)-len(ageSuffix.Suffix)]
|
||||
var err error
|
||||
period, err = strconv.ParseFloat(numberString, 64)
|
||||
if err != nil {
|
||||
return time.Duration(0), err
|
||||
}
|
||||
period *= float64(ageSuffix.Multiplier)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return time.Duration(period), nil
|
||||
}
|
||||
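ParseDuration above turns an age flag such as --min-age 2d into a duration by stripping the suffix, parsing the number, and scaling by the suffix multiplier; the filter then subtracts that duration from "now" to get a modification-time cutoff. A small standalone sketch of the same idea follows (parseAge is an invented name, not rclone's API).

package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

var multipliers = map[string]time.Duration{
	"ms": time.Millisecond, "s": time.Second, "m": time.Minute,
	"h": time.Hour, "d": 24 * time.Hour, "w": 7 * 24 * time.Hour,
	"M": 30 * 24 * time.Hour, "y": 365 * 24 * time.Hour,
	"": time.Second, // bare numbers default to seconds
}

// parseAge tries each suffix in order and scales the numeric prefix by it.
func parseAge(age string) (time.Duration, error) {
	for _, suffix := range []string{"ms", "s", "m", "h", "d", "w", "M", "y", ""} {
		if strings.HasSuffix(age, suffix) {
			n, err := strconv.ParseFloat(strings.TrimSuffix(age, suffix), 64)
			if err != nil {
				return 0, err
			}
			return time.Duration(n * float64(multipliers[suffix])), nil
		}
	}
	return 0, fmt.Errorf("bad age %q", age)
}

func main() {
	d, _ := parseAge("2d")
	fmt.Println("cutoff for --min-age 2d:", time.Now().Add(-d).Format(time.RFC3339))
}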
|
||||
// NewFilter parses the command line options and creates a Filter object
|
||||
@@ -73,16 +155,14 @@ func NewFilter() (f *Filter, err error) {
|
||||
MinSize: int64(minSize),
|
||||
MaxSize: int64(maxSize),
|
||||
}
|
||||
addImplicitExclude := false
|
||||
|
||||
if *includeRule != "" {
|
||||
err = f.Add(true, *includeRule)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Add implicit exclude
|
||||
err = f.Add(false, "*")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
addImplicitExclude = true
|
||||
}
|
||||
if *includeFrom != "" {
|
||||
err := forEachLine(*includeFrom, func(line string) error {
|
||||
@@ -91,11 +171,7 @@ func NewFilter() (f *Filter, err error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Add implicit exclude
|
||||
err = f.Add(false, "*")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
addImplicitExclude = true
|
||||
}
|
||||
if *excludeRule != "" {
|
||||
err = f.Add(false, *excludeRule)
|
||||
@@ -131,6 +207,31 @@ func NewFilter() (f *Filter, err error) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if addImplicitExclude {
|
||||
err = f.Add(false, "/**")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if *minAge != "" {
|
||||
duration, err := ParseDuration(*minAge)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.ModTimeTo = time.Now().Add(-duration)
|
||||
Debug(nil, "--min-age %v to %v", duration, f.ModTimeTo)
|
||||
}
|
||||
if *maxAge != "" {
|
||||
duration, err := ParseDuration(*maxAge)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.ModTimeFrom = time.Now().Add(-duration)
|
||||
if !f.ModTimeTo.IsZero() && f.ModTimeTo.Before(f.ModTimeFrom) {
|
||||
return nil, errors.New("argument --min-age can't be larger than --max-age")
|
||||
}
|
||||
Debug(nil, "--max-age %v to %v", duration, f.ModTimeFrom)
|
||||
}
|
||||
if *dumpFilters {
|
||||
fmt.Println("--- start filters ---")
|
||||
fmt.Println(f.DumpFilters())
|
||||
@@ -139,17 +240,49 @@ func NewFilter() (f *Filter, err error) {
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// addDirGlobs adds directory globs from the file glob passed in
|
||||
func (f *Filter) addDirGlobs(Include bool, glob string) error {
|
||||
for _, dirGlob := range globToDirGlobs(glob) {
|
||||
// Don't add "/" as we always include the root
|
||||
if dirGlob == "/" {
|
||||
continue
|
||||
}
|
||||
dirRe, err := globToRegexp(dirGlob)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.dirRules.add(Include, dirRe)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add adds a filter rule with include or exclude status indicated
|
||||
func (f *Filter) Add(Include bool, glob string) error {
|
||||
isDirRule := strings.HasSuffix(glob, "/")
|
||||
isFileRule := !isDirRule
|
||||
if strings.HasSuffix(glob, "**") {
|
||||
isDirRule, isFileRule = true, true
|
||||
}
|
||||
re, err := globToRegexp(glob)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rule := rule{
|
||||
Include: Include,
|
||||
Regexp: re,
|
||||
if isFileRule {
|
||||
f.fileRules.add(Include, re)
|
||||
// If include rule work out what directories are needed to scan
|
||||
// if exclude rule, we can't rule anything out
|
||||
// Unless it is `*` which matches everything
|
||||
// NB ** and /** are DirRules
|
||||
if Include || glob == "*" {
|
||||
err = f.addDirGlobs(Include, glob)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if isDirRule {
|
||||
f.dirRules.add(Include, re)
|
||||
}
|
||||
f.rules = append(f.rules, rule)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -174,39 +307,51 @@ func (f *Filter) AddRule(rule string) error {
|
||||
case strings.HasPrefix(rule, "+ "):
|
||||
return f.Add(true, rule[2:])
|
||||
}
|
||||
return fmt.Errorf("Malformed rule %q", rule)
|
||||
return errors.Errorf("malformed rule %q", rule)
|
||||
}
|
||||
|
||||
// AddFile adds a single file to the files from list
|
||||
func (f *Filter) AddFile(file string) error {
|
||||
if f.files == nil {
|
||||
f.files = make(filesMap)
|
||||
f.dirs = make(filesMap)
|
||||
}
|
||||
file = strings.Trim(file, "/")
|
||||
f.files[file] = struct{}{}
|
||||
// Put all the parent directories into f.dirs
|
||||
for {
|
||||
file = path.Dir(file)
|
||||
if file == "." {
|
||||
break
|
||||
}
|
||||
if _, found := f.dirs[file]; found {
|
||||
break
|
||||
}
|
||||
f.dirs[file] = struct{}{}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
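AddFile above records not just the file itself but every ancestor directory, so that directory listing can later be pruned to paths that might contain a wanted file. A minimal sketch of that parent-directory walk, using the same path.Dir loop:

package main

import (
	"fmt"
	"path"
	"strings"
)

// parentDirs collects every ancestor directory of a file, the same walk
// AddFile uses to populate f.dirs.
func parentDirs(file string) []string {
	file = strings.Trim(file, "/") // as AddFile does, so the walk ends at "."
	var dirs []string
	for dir := path.Dir(file); dir != "."; dir = path.Dir(dir) {
		dirs = append(dirs, dir)
	}
	return dirs
}

func main() {
	fmt.Println(parentDirs("/path/to/dir/file2.png")) // [path/to/dir path/to path]
}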
|
||||
// Clear clears all the filter rules
|
||||
func (f *Filter) Clear() {
|
||||
f.rules = nil
|
||||
f.fileRules.clear()
|
||||
f.dirRules.clear()
|
||||
}
|
||||
|
||||
// Include returns whether this object should be included into the
|
||||
// sync or not
|
||||
func (f *Filter) Include(remote string, size int64) bool {
|
||||
// filesFrom takes precedence
|
||||
if f.files != nil {
|
||||
_, include := f.files[remote]
|
||||
return include
|
||||
}
|
||||
if f.MinSize != 0 && size < f.MinSize {
|
||||
return false
|
||||
}
|
||||
if f.MaxSize != 0 && size > f.MaxSize {
|
||||
return false
|
||||
}
|
||||
for _, rule := range f.rules {
|
||||
// InActive returns false if any filters are active
|
||||
func (f *Filter) InActive() bool {
|
||||
return (f.files == nil &&
|
||||
f.ModTimeFrom.IsZero() &&
|
||||
f.ModTimeTo.IsZero() &&
|
||||
f.MinSize < 0 &&
|
||||
f.MaxSize < 0 &&
|
||||
f.fileRules.len() == 0 &&
|
||||
f.dirRules.len() == 0)
|
||||
}
|
||||
|
||||
// includeRemote returns whether this remote passes the filter rules.
|
||||
func (f *Filter) includeRemote(remote string) bool {
|
||||
for _, rule := range f.fileRules.rules {
|
||||
if rule.Match(remote) {
|
||||
return rule.Include
|
||||
}
|
||||
@@ -214,6 +359,62 @@ func (f *Filter) Include(remote string, size int64) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// IncludeDirectory returns whether this directory should be included
|
||||
// in the sync or not.
|
||||
func (f *Filter) IncludeDirectory(remote string) bool {
|
||||
remote = strings.Trim(remote, "/")
|
||||
// filesFrom takes precedence
|
||||
if f.files != nil {
|
||||
_, include := f.dirs[remote]
|
||||
return include
|
||||
}
|
||||
remote += "/"
|
||||
for _, rule := range f.dirRules.rules {
|
||||
if rule.Match(remote) {
|
||||
return rule.Include
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Include returns whether this object should be included into the
|
||||
// sync or not
|
||||
func (f *Filter) Include(remote string, size int64, modTime time.Time) bool {
|
||||
// filesFrom takes precedence
|
||||
if f.files != nil {
|
||||
_, include := f.files[remote]
|
||||
return include
|
||||
}
|
||||
if !f.ModTimeFrom.IsZero() && modTime.Before(f.ModTimeFrom) {
|
||||
return false
|
||||
}
|
||||
if !f.ModTimeTo.IsZero() && modTime.After(f.ModTimeTo) {
|
||||
return false
|
||||
}
|
||||
if f.MinSize >= 0 && size < f.MinSize {
|
||||
return false
|
||||
}
|
||||
if f.MaxSize >= 0 && size > f.MaxSize {
|
||||
return false
|
||||
}
|
||||
return f.includeRemote(remote)
|
||||
}
|
||||
|
||||
// IncludeObject returns whether this object should be included into
|
||||
// the sync or not. This is a convenience function to avoid calling
|
||||
// o.ModTime(), which is an expensive operation.
|
||||
func (f *Filter) IncludeObject(o Object) bool {
|
||||
var modTime time.Time
|
||||
|
||||
if !f.ModTimeFrom.IsZero() || !f.ModTimeTo.IsZero() {
|
||||
modTime = o.ModTime()
|
||||
} else {
|
||||
modTime = time.Unix(0, 0)
|
||||
}
|
||||
|
||||
return f.Include(o.Remote(), o.Size(), modTime)
|
||||
}
|
||||
|
||||
// forEachLine calls fn on every line in the file pointed to by path
|
||||
//
|
||||
// It ignores empty lines and lines starting with '#' or ';'
|
||||
@@ -222,7 +423,7 @@ func forEachLine(path string, fn func(string) error) (err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer checkClose(in, &err)
|
||||
defer CheckClose(in, &err)
|
||||
scanner := bufio.NewScanner(in)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
@@ -241,8 +442,19 @@ func forEachLine(path string, fn func(string) error) (err error) {
|
||||
// DumpFilters dumps the filters in textual form, 1 per line
|
||||
func (f *Filter) DumpFilters() string {
|
||||
rules := []string{}
|
||||
for _, rule := range f.rules {
|
||||
if !f.ModTimeFrom.IsZero() {
|
||||
rules = append(rules, fmt.Sprintf("Last-modified date must be equal or greater than: %s", f.ModTimeFrom.String()))
|
||||
}
|
||||
if !f.ModTimeTo.IsZero() {
|
||||
rules = append(rules, fmt.Sprintf("Last-modified date must be equal or less than: %s", f.ModTimeTo.String()))
|
||||
}
|
||||
rules = append(rules, "--- File filter rules ---")
|
||||
for _, rule := range f.fileRules.rules {
|
||||
rules = append(rules, rule.String())
|
||||
}
|
||||
rules = append(rules, "--- Directory filter rules ---")
|
||||
for _, dirRule := range f.dirRules.rules {
|
||||
rules = append(rules, dirRule.String())
|
||||
}
|
||||
return strings.Join(rules, "\n")
|
||||
}
|
||||
|
||||
@@ -5,28 +5,53 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAgeSuffix(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
want float64
|
||||
err bool
|
||||
}{
|
||||
{"0", 0, false},
|
||||
{"", 0, true},
|
||||
{"1ms", float64(time.Millisecond), false},
|
||||
{"1s", float64(time.Second), false},
|
||||
{"1m", float64(time.Minute), false},
|
||||
{"1h", float64(time.Hour), false},
|
||||
{"1d", float64(time.Hour) * 24, false},
|
||||
{"1w", float64(time.Hour) * 24 * 7, false},
|
||||
{"1M", float64(time.Hour) * 24 * 30, false},
|
||||
{"1y", float64(time.Hour) * 24 * 365, false},
|
||||
{"1.5y", float64(time.Hour) * 24 * 365 * 1.5, false},
|
||||
{"-1s", -float64(time.Second), false},
|
||||
{"1.s", float64(time.Second), false},
|
||||
{"1x", 0, true},
|
||||
} {
|
||||
duration, err := ParseDuration(test.in)
|
||||
if test.err {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.Equal(t, test.want, float64(duration))
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewFilterDefault(t *testing.T) {
|
||||
f, err := NewFilter()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if f.DeleteExcluded != false {
|
||||
t.Errorf("DeleteExcluded want false got %v", f.DeleteExcluded)
|
||||
}
|
||||
if f.MinSize != 0 {
|
||||
t.Errorf("MinSize want 0 got %v", f.MinSize)
|
||||
}
|
||||
if f.MaxSize != 0 {
|
||||
t.Errorf("MaxSize want 0 got %v", f.MaxSize)
|
||||
}
|
||||
if len(f.rules) != 0 {
|
||||
t.Errorf("rules want non got %v", f.rules)
|
||||
}
|
||||
if f.files != nil {
|
||||
t.Errorf("files want none got %v", f.files)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
assert.False(t, f.DeleteExcluded)
|
||||
assert.Equal(t, int64(-1), f.MinSize)
|
||||
assert.Equal(t, int64(-1), f.MaxSize)
|
||||
assert.Len(t, f.fileRules.rules, 0)
|
||||
assert.Len(t, f.dirRules.rules, 0)
|
||||
assert.Nil(t, f.files)
|
||||
assert.True(t, f.InActive())
|
||||
}
|
||||
|
||||
// return a pointer to the string
|
||||
@@ -37,14 +62,13 @@ func stringP(s string) *string {
|
||||
// testFile creates a temp file with the contents
|
||||
func testFile(t *testing.T, contents string) *string {
|
||||
out, err := ioutil.TempFile("", "filter_test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer out.Close()
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := out.Close()
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
_, err = out.Write([]byte(contents))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
s := out.Name()
|
||||
return &s
|
||||
}
|
||||
@@ -80,8 +104,8 @@ func TestNewFilterFull(t *testing.T) {
|
||||
rm(*excludeFrom)
|
||||
rm(*includeFrom)
|
||||
rm(*filesFrom)
|
||||
minSize = 0
|
||||
maxSize = 0
|
||||
minSize = -1
|
||||
maxSize = -1
|
||||
deleteExcluded = &isFalse
|
||||
filterRule = &emptyString
|
||||
filterFrom = &emptyString
|
||||
@@ -93,137 +117,230 @@ func TestNewFilterFull(t *testing.T) {
|
||||
}()
|
||||
|
||||
f, err := NewFilter()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if f.DeleteExcluded != true {
|
||||
t.Errorf("DeleteExcluded want true got %v", f.DeleteExcluded)
|
||||
}
|
||||
if f.MinSize != mins {
|
||||
t.Errorf("MinSize want %v got %v", mins, f.MinSize)
|
||||
}
|
||||
if f.MaxSize != maxs {
|
||||
t.Errorf("MaxSize want %v got %v", maxs, f.MaxSize)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
assert.True(t, f.DeleteExcluded)
|
||||
assert.Equal(t, f.MinSize, mins)
|
||||
assert.Equal(t, f.MaxSize, maxs)
|
||||
got := f.DumpFilters()
|
||||
want := `+ (^|/)include1$
|
||||
- (^|/)[^/]*$
|
||||
want := `--- File filter rules ---
|
||||
+ (^|/)include1$
|
||||
+ (^|/)include2$
|
||||
+ (^|/)include3$
|
||||
- (^|/)[^/]*$
|
||||
- (^|/)exclude1$
|
||||
- (^|/)exclude2$
|
||||
- (^|/)exclude3$
|
||||
- (^|/)filter1$
|
||||
+ (^|/)filter2$
|
||||
- (^|/)filter3$`
|
||||
if got != want {
|
||||
t.Errorf("rules want %s got %s", want, got)
|
||||
}
|
||||
if len(f.files) != 2 {
|
||||
t.Errorf("files want 2 got %v", f.files)
|
||||
}
|
||||
- (^|/)filter3$
|
||||
- ^.*$
|
||||
--- Directory filter rules ---
|
||||
+ ^.*$
|
||||
- ^.*$`
|
||||
assert.Equal(t, want, got)
|
||||
assert.Len(t, f.files, 2)
|
||||
for _, name := range []string{"files1", "files2"} {
|
||||
_, ok := f.files[name]
|
||||
if !ok {
|
||||
t.Errorf("Didn't find file %q in f.files", name)
|
||||
}
|
||||
}
|
||||
assert.False(t, f.InActive())
|
||||
}
|
||||
|
||||
type includeTest struct {
|
||||
in string
|
||||
size int64
|
||||
want bool
|
||||
in string
|
||||
size int64
|
||||
modTime int64
|
||||
want bool
|
||||
}
|
||||
|
||||
func testInclude(t *testing.T, f *Filter, tests []includeTest) {
|
||||
for _, test := range tests {
|
||||
got := f.Include(test.in, test.size)
|
||||
if test.want != got {
|
||||
t.Errorf("%q,%d: want %v got %v", test.in, test.size, test.want, got)
|
||||
}
|
||||
got := f.Include(test.in, test.size, time.Unix(test.modTime, 0))
|
||||
assert.Equal(t, test.want, got, test.in, test.size, test.modTime)
|
||||
}
|
||||
}
|
||||
|
||||
type includeDirTest struct {
|
||||
in string
|
||||
want bool
|
||||
}
|
||||
|
||||
func testDirInclude(t *testing.T, f *Filter, tests []includeDirTest) {
|
||||
for _, test := range tests {
|
||||
got := f.IncludeDirectory(test.in)
|
||||
assert.Equal(t, test.want, got, test.in)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewFilterIncludeFiles(t *testing.T) {
|
||||
f, err := NewFilter()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
f.AddFile("file1.jpg")
|
||||
f.AddFile("/file2.jpg")
|
||||
require.NoError(t, err)
|
||||
err = f.AddFile("file1.jpg")
|
||||
require.NoError(t, err)
|
||||
err = f.AddFile("/file2.jpg")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, filesMap{
|
||||
"file1.jpg": {},
|
||||
"file2.jpg": {},
|
||||
}, f.files)
|
||||
assert.Equal(t, filesMap{}, f.dirs)
|
||||
testInclude(t, f, []includeTest{
|
||||
{"file1.jpg", 0, true},
|
||||
{"file2.jpg", 1, true},
|
||||
{"potato/file2.jpg", 2, false},
|
||||
{"file3.jpg", 3, false},
|
||||
{"file1.jpg", 0, 0, true},
|
||||
{"file2.jpg", 1, 0, true},
|
||||
{"potato/file2.jpg", 2, 0, false},
|
||||
{"file3.jpg", 3, 0, false},
|
||||
})
|
||||
assert.False(t, f.InActive())
|
||||
}
|
||||
|
||||
func TestNewFilterIncludeFilesDirs(t *testing.T) {
|
||||
f, err := NewFilter()
|
||||
require.NoError(t, err)
|
||||
for _, path := range []string{
|
||||
"path/to/dir/file1.png",
|
||||
"/path/to/dir/file2.png",
|
||||
"/path/to/file3.png",
|
||||
"/path/to/dir2/file4.png",
|
||||
} {
|
||||
err = f.AddFile(path)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.Equal(t, filesMap{
|
||||
"path": {},
|
||||
"path/to": {},
|
||||
"path/to/dir": {},
|
||||
"path/to/dir2": {},
|
||||
}, f.dirs)
|
||||
testDirInclude(t, f, []includeDirTest{
|
||||
{"path", true},
|
||||
{"path/to", true},
|
||||
{"path/to/", true},
|
||||
{"/path/to", true},
|
||||
{"/path/to/", true},
|
||||
{"path/to/dir", true},
|
||||
{"path/to/dir2", true},
|
||||
{"path/too", false},
|
||||
{"path/three", false},
|
||||
{"four", false},
|
||||
})
|
||||
}
|
||||
|
||||
func TestNewFilterMinSize(t *testing.T) {
|
||||
f, err := NewFilter()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
f.MinSize = 100
|
||||
testInclude(t, f, []includeTest{
|
||||
{"file1.jpg", 100, true},
|
||||
{"file2.jpg", 101, true},
|
||||
{"potato/file2.jpg", 99, false},
|
||||
{"file1.jpg", 100, 0, true},
|
||||
{"file2.jpg", 101, 0, true},
|
||||
{"potato/file2.jpg", 99, 0, false},
|
||||
})
|
||||
assert.False(t, f.InActive())
|
||||
}
|
||||
|
||||
func TestNewFilterMaxSize(t *testing.T) {
|
||||
f, err := NewFilter()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
f.MaxSize = 100
|
||||
testInclude(t, f, []includeTest{
|
||||
{"file1.jpg", 100, true},
|
||||
{"file2.jpg", 101, false},
|
||||
{"potato/file2.jpg", 99, true},
|
||||
{"file1.jpg", 100, 0, true},
|
||||
{"file2.jpg", 101, 0, false},
|
||||
{"potato/file2.jpg", 99, 0, true},
|
||||
})
|
||||
assert.False(t, f.InActive())
|
||||
}
|
||||
|
||||
func TestNewFilterMinAndMaxAge(t *testing.T) {
|
||||
f, err := NewFilter()
|
||||
require.NoError(t, err)
|
||||
f.ModTimeFrom = time.Unix(1440000002, 0)
|
||||
f.ModTimeTo = time.Unix(1440000003, 0)
|
||||
testInclude(t, f, []includeTest{
|
||||
{"file1.jpg", 100, 1440000000, false},
|
||||
{"file2.jpg", 101, 1440000001, false},
|
||||
{"file3.jpg", 102, 1440000002, true},
|
||||
{"potato/file1.jpg", 98, 1440000003, true},
|
||||
{"potato/file2.jpg", 99, 1440000004, false},
|
||||
})
|
||||
assert.False(t, f.InActive())
|
||||
}
|
||||
|
||||
func TestNewFilterMinAge(t *testing.T) {
|
||||
f, err := NewFilter()
|
||||
require.NoError(t, err)
|
||||
f.ModTimeTo = time.Unix(1440000002, 0)
|
||||
testInclude(t, f, []includeTest{
|
||||
{"file1.jpg", 100, 1440000000, true},
|
||||
{"file2.jpg", 101, 1440000001, true},
|
||||
{"file3.jpg", 102, 1440000002, true},
|
||||
{"potato/file1.jpg", 98, 1440000003, false},
|
||||
{"potato/file2.jpg", 99, 1440000004, false},
|
||||
})
|
||||
assert.False(t, f.InActive())
|
||||
}
|
||||
|
||||
func TestNewFilterMaxAge(t *testing.T) {
|
||||
f, err := NewFilter()
|
||||
require.NoError(t, err)
|
||||
f.ModTimeFrom = time.Unix(1440000002, 0)
|
||||
testInclude(t, f, []includeTest{
|
||||
{"file1.jpg", 100, 1440000000, false},
|
||||
{"file2.jpg", 101, 1440000001, false},
|
||||
{"file3.jpg", 102, 1440000002, true},
|
||||
{"potato/file1.jpg", 98, 1440000003, true},
|
||||
{"potato/file2.jpg", 99, 1440000004, true},
|
||||
})
|
||||
assert.False(t, f.InActive())
|
||||
}
|
||||
|
||||
func TestNewFilterMatches(t *testing.T) {
|
||||
f, err := NewFilter()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
add := func(s string) {
|
||||
err := f.AddRule(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
}
|
||||
add("+ cleared")
|
||||
add("!")
|
||||
add("- file1.jpg")
|
||||
add("+ file2.png")
|
||||
add("+ *.jpg")
|
||||
add("- *.png")
|
||||
add("- /file1.jpg")
|
||||
add("+ /file2.png")
|
||||
add("+ /*.jpg")
|
||||
add("- /*.png")
|
||||
add("- /potato")
|
||||
add("+ /sausage1")
|
||||
add("+ /sausage2*")
|
||||
add("+ /sausage3**")
|
||||
add("+ /a/*.jpg")
|
||||
add("- *")
|
||||
testInclude(t, f, []includeTest{
|
||||
{"cleared", 100, false},
|
||||
{"file1.jpg", 100, false},
|
||||
{"file2.png", 100, true},
|
||||
{"afile2.png", 100, false},
|
||||
{"file3.jpg", 101, true},
|
||||
{"file4.png", 101, false},
|
||||
{"potato", 101, false},
|
||||
{"sausage1", 101, true},
|
||||
{"sausage1/potato", 101, false},
|
||||
{"sausage2potato", 101, true},
|
||||
{"sausage2/potato", 101, false},
|
||||
{"sausage3/potato", 101, true},
|
||||
{"unicorn", 99, false},
|
||||
{"cleared", 100, 0, false},
|
||||
{"file1.jpg", 100, 0, false},
|
||||
{"file2.png", 100, 0, true},
|
||||
{"afile2.png", 100, 0, false},
|
||||
{"file3.jpg", 101, 0, true},
|
||||
{"file4.png", 101, 0, false},
|
||||
{"potato", 101, 0, false},
|
||||
{"sausage1", 101, 0, true},
|
||||
{"sausage1/potato", 101, 0, false},
|
||||
{"sausage2potato", 101, 0, true},
|
||||
{"sausage2/potato", 101, 0, false},
|
||||
{"sausage3/potato", 101, 0, true},
|
||||
{"a/one.jpg", 101, 0, true},
|
||||
{"a/one.png", 101, 0, false},
|
||||
{"unicorn", 99, 0, false},
|
||||
})
|
||||
testDirInclude(t, f, []includeDirTest{
|
||||
{"sausage1", false},
|
||||
{"sausage2", false},
|
||||
{"sausage2/sub", false},
|
||||
{"sausage2/sub/dir", false},
|
||||
{"sausage3", true},
|
||||
{"sausage3/sub", true},
|
||||
{"sausage3/sub/dir", true},
|
||||
{"sausage4", false},
|
||||
{"a", true},
|
||||
})
|
||||
assert.False(t, f.InActive())
|
||||
}
|
||||
|
||||
func TestFilterForEachLine(t *testing.T) {
|
||||
@@ -238,15 +355,62 @@ three
|
||||
four
|
||||
five
|
||||
six `)
|
||||
defer os.Remove(*file)
|
||||
defer func() {
|
||||
err := os.Remove(*file)
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
lines := []string{}
|
||||
forEachLine(*file, func(s string) error {
|
||||
err := forEachLine(*file, func(s string) error {
|
||||
lines = append(lines, s)
|
||||
return nil
|
||||
})
|
||||
got := strings.Join(lines, ",")
|
||||
want := "one,two,three,four,five,six"
|
||||
if want != got {
|
||||
t.Errorf("want %q got %q", want, got)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "one,two,three,four,five,six", strings.Join(lines, ","))
|
||||
}
|
||||
|
||||
func TestFilterMatchesFromDocs(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
glob string
|
||||
included bool
|
||||
file string
|
||||
}{
|
||||
{"file.jpg", true, "file.jpg"},
|
||||
{"file.jpg", true, "directory/file.jpg"},
|
||||
{"file.jpg", false, "afile.jpg"},
|
||||
{"file.jpg", false, "directory/afile.jpg"},
|
||||
{"/file.jpg", true, "file.jpg"},
|
||||
{"/file.jpg", false, "afile.jpg"},
|
||||
{"/file.jpg", false, "directory/file.jpg"},
|
||||
{"*.jpg", true, "file.jpg"},
|
||||
{"*.jpg", true, "directory/file.jpg"},
|
||||
{"*.jpg", false, "file.jpg/anotherfile.png"},
|
||||
{"dir/**", true, "dir/file.jpg"},
|
||||
{"dir/**", true, "dir/dir1/dir2/file.jpg"},
|
||||
{"dir/**", false, "directory/file.jpg"},
|
||||
{"dir/**", false, "adir/file.jpg"},
|
||||
{"l?ss", true, "less"},
|
||||
{"l?ss", true, "lass"},
|
||||
{"l?ss", false, "floss"},
|
||||
{"h[ae]llo", true, "hello"},
|
||||
{"h[ae]llo", true, "hallo"},
|
||||
{"h[ae]llo", false, "hullo"},
|
||||
{"{one,two}_potato", true, "one_potato"},
|
||||
{"{one,two}_potato", true, "two_potato"},
|
||||
{"{one,two}_potato", false, "three_potato"},
|
||||
{"{one,two}_potato", false, "_potato"},
|
||||
{"\\*.jpg", true, "*.jpg"},
|
||||
{"\\\\.jpg", true, "\\.jpg"},
|
||||
{"\\[one\\].jpg", true, "[one].jpg"},
|
||||
} {
|
||||
f, err := NewFilter()
|
||||
require.NoError(t, err)
|
||||
err = f.Add(true, test.glob)
|
||||
require.NoError(t, err)
|
||||
err = f.Add(false, "*")
|
||||
require.NoError(t, err)
|
||||
included := f.Include(test.file, 0, time.Unix(0, 0))
|
||||
if included != test.included {
|
||||
t.Errorf("%q match %q: want %v got %v", test.glob, test.file, test.included, included)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
384 fs/fs.go
@@ -5,39 +5,58 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Constants
|
||||
const (
|
||||
// UserAgent for Fs which can set it
|
||||
UserAgent = "rclone/" + Version
|
||||
// ModTimeNotSupported is a very large precision value to show
|
||||
// mod time isn't supported on this Fs
|
||||
ModTimeNotSupported = 100 * 365 * 24 * time.Hour
|
||||
// MaxLevel is a sentinel representing an infinite depth for listings
|
||||
MaxLevel = math.MaxInt32
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
// UserAgent for Fs which can set it
|
||||
UserAgent = "rclone/" + Version
|
||||
// Filesystem registry
|
||||
fsRegistry []*Info
|
||||
fsRegistry []*RegInfo
|
||||
// ErrorNotFoundInConfigFile is returned by NewFs if not found in config file
|
||||
ErrorNotFoundInConfigFile = fmt.Errorf("Didn't find section in config file")
|
||||
ErrorCantCopy = fmt.Errorf("Can't copy object - incompatible remotes")
|
||||
ErrorCantMove = fmt.Errorf("Can't copy object - incompatible remotes")
|
||||
ErrorCantDirMove = fmt.Errorf("Can't copy directory - incompatible remotes")
|
||||
ErrorDirExists = fmt.Errorf("Can't copy directory - destination already exists")
|
||||
ErrorNotFoundInConfigFile = errors.New("didn't find section in config file")
|
||||
ErrorCantPurge = errors.New("can't purge directory")
|
||||
ErrorCantCopy = errors.New("can't copy object - incompatible remotes")
|
||||
ErrorCantMove = errors.New("can't move object - incompatible remotes")
|
||||
ErrorCantDirMove = errors.New("can't move directory - incompatible remotes")
|
||||
ErrorDirExists = errors.New("can't copy directory - destination already exists")
|
||||
ErrorCantSetModTime = errors.New("can't set modified time")
|
||||
ErrorDirNotFound = errors.New("directory not found")
|
||||
ErrorObjectNotFound = errors.New("object not found")
|
||||
ErrorLevelNotSupported = errors.New("level value not supported")
|
||||
ErrorListAborted = errors.New("list aborted")
|
||||
ErrorListOnlyRoot = errors.New("can only list from root")
|
||||
ErrorIsFile = errors.New("is a file not a directory")
|
||||
ErrorNotDeleting = errors.New("not deleting files as there were IO errors")
|
||||
ErrorCantMoveOverlapping = errors.New("can't move files on overlapping remotes")
|
||||
)
|
||||
|
||||
// Info information about a filesystem
|
||||
type Info struct {
|
||||
// RegInfo provides information about a filesystem
|
||||
type RegInfo struct {
|
||||
// Name of this fs
|
||||
Name string
|
||||
// Description of this fs - defaults to Name
|
||||
Description string
|
||||
// Create a new file system. If root refers to an existing
|
||||
// object, then it should return a Fs which only returns that
|
||||
// object.
|
||||
// object, then it should return a Fs which points to
|
||||
// the parent of that object and ErrorIsFile.
|
||||
NewFs func(name string, root string) (Fs, error)
|
||||
// Function to call to help with config
|
||||
Config func(string)
|
||||
@@ -50,9 +69,24 @@ type Option struct {
|
||||
Name string
|
||||
Help string
|
||||
Optional bool
|
||||
Examples []OptionExample
|
||||
Examples OptionExamples
|
||||
}
|
||||
|
||||
// OptionExamples is a slice of examples
|
||||
type OptionExamples []OptionExample
|
||||
|
||||
// Len is part of sort.Interface.
|
||||
func (os OptionExamples) Len() int { return len(os) }
|
||||
|
||||
// Swap is part of sort.Interface.
|
||||
func (os OptionExamples) Swap(i, j int) { os[i], os[j] = os[j], os[i] }
|
||||
|
||||
// Less is part of sort.Interface.
|
||||
func (os OptionExamples) Less(i, j int) bool { return os[i].Help < os[j].Help }
|
||||
|
||||
// Sort sorts an OptionExamples
|
||||
func (os OptionExamples) Sort() { sort.Sort(os) }
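As a quick illustration (not part of this diff, values made up, assuming the github.com/ncw/rclone/fs import), the sort.Interface implementation above lets callers order examples by their Help text:

// Hypothetical option examples, for illustration only.
examples := fs.OptionExamples{
	{Value: "true", Help: "Zebra option"},
	{Value: "false", Help: "Aardvark option"},
}
examples.Sort()
// examples[0].Help is now "Aardvark option" because Less compares the Help fields.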
|
||||
|
||||
// OptionExample describes an example for an Option
|
||||
type OptionExample struct {
|
||||
Value string
|
||||
@@ -62,36 +96,40 @@ type OptionExample struct {
|
||||
// Register a filesystem
|
||||
//
|
||||
// Fs modules should use this in an init() function
|
||||
func Register(info *Info) {
|
||||
func Register(info *RegInfo) {
|
||||
fsRegistry = append(fsRegistry, info)
|
||||
}
|
||||
|
||||
// ListFser is the interface for listing a remote Fs
|
||||
type ListFser interface {
|
||||
// List the objects and directories of the Fs starting from dir
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrorDirNotFound (using out.SetError())
|
||||
// if the directory isn't found.
|
||||
//
|
||||
// Fses must support recursion levels of fs.MaxLevel and 1.
|
||||
// They may return ErrorLevelNotSupported otherwise.
|
||||
List(out ListOpts, dir string)
|
||||
}
|
||||
|
||||
// Fs is the interface a cloud storage system must provide
|
||||
type Fs interface {
|
||||
// Name of the remote (as passed into NewFs)
|
||||
Name() string
|
||||
Info
|
||||
ListFser
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
Root() string
|
||||
|
||||
// String returns a description of the FS
|
||||
String() string
|
||||
|
||||
// List the Fs into a channel
|
||||
List() ObjectsChan
|
||||
|
||||
// ListDir lists the Fs directories/buckets/containers into a channel
|
||||
ListDir() DirChan
|
||||
|
||||
// NewFsObject finds the Object at remote. Returns nil if can't be found
|
||||
NewFsObject(remote string) Object
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error ErrorObjectNotFound.
|
||||
NewObject(remote string) (Object, error)
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
Put(in io.Reader, remote string, modTime time.Time, size int64) (Object, error)
|
||||
Put(in io.Reader, src ObjectInfo) (Object, error)
|
||||
|
||||
// Mkdir makes the directory (container, bucket)
|
||||
//
|
||||
@@ -102,49 +140,69 @@ type Fs interface {
|
||||
//
|
||||
// Return an error if it doesn't exist or isn't empty
|
||||
Rmdir() error
|
||||
}
|
||||
|
||||
// Info provides an interface to reading information about a filesystem.
|
||||
type Info interface {
|
||||
// Name of the remote (as passed into NewFs)
|
||||
Name() string
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
Root() string
|
||||
|
||||
// String returns a description of the FS
|
||||
String() string
|
||||
|
||||
// Precision of the ModTimes in this Fs
|
||||
Precision() time.Duration
|
||||
|
||||
// Returns the supported hash types of the filesystem
|
||||
Hashes() HashSet
|
||||
}
|
||||
|
||||
// Object is a filesystem like object provided by an Fs
|
||||
type Object interface {
|
||||
ObjectInfo
|
||||
|
||||
// String returns a description of the Object
|
||||
String() string
|
||||
|
||||
// Fs returns the Fs that this object is part of
|
||||
Fs() Fs
|
||||
|
||||
// Remote returns the remote path
|
||||
Remote() string
|
||||
|
||||
// Md5sum returns the md5 checksum of the file
|
||||
// If no Md5sum is available it returns ""
|
||||
Md5sum() (string, error)
|
||||
|
||||
// ModTime returns the modification date of the file
|
||||
// It should return a best guess if one isn't available
|
||||
ModTime() time.Time
|
||||
|
||||
// SetModTime sets the metadata on the object to set the modification date
|
||||
SetModTime(time.Time)
|
||||
|
||||
// Size returns the size of the file
|
||||
Size() int64
|
||||
SetModTime(time.Time) error
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||
Open() (io.ReadCloser, error)
|
||||
|
||||
// Update in to the object with the modTime given of the given size
|
||||
Update(in io.Reader, modTime time.Time, size int64) error
|
||||
|
||||
// Storable says whether this object can be stored
|
||||
Storable() bool
|
||||
Update(in io.Reader, src ObjectInfo) error
|
||||
|
||||
// Removes this object
|
||||
Remove() error
|
||||
}
|
||||
|
||||
// ObjectInfo contains information about an object.
|
||||
type ObjectInfo interface {
|
||||
// Fs returns read only access to the Fs that this object is part of
|
||||
Fs() Info
|
||||
|
||||
// Remote returns the remote path
|
||||
Remote() string
|
||||
|
||||
// Hash returns the selected checksum of the file
|
||||
// If no checksum is available it returns ""
|
||||
Hash(HashType) (string, error)
|
||||
|
||||
// ModTime returns the modification date of the file
|
||||
// It should return a best guess if one isn't available
|
||||
ModTime() time.Time
|
||||
|
||||
// Size returns the size of the file
|
||||
Size() int64
|
||||
|
||||
// Storable says whether this object can be stored
|
||||
Storable() bool
|
||||
}
|
||||
|
||||
// Purger is an optional interface for Fs
|
||||
type Purger interface {
|
||||
// Purge all files in the root and the root directory
|
||||
@@ -197,57 +255,74 @@ type DirMover interface {
|
||||
DirMove(src Fs) error
|
||||
}
|
||||
|
||||
// Retry is an optional interface for errors as to whether the operation
|
||||
// should be retried at a high level.
|
||||
//
|
||||
// This should be returned from Update or Put methods as required
|
||||
type Retry interface {
|
||||
error
|
||||
Retry() bool
|
||||
// UnWrapper is an optional interface for Fs
|
||||
type UnWrapper interface {
|
||||
// UnWrap returns the Fs that this Fs is wrapping
|
||||
UnWrap() Fs
|
||||
}
|
||||
|
||||
// retryError is a type of error
|
||||
type retryError string
|
||||
|
||||
// Error interface
|
||||
func (r retryError) Error() string {
|
||||
return string(r)
|
||||
// PutUncheckeder is an optional interface for Fs
|
||||
type PutUncheckeder interface {
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
//
|
||||
// May create duplicates or return errors if src already
|
||||
// exists.
|
||||
PutUnchecked(in io.Reader, src ObjectInfo) (Object, error)
|
||||
}
|
||||
|
||||
// Retry interface
|
||||
func (r retryError) Retry() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check interface
|
||||
var _ Retry = retryError("")
|
||||
|
||||
// RetryErrorf makes an error which indicates it would like to be retried
|
||||
func RetryErrorf(format string, a ...interface{}) error {
|
||||
return retryError(fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
// PlainRetryError is an error wrapped so it will retry
|
||||
type plainRetryError struct {
|
||||
error
|
||||
}
|
||||
|
||||
// Retry interface
|
||||
func (err plainRetryError) Retry() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check interface
|
||||
var _ Retry = plainRetryError{(error)(nil)}
|
||||
|
||||
// RetryError makes an error which indicates it would like to be retried
|
||||
func RetryError(err error) error {
|
||||
return plainRetryError{err}
|
||||
// CleanUpper is an optional interface for Fs
|
||||
type CleanUpper interface {
|
||||
// CleanUp the trash in the Fs
|
||||
//
|
||||
// Implement this if you have a way of emptying the trash or
|
||||
// otherwise cleaning up old versions of files.
|
||||
CleanUp() error
|
||||
}
|
||||
|
||||
// ObjectsChan is a channel of Objects
|
||||
type ObjectsChan chan Object
|
||||
|
||||
// ListOpts describes the interface used for Fs.List operations
|
||||
type ListOpts interface {
|
||||
// Add an object to the output.
|
||||
// If the function returns true, the operation has been aborted.
|
||||
// Multiple goroutines can safely add objects concurrently.
|
||||
Add(obj Object) (abort bool)
|
||||
|
||||
// Add a directory to the output.
|
||||
// If the function returns true, the operation has been aborted.
|
||||
// Multiple goroutines can safely add objects concurrently.
|
||||
AddDir(dir *Dir) (abort bool)
|
||||
|
||||
// IncludeDirectory returns whether this directory should be
|
||||
// included in the listing (and recursed into or not).
|
||||
IncludeDirectory(remote string) bool
|
||||
|
||||
// SetError will set an error state, and will cause the listing to
|
||||
// be aborted.
|
||||
// Multiple goroutines can set the error state concurrently,
|
||||
// but only the first will be returned to the caller.
|
||||
SetError(err error)
|
||||
|
||||
// Level returns the level it should recurse to. Fses may
|
||||
// ignore this in which case the listing will be less
|
||||
// efficient.
|
||||
Level() int
|
||||
|
||||
// Buffer returns the channel depth in use
|
||||
Buffer() int
|
||||
|
||||
// Finished should be called when listing is finished
|
||||
Finished()
|
||||
|
||||
// IsFinished returns whether Finished or SetError have been called
|
||||
IsFinished() bool
|
||||
}
|
||||
|
||||
// Objects is a slice of Objects
|
||||
type Objects []Object
|
||||
|
||||
@@ -274,17 +349,37 @@ type DirChan chan *Dir
|
||||
// Find looks for an Info object for the name passed in
|
||||
//
|
||||
// Services are looked up in the config file
|
||||
func Find(name string) (*Info, error) {
|
||||
func Find(name string) (*RegInfo, error) {
|
||||
for _, item := range fsRegistry {
|
||||
if item.Name == name {
|
||||
return item, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("Didn't find filing system for %q", name)
|
||||
return nil, errors.Errorf("didn't find filing system for %q", name)
|
||||
}
|
||||
|
||||
// Pattern to match an rclone url
|
||||
var matcher = regexp.MustCompile(`^([\w_-]+):(.*)$`)
|
||||
var matcher = regexp.MustCompile(`^([\w_ -]+):(.*)$`)
|
||||
|
||||
// ParseRemote deconstructs a path into configName, fsPath, looking up
|
||||
// the fsName in the config file (returning NotFoundInConfigFile if not found)
|
||||
func ParseRemote(path string) (fsInfo *RegInfo, configName, fsPath string, err error) {
|
||||
parts := matcher.FindStringSubmatch(path)
|
||||
var fsName string
|
||||
fsName, configName, fsPath = "local", "local", path
|
||||
if parts != nil && !isDriveLetter(parts[1]) {
|
||||
configName, fsPath = parts[1], parts[2]
|
||||
var err error
|
||||
fsName, err = ConfigFile.GetValue(configName, "type")
|
||||
if err != nil {
|
||||
return nil, "", "", ErrorNotFoundInConfigFile
|
||||
}
|
||||
}
|
||||
// change native directory separators to / if there are any
|
||||
fsPath = filepath.ToSlash(fsPath)
|
||||
fsInfo, err = Find(fsName)
|
||||
return fsInfo, configName, fsPath, err
|
||||
}
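For illustration only (not part of the change), the new ParseRemote helper could be driven like this; the "s3" remote name is an assumption and must exist in the config file:

package main

import (
	"fmt"
	"log"

	"github.com/ncw/rclone/fs"
)

func main() {
	// "s3:bucket/path" is a made-up remote spec for this sketch.
	fsInfo, configName, fsPath, err := fs.ParseRemote("s3:bucket/path")
	if err != nil {
		log.Fatal(err)
	}
	// fsInfo.Name is the backend type looked up in the config, e.g. "s3".
	fmt.Println(fsInfo.Name, configName, fsPath)
}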
|
||||
|
||||
// NewFs makes a new Fs object from the path
|
||||
//
|
||||
@@ -296,46 +391,37 @@ var matcher = regexp.MustCompile(`^([\w_-]+):(.*)$`)
|
||||
// On Windows avoid single character remote names as they can be mixed
|
||||
// up with drive letters.
|
||||
func NewFs(path string) (Fs, error) {
|
||||
parts := matcher.FindStringSubmatch(path)
|
||||
fsName, configName, fsPath := "local", "local", path
|
||||
if parts != nil && !isDriveLetter(parts[1]) {
|
||||
configName, fsPath = parts[1], parts[2]
|
||||
var err error
|
||||
fsName, err = ConfigFile.GetValue(configName, "type")
|
||||
if err != nil {
|
||||
return nil, ErrorNotFoundInConfigFile
|
||||
}
|
||||
}
|
||||
fs, err := Find(fsName)
|
||||
fsInfo, configName, fsPath, err := ParseRemote(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// change native directory separators to / if there are any
|
||||
fsPath = filepath.ToSlash(fsPath)
|
||||
return fs.NewFs(configName, fsPath)
|
||||
return fsInfo.NewFs(configName, fsPath)
|
||||
}
|
||||
|
||||
// OutputLog logs for an object
|
||||
func OutputLog(o interface{}, text string, args ...interface{}) {
|
||||
description := ""
|
||||
if o != nil {
|
||||
description = fmt.Sprintf("%v: ", o)
|
||||
}
|
||||
// DebugLogger - logs to Stdout
|
||||
var DebugLogger = log.New(os.Stdout, "", log.LstdFlags)
|
||||
|
||||
// makeLog produces a log string from the arguments passed in
|
||||
func makeLog(o interface{}, text string, args ...interface{}) string {
|
||||
out := fmt.Sprintf(text, args...)
|
||||
log.Print(description + out)
|
||||
if o == nil {
|
||||
return out
|
||||
}
|
||||
return fmt.Sprintf("%v: %s", o, out)
|
||||
}
|
||||
|
||||
// Debug writes debuging output for this Object or Fs
|
||||
// Debug writes debugging output for this Object or Fs
|
||||
func Debug(o interface{}, text string, args ...interface{}) {
|
||||
if Config.Verbose {
|
||||
OutputLog(o, text, args...)
|
||||
DebugLogger.Print(makeLog(o, text, args...))
|
||||
}
|
||||
}
|
||||
|
||||
// Log writes log output for this Object or Fs
|
||||
// Log writes log output for this Object or Fs. This should be
|
||||
// considered to be Info level logging.
|
||||
func Log(o interface{}, text string, args ...interface{}) {
|
||||
if !Config.Quiet {
|
||||
OutputLog(o, text, args...)
|
||||
log.Print(makeLog(o, text, args...))
|
||||
}
|
||||
}
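A brief sketch of how these loggers are called (not part of the diff; `f` and `n` are hypothetical names):

// Debug only prints when Config.Verbose is set and goes to DebugLogger (stdout).
fs.Debug(f, "copied %d bytes", n)
// Log is suppressed by Config.Quiet; ErrorLog always prints.
fs.Log(f, "starting transfer")
fs.ErrorLog(nil, "failed to read config")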
|
||||
|
||||
@@ -343,14 +429,60 @@ func Log(o interface{}, text string, args ...interface{}) {
|
||||
// unconditionally logs a message regardless of Config.Quiet or
|
||||
// Config.Verbose.
|
||||
func ErrorLog(o interface{}, text string, args ...interface{}) {
|
||||
OutputLog(o, text, args...)
|
||||
log.Print(makeLog(o, text, args...))
|
||||
}
|
||||
|
||||
// checkClose is a utility function used to check the return from
|
||||
// CheckClose is a utility function used to check the return from
|
||||
// Close in a defer statement.
|
||||
func checkClose(c io.Closer, err *error) {
|
||||
func CheckClose(c io.Closer, err *error) {
|
||||
cerr := c.Close()
|
||||
if *err == nil {
|
||||
*err = cerr
|
||||
}
|
||||
}
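Now that it is exported, CheckClose can be used from other packages in the usual deferred-close pattern. A minimal sketch, assuming the os and github.com/ncw/rclone/fs imports; writeFile and its arguments are made up:

// writeFile shows CheckClose capturing the error from the deferred Close
// without overwriting an earlier write error.
func writeFile(name string, data []byte) (err error) {
	f, err := os.Create(name)
	if err != nil {
		return err
	}
	defer fs.CheckClose(f, &err)
	_, err = f.Write(data)
	return err
}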
|
||||
|
||||
// NewStaticObjectInfo returns a static ObjectInfo
|
||||
// If hashes is nil and fs is not nil, the hash map will be replaced with
|
||||
// empty hashes of the types supported by the fs.
|
||||
func NewStaticObjectInfo(remote string, modTime time.Time, size int64, storable bool, hashes map[HashType]string, fs Info) ObjectInfo {
|
||||
info := &staticObjectInfo{
|
||||
remote: remote,
|
||||
modTime: modTime,
|
||||
size: size,
|
||||
storable: storable,
|
||||
hashes: hashes,
|
||||
fs: fs,
|
||||
}
|
||||
if fs != nil && hashes == nil {
|
||||
set := fs.Hashes().Array()
|
||||
info.hashes = make(map[HashType]string)
|
||||
for _, ht := range set {
|
||||
info.hashes[ht] = ""
|
||||
}
|
||||
}
|
||||
return info
|
||||
}
|
||||
|
||||
type staticObjectInfo struct {
|
||||
remote string
|
||||
modTime time.Time
|
||||
size int64
|
||||
storable bool
|
||||
hashes map[HashType]string
|
||||
fs Info
|
||||
}
|
||||
|
||||
func (i *staticObjectInfo) Fs() Info { return i.fs }
|
||||
func (i *staticObjectInfo) Remote() string { return i.remote }
|
||||
func (i *staticObjectInfo) ModTime() time.Time { return i.modTime }
|
||||
func (i *staticObjectInfo) Size() int64 { return i.size }
|
||||
func (i *staticObjectInfo) Storable() bool { return i.storable }
|
||||
func (i *staticObjectInfo) Hash(h HashType) (string, error) {
|
||||
if len(i.hashes) == 0 {
|
||||
return "", ErrHashUnsupported
|
||||
}
|
||||
if hash, ok := i.hashes[h]; ok {
|
||||
return hash, nil
|
||||
}
|
||||
return "", ErrHashUnsupported
|
||||
}
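A hedged sketch of how NewStaticObjectInfo pairs with the new Put(in, src ObjectInfo) signature; putBytes and its arguments are illustrative only, assuming the bytes, time and github.com/ncw/rclone/fs imports:

// putBytes uploads an in-memory byte slice to the remote f.
// Passing nil hashes and a nil fs means Hash() will report ErrHashUnsupported.
func putBytes(f fs.Fs, remote string, data []byte) (fs.Object, error) {
	src := fs.NewStaticObjectInfo(remote, time.Now(), int64(len(data)), true, nil, nil)
	return f.Put(bytes.NewReader(data), src)
}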
|
||||
|
||||
65 fs/glob.go
@@ -4,9 +4,10 @@ package fs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// globToRegexp converts an rsync style glob to a regexp
|
||||
@@ -29,7 +30,7 @@ func globToRegexp(glob string) (*regexp.Regexp, error) {
|
||||
case 2:
|
||||
_, _ = re.WriteString(`.*`)
|
||||
default:
|
||||
return fmt.Errorf("too many stars in %q", glob)
|
||||
return errors.Errorf("too many stars in %q", glob)
|
||||
}
|
||||
}
|
||||
consecutiveStars = 0
|
||||
@@ -72,16 +73,16 @@ func globToRegexp(glob string) (*regexp.Regexp, error) {
|
||||
_, _ = re.WriteRune(c)
|
||||
inBrackets++
|
||||
case ']':
|
||||
return nil, fmt.Errorf("mismatched ']' in glob %q", glob)
|
||||
return nil, errors.Errorf("mismatched ']' in glob %q", glob)
|
||||
case '{':
|
||||
if inBraces {
|
||||
return nil, fmt.Errorf("can't nest '{' '}' in glob %q", glob)
|
||||
return nil, errors.Errorf("can't nest '{' '}' in glob %q", glob)
|
||||
}
|
||||
inBraces = true
|
||||
_, _ = re.WriteRune('(')
|
||||
case '}':
|
||||
if !inBraces {
|
||||
return nil, fmt.Errorf("mismatched '{' and '}' in glob %q", glob)
|
||||
return nil, errors.Errorf("mismatched '{' and '}' in glob %q", glob)
|
||||
}
|
||||
_, _ = re.WriteRune(')')
|
||||
inBraces = false
|
||||
@@ -103,15 +104,63 @@ func globToRegexp(glob string) (*regexp.Regexp, error) {
|
||||
return nil, err
|
||||
}
|
||||
if inBrackets > 0 {
|
||||
return nil, fmt.Errorf("mismatched '[' and ']' in glob %q", glob)
|
||||
return nil, errors.Errorf("mismatched '[' and ']' in glob %q", glob)
|
||||
}
|
||||
if inBraces {
|
||||
return nil, fmt.Errorf("mismatched '{' and '}' in glob %q", glob)
|
||||
return nil, errors.Errorf("mismatched '{' and '}' in glob %q", glob)
|
||||
}
|
||||
_, _ = re.WriteRune('$')
|
||||
result, err := regexp.Compile(re.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Bad glob pattern %q: %v (%q)", glob, err, re.String())
|
||||
return nil, errors.Wrapf(err, "bad glob pattern %q (regexp %q)", glob, re.String())
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
var (
|
||||
// Can't deal with / or ** in {}
|
||||
tooHardRe = regexp.MustCompile(`{[^{}]*(\*\*|/)[^{}]*}`)
|
||||
|
||||
// Squash all /
|
||||
squashSlash = regexp.MustCompile(`/{2,}`)
|
||||
)
|
||||
|
||||
// globToDirGlobs takes a file glob and turns it into a series of
|
||||
// directory globs. When matched with a directory (with a trailing /)
|
||||
// this should answer the question as to whether this glob could be in
|
||||
// this directory.
|
||||
func globToDirGlobs(glob string) (out []string) {
|
||||
if tooHardRe.MatchString(glob) {
|
||||
// Can't figure this one out so return any directory might match
|
||||
out = append(out, "/**")
|
||||
return out
|
||||
}
|
||||
|
||||
// Get rid of multiple /s
|
||||
glob = squashSlash.ReplaceAllString(glob, "/")
|
||||
|
||||
// Split on / or **
|
||||
// (** can contain /)
|
||||
for {
|
||||
i := strings.LastIndex(glob, "/")
|
||||
j := strings.LastIndex(glob, "**")
|
||||
what := ""
|
||||
if j > i {
|
||||
i = j
|
||||
what = "**"
|
||||
}
|
||||
if i < 0 {
|
||||
if len(out) == 0 {
|
||||
out = append(out, "/**")
|
||||
}
|
||||
break
|
||||
}
|
||||
glob = glob[:i]
|
||||
newGlob := glob + what + "/"
|
||||
if len(out) == 0 || out[len(out)-1] != newGlob {
|
||||
out = append(out, newGlob)
|
||||
}
|
||||
}
|
||||
|
||||
return out
|
||||
}
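To make the recursion concrete, here is a tiny in-package sketch of the output (the expected slice matches the test table further down):

// Inside package fs: for a rooted file glob, every parent directory glob
// is produced, most specific first.
dirGlobs := globToDirGlobs("/a/b/*.jpg")
// dirGlobs == []string{"/a/b/", "/a/", "/"}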
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGlobToRegexp(t *testing.T) {
|
||||
@@ -35,30 +37,68 @@ func TestGlobToRegexp(t *testing.T) {
|
||||
{`ab}c`, `(^|/)`, `mismatched '{' and '}'`},
|
||||
{`ab{c`, `(^|/)`, `mismatched '{' and '}'`},
|
||||
{`*.{jpg,png,gif}`, `(^|/)[^/]*\.(jpg|png|gif)$`, ``},
|
||||
{`[a--b]`, `(^|/)`, `Bad glob pattern`},
|
||||
{`[a--b]`, `(^|/)`, `bad glob pattern`},
|
||||
{`a\*b`, `(^|/)a\*b$`, ``},
|
||||
{`a\\b`, `(^|/)a\\b$`, ``},
|
||||
} {
|
||||
gotRe, err := globToRegexp(test.in)
|
||||
if test.error == "" {
|
||||
if err != nil {
|
||||
t.Errorf("%q: not expecting error: %v", test.in, err)
|
||||
} else {
|
||||
got := gotRe.String()
|
||||
if test.want != got {
|
||||
t.Errorf("%q: want %q got %q", test.in, test.want, got)
|
||||
}
|
||||
}
|
||||
got := gotRe.String()
|
||||
require.NoError(t, err, test.in)
|
||||
assert.Equal(t, test.want, got, test.in)
|
||||
} else {
|
||||
if err == nil {
|
||||
t.Errorf("%q: expecting error but didn't get one", test.in)
|
||||
} else {
|
||||
got := err.Error()
|
||||
if !strings.Contains(got, test.error) {
|
||||
t.Errorf("%q: want error %q got %q", test.in, test.error, got)
|
||||
}
|
||||
}
|
||||
require.Error(t, err, test.in)
|
||||
assert.Contains(t, err.Error(), test.error, test.in)
|
||||
assert.Nil(t, gotRe)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestGlobToDirGlobs(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
want []string
|
||||
}{
|
||||
{`*`, []string{"/**"}},
|
||||
{`/*`, []string{"/"}},
|
||||
{`*.jpg`, []string{"/**"}},
|
||||
{`/*.jpg`, []string{"/"}},
|
||||
{`//*.jpg`, []string{"/"}},
|
||||
{`///*.jpg`, []string{"/"}},
|
||||
{`/a/*.jpg`, []string{"/a/", "/"}},
|
||||
{`/a//*.jpg`, []string{"/a/", "/"}},
|
||||
{`/a///*.jpg`, []string{"/a/", "/"}},
|
||||
{`/a/b/*.jpg`, []string{"/a/b/", "/a/", "/"}},
|
||||
{`a/*.jpg`, []string{"a/"}},
|
||||
{`a/b/*.jpg`, []string{"a/b/", "a/"}},
|
||||
{`*/*/*.jpg`, []string{"*/*/", "*/"}},
|
||||
{`a/b/`, []string{"a/b/", "a/"}},
|
||||
{`a/b`, []string{"a/"}},
|
||||
{`a/b/*.{jpg,png,gif}`, []string{"a/b/", "a/"}},
|
||||
{`/a/{jpg,png,gif}/*.{jpg,png,gif}`, []string{"/a/{jpg,png,gif}/", "/a/", "/"}},
|
||||
{`a/{a,a*b,a**c}/d/`, []string{"/**"}},
|
||||
{`/a/{a,a*b,a/c,d}/d/`, []string{"/**"}},
|
||||
{`**`, []string{"**/"}},
|
||||
{`a**`, []string{"a**/"}},
|
||||
{`a**b`, []string{"a**/"}},
|
||||
{`a**b**c**d`, []string{"a**b**c**/", "a**b**/", "a**/"}},
|
||||
{`a**b/c**d`, []string{"a**b/c**/", "a**b/", "a**/"}},
|
||||
{`/A/a**b/B/c**d/C/`, []string{"/A/a**b/B/c**d/C/", "/A/a**b/B/c**d/", "/A/a**b/B/c**/", "/A/a**b/B/", "/A/a**b/", "/A/a**/", "/A/", "/"}},
|
||||
{`/var/spool/**/ncw`, []string{"/var/spool/**/", "/var/spool/", "/var/", "/"}},
|
||||
{`var/spool/**/ncw/`, []string{"var/spool/**/ncw/", "var/spool/**/", "var/spool/", "var/"}},
|
||||
{"/file1.jpg", []string{`/`}},
|
||||
{"/file2.png", []string{`/`}},
|
||||
{"/*.jpg", []string{`/`}},
|
||||
{"/*.png", []string{`/`}},
|
||||
{"/potato", []string{`/`}},
|
||||
{"/sausage1", []string{`/`}},
|
||||
{"/sausage2*", []string{`/`}},
|
||||
{"/sausage3**", []string{`/sausage3**/`, "/"}},
|
||||
{"/a/*.jpg", []string{`/a/`, "/"}},
|
||||
} {
|
||||
_, err := globToRegexp(test.in)
|
||||
assert.NoError(t, err)
|
||||
got := globToDirGlobs(test.in)
|
||||
assert.Equal(t, test.want, got, test.in)
|
||||
}
|
||||
}
|
||||
|
||||
255 fs/hash.go Normal file
@@ -0,0 +1,255 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// HashType indicates a standard hashing algorithm
|
||||
type HashType int
|
||||
|
||||
// ErrHashUnsupported should be returned by a filesystem
|
||||
// if it is requested to deliver an unsupported hash type.
|
||||
var ErrHashUnsupported = errors.New("hash type not supported")
|
||||
|
||||
const (
|
||||
// HashMD5 indicates MD5 support
|
||||
HashMD5 HashType = 1 << iota
|
||||
|
||||
// HashSHA1 indicates SHA-1 support
|
||||
HashSHA1
|
||||
|
||||
// HashNone indicates no hashes are supported
|
||||
HashNone HashType = 0
|
||||
)
|
||||
|
||||
// SupportedHashes returns a set of all the supported hashes by
|
||||
// HashStream and MultiHasher.
|
||||
var SupportedHashes = NewHashSet(HashMD5, HashSHA1)
|
||||
|
||||
// HashWidth returns the width in characters for any HashType
|
||||
var HashWidth = map[HashType]int{
|
||||
HashMD5: 32,
|
||||
HashSHA1: 40,
|
||||
}
|
||||
|
||||
// HashStream will calculate hashes of all supported hash types.
|
||||
func HashStream(r io.Reader) (map[HashType]string, error) {
|
||||
return HashStreamTypes(r, SupportedHashes)
|
||||
}
|
||||
|
||||
// HashStreamTypes will calculate hashes of the requested hash types.
|
||||
func HashStreamTypes(r io.Reader, set HashSet) (map[HashType]string, error) {
|
||||
hashers, err := hashFromTypes(set)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = io.Copy(hashToMultiWriter(hashers), r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var ret = make(map[HashType]string)
|
||||
for k, v := range hashers {
|
||||
ret[k] = hex.EncodeToString(v.Sum(nil))
|
||||
}
|
||||
return ret, nil
|
||||
}
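A short, self-contained sketch of the streaming hash helpers (not part of the diff):

package main

import (
	"fmt"
	"strings"

	"github.com/ncw/rclone/fs"
)

func main() {
	// HashStream reads the whole reader once and returns all supported sums.
	sums, err := fs.HashStream(strings.NewReader("hello"))
	if err != nil {
		panic(err)
	}
	fmt.Println(sums[fs.HashMD5], sums[fs.HashSHA1])
}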
|
||||
|
||||
// String returns a string representation of the hash type.
|
||||
// The function will panic if the hash type is unknown.
|
||||
func (h HashType) String() string {
|
||||
switch h {
|
||||
case HashNone:
|
||||
return "None"
|
||||
case HashMD5:
|
||||
return "MD5"
|
||||
case HashSHA1:
|
||||
return "SHA-1"
|
||||
default:
|
||||
err := fmt.Sprintf("internal error: unknown hash type: 0x%x", int(h))
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// hashFromTypes will return hashers for all the requested types.
|
||||
// The types must be a subset of SupportedHashes,
|
||||
// and this function must support all types.
|
||||
func hashFromTypes(set HashSet) (map[HashType]hash.Hash, error) {
|
||||
if !set.SubsetOf(SupportedHashes) {
|
||||
return nil, errors.Errorf("requested set %08x contains unknown hash types", int(set))
|
||||
}
|
||||
var hashers = make(map[HashType]hash.Hash)
|
||||
types := set.Array()
|
||||
for _, t := range types {
|
||||
switch t {
|
||||
case HashMD5:
|
||||
hashers[t] = md5.New()
|
||||
case HashSHA1:
|
||||
hashers[t] = sha1.New()
|
||||
default:
|
||||
err := fmt.Sprintf("internal error: Unsupported hash type %v", t)
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
return hashers, nil
|
||||
}
|
||||
|
||||
// hashToMultiWriter will combine a set of hashers into a
|
||||
// single multiwriter, where one write will update all
|
||||
// the hashers.
|
||||
func hashToMultiWriter(h map[HashType]hash.Hash) io.Writer {
|
||||
// Convert to a slice
|
||||
var w = make([]io.Writer, 0, len(h))
|
||||
for _, v := range h {
|
||||
w = append(w, v)
|
||||
}
|
||||
return io.MultiWriter(w...)
|
||||
}
|
||||
|
||||
// A MultiHasher will construct various hashes on
|
||||
// all incoming writes.
|
||||
type MultiHasher struct {
|
||||
w io.Writer
|
||||
size int64
|
||||
h map[HashType]hash.Hash // Hashes
|
||||
}
|
||||
|
||||
// NewMultiHasher will return a hash writer that will write all
|
||||
// supported hash types.
|
||||
func NewMultiHasher() *MultiHasher {
|
||||
h, err := NewMultiHasherTypes(SupportedHashes)
|
||||
if err != nil {
|
||||
panic("internal error: could not create multihasher")
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// NewMultiHasherTypes will return a hash writer that will write
|
||||
// the requested hash types.
|
||||
func NewMultiHasherTypes(set HashSet) (*MultiHasher, error) {
|
||||
hashers, err := hashFromTypes(set)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m := MultiHasher{h: hashers, w: hashToMultiWriter(hashers)}
|
||||
return &m, nil
|
||||
}
|
||||
|
||||
func (m *MultiHasher) Write(p []byte) (n int, err error) {
|
||||
n, err = m.w.Write(p)
|
||||
m.size += int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Sums returns the sums of all accumulated hashes as hex encoded
|
||||
// strings.
|
||||
func (m *MultiHasher) Sums() map[HashType]string {
|
||||
dst := make(map[HashType]string)
|
||||
for k, v := range m.h {
|
||||
dst[k] = hex.EncodeToString(v.Sum(nil))
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Size returns the number of bytes written
|
||||
func (m *MultiHasher) Size() int64 {
|
||||
return m.size
|
||||
}
|
||||
|
||||
// A HashSet indicates one or more hash types.
|
||||
type HashSet int
|
||||
|
||||
// NewHashSet will create a new hash set with the hash types supplied
|
||||
func NewHashSet(t ...HashType) HashSet {
|
||||
h := HashSet(HashNone)
|
||||
return h.Add(t...)
|
||||
}
|
||||
|
||||
// Add one or more hash types to the set.
|
||||
// Returns the modified hash set.
|
||||
func (h *HashSet) Add(t ...HashType) HashSet {
|
||||
for _, v := range t {
|
||||
*h |= HashSet(v)
|
||||
}
|
||||
return *h
|
||||
}
|
||||
|
||||
// Contains returns true if the hash type is in the set
|
||||
func (h HashSet) Contains(t HashType) bool {
|
||||
return int(h)&int(t) != 0
|
||||
}
|
||||
|
||||
// Overlap returns the overlapping hash types
|
||||
func (h HashSet) Overlap(t HashSet) HashSet {
|
||||
return HashSet(int(h) & int(t))
|
||||
}
|
||||
|
||||
// SubsetOf will return true if all types of h
|
||||
// are present in the set c
|
||||
func (h HashSet) SubsetOf(c HashSet) bool {
|
||||
return int(h)|int(c) == int(c)
|
||||
}
|
||||
|
||||
// GetOne will return a hash type.
|
||||
// Currently the first is returned, but it could be
|
||||
// improved to return the strongest.
|
||||
func (h HashSet) GetOne() HashType {
|
||||
v := int(h)
|
||||
i := uint(0)
|
||||
for v != 0 {
|
||||
if v&1 != 0 {
|
||||
return HashType(1 << i)
|
||||
}
|
||||
i++
|
||||
v >>= 1
|
||||
}
|
||||
return HashType(HashNone)
|
||||
}
|
||||
|
||||
// Array returns an array of all hash types in the set
|
||||
func (h HashSet) Array() (ht []HashType) {
|
||||
v := int(h)
|
||||
i := uint(0)
|
||||
for v != 0 {
|
||||
if v&1 != 0 {
|
||||
ht = append(ht, HashType(1<<i))
|
||||
}
|
||||
i++
|
||||
v >>= 1
|
||||
}
|
||||
return ht
|
||||
}
|
||||
|
||||
// Count returns the number of hash types in the set
|
||||
func (h HashSet) Count() int {
|
||||
if int(h) == 0 {
|
||||
return 0
|
||||
}
|
||||
// credit: https://code.google.com/u/arnehormann/
|
||||
x := uint64(h)
|
||||
x -= (x >> 1) & 0x5555555555555555
|
||||
x = (x>>2)&0x3333333333333333 + x&0x3333333333333333
|
||||
x += x >> 4
|
||||
x &= 0x0f0f0f0f0f0f0f0f
|
||||
x *= 0x0101010101010101
|
||||
return int(x >> 56)
|
||||
}
|
||||
|
||||
// String returns a string representation of the hash set.
|
||||
// The function will panic if it contains an unknown type.
|
||||
func (h HashSet) String() string {
|
||||
a := h.Array()
|
||||
var r []string
|
||||
for _, v := range a {
|
||||
r = append(r, v.String())
|
||||
}
|
||||
return "[" + strings.Join(r, ", ") + "]"
|
||||
}
|
||||
161 fs/hash_test.go Normal file
@@ -0,0 +1,161 @@
|
||||
package fs_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestHashSet(t *testing.T) {
|
||||
var h fs.HashSet
|
||||
|
||||
assert.Equal(t, 0, h.Count())
|
||||
|
||||
a := h.Array()
|
||||
assert.Len(t, a, 0)
|
||||
|
||||
h = h.Add(fs.HashMD5)
|
||||
assert.Equal(t, 1, h.Count())
|
||||
assert.Equal(t, fs.HashMD5, h.GetOne())
|
||||
a = h.Array()
|
||||
assert.Len(t, a, 1)
|
||||
assert.Equal(t, a[0], fs.HashMD5)
|
||||
|
||||
// Test overlap, with all hashes
|
||||
h = h.Overlap(fs.SupportedHashes)
|
||||
assert.Equal(t, 1, h.Count())
|
||||
assert.Equal(t, fs.HashMD5, h.GetOne())
|
||||
assert.True(t, h.SubsetOf(fs.SupportedHashes))
|
||||
assert.True(t, h.SubsetOf(fs.NewHashSet(fs.HashMD5)))
|
||||
|
||||
h = h.Add(fs.HashSHA1)
|
||||
assert.Equal(t, 2, h.Count())
|
||||
one := h.GetOne()
|
||||
if !(one == fs.HashMD5 || one == fs.HashSHA1) {
|
||||
t.Fatalf("expected to be either MD5 or SHA1, got %v", one)
|
||||
}
|
||||
assert.True(t, h.SubsetOf(fs.SupportedHashes))
|
||||
assert.False(t, h.SubsetOf(fs.NewHashSet(fs.HashMD5)))
|
||||
assert.False(t, h.SubsetOf(fs.NewHashSet(fs.HashSHA1)))
|
||||
assert.True(t, h.SubsetOf(fs.NewHashSet(fs.HashMD5, fs.HashSHA1)))
|
||||
a = h.Array()
|
||||
assert.Len(t, a, 2)
|
||||
|
||||
ol := h.Overlap(fs.NewHashSet(fs.HashMD5))
|
||||
assert.Equal(t, 1, ol.Count())
|
||||
assert.True(t, ol.Contains(fs.HashMD5))
|
||||
assert.False(t, ol.Contains(fs.HashSHA1))
|
||||
|
||||
ol = h.Overlap(fs.NewHashSet(fs.HashMD5, fs.HashSHA1))
|
||||
assert.Equal(t, 2, ol.Count())
|
||||
assert.True(t, ol.Contains(fs.HashMD5))
|
||||
assert.True(t, ol.Contains(fs.HashSHA1))
|
||||
}
|
||||
|
||||
type hashTest struct {
|
||||
input []byte
|
||||
output map[fs.HashType]string
|
||||
}
|
||||
|
||||
var hashTestSet = []hashTest{
|
||||
{
|
||||
input: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14},
|
||||
output: map[fs.HashType]string{
|
||||
fs.HashMD5: "bf13fc19e5151ac57d4252e0e0f87abe",
|
||||
fs.HashSHA1: "3ab6543c08a75f292a5ecedac87ec41642d12166",
|
||||
},
|
||||
},
|
||||
// Empty data set
|
||||
{
|
||||
input: []byte{},
|
||||
output: map[fs.HashType]string{
|
||||
fs.HashMD5: "d41d8cd98f00b204e9800998ecf8427e",
|
||||
fs.HashSHA1: "da39a3ee5e6b4b0d3255bfef95601890afd80709",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestMultiHasher(t *testing.T) {
|
||||
for _, test := range hashTestSet {
|
||||
mh := fs.NewMultiHasher()
|
||||
n, err := io.Copy(mh, bytes.NewBuffer(test.input))
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, test.input, int(n))
|
||||
sums := mh.Sums()
|
||||
for k, v := range sums {
|
||||
expect, ok := test.output[k]
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, v, expect)
|
||||
}
|
||||
// Test that all are present
|
||||
for k, v := range test.output {
|
||||
expect, ok := sums[k]
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, v, expect)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiHasherTypes(t *testing.T) {
|
||||
h := fs.HashSHA1
|
||||
for _, test := range hashTestSet {
|
||||
mh, err := fs.NewMultiHasherTypes(fs.NewHashSet(h))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err := io.Copy(mh, bytes.NewBuffer(test.input))
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, test.input, int(n))
|
||||
sums := mh.Sums()
|
||||
assert.Len(t, sums, 1)
|
||||
assert.Equal(t, sums[h], test.output[h])
|
||||
}
|
||||
}
|
||||
|
||||
func TestHashStream(t *testing.T) {
|
||||
for _, test := range hashTestSet {
|
||||
sums, err := fs.HashStream(bytes.NewBuffer(test.input))
|
||||
require.NoError(t, err)
|
||||
for k, v := range sums {
|
||||
expect, ok := test.output[k]
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, v, expect)
|
||||
}
|
||||
// Test that all are present
|
||||
for k, v := range test.output {
|
||||
expect, ok := sums[k]
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, v, expect)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHashStreamTypes(t *testing.T) {
|
||||
h := fs.HashSHA1
|
||||
for _, test := range hashTestSet {
|
||||
sums, err := fs.HashStreamTypes(bytes.NewBuffer(test.input), fs.NewHashSet(h))
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, sums, 1)
|
||||
assert.Equal(t, sums[h], test.output[h])
|
||||
}
|
||||
}
|
||||
|
||||
func TestHashSetStringer(t *testing.T) {
|
||||
h := fs.NewHashSet(fs.HashSHA1, fs.HashMD5)
|
||||
assert.Equal(t, h.String(), "[MD5, SHA-1]")
|
||||
h = fs.NewHashSet(fs.HashSHA1)
|
||||
assert.Equal(t, h.String(), "[SHA-1]")
|
||||
h = fs.NewHashSet()
|
||||
assert.Equal(t, h.String(), "[]")
|
||||
}
|
||||
|
||||
func TestHashStringer(t *testing.T) {
|
||||
h := fs.HashMD5
|
||||
assert.Equal(t, h.String(), "MD5")
|
||||
h = fs.HashNone
|
||||
assert.Equal(t, h.String(), "None")
|
||||
}
|
||||
118 fs/limited.go
@@ -1,118 +0,0 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Limited defines a Fs which can only return the Objects passed in
|
||||
// from the Fs passed in
|
||||
type Limited struct {
|
||||
objects []Object
|
||||
fs Fs
|
||||
}
|
||||
|
||||
// NewLimited makes a limited Fs limited to the objects passed in
|
||||
func NewLimited(fs Fs, objects ...Object) Fs {
|
||||
f := &Limited{
|
||||
objects: objects,
|
||||
fs: fs,
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// Name is name of the remote (as passed into NewFs)
|
||||
func (f *Limited) Name() string {
|
||||
return f.fs.Name() // return name of underlying remote
|
||||
}
|
||||
|
||||
// Root is the root of the remote (as passed into NewFs)
|
||||
func (f *Limited) Root() string {
|
||||
return f.fs.Root() // return root of underlying remote
|
||||
}
|
||||
|
||||
// String returns a description of the FS
|
||||
func (f *Limited) String() string {
|
||||
return fmt.Sprintf("%s limited to %d objects", f.fs.String(), len(f.objects))
|
||||
}
|
||||
|
||||
// List the Fs into a channel
|
||||
func (f *Limited) List() ObjectsChan {
|
||||
out := make(ObjectsChan, Config.Checkers)
|
||||
go func() {
|
||||
for _, obj := range f.objects {
|
||||
out <- obj
|
||||
}
|
||||
close(out)
|
||||
}()
|
||||
return out
|
||||
}
|
||||
|
||||
// ListDir lists the Fs directories/buckets/containers into a channel
|
||||
func (f *Limited) ListDir() DirChan {
|
||||
out := make(DirChan, Config.Checkers)
|
||||
close(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// NewFsObject finds the Object at remote. Returns nil if can't be found
|
||||
func (f *Limited) NewFsObject(remote string) Object {
|
||||
for _, obj := range f.objects {
|
||||
if obj.Remote() == remote {
|
||||
return obj
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Limited) Put(in io.Reader, remote string, modTime time.Time, size int64) (Object, error) {
|
||||
obj := f.NewFsObject(remote)
|
||||
if obj == nil {
|
||||
return nil, fmt.Errorf("Can't create %q in limited fs", remote)
|
||||
}
|
||||
return obj, obj.Update(in, modTime, size)
|
||||
}
|
||||
|
||||
// Mkdir make the directory (container, bucket)
|
||||
func (f *Limited) Mkdir() error {
|
||||
// All directories are already made - just ignore
|
||||
return nil
|
||||
}
|
||||
|
||||
// Rmdir removes the directory (container, bucket) if empty
|
||||
func (f *Limited) Rmdir() error {
|
||||
// Ignore this in a limited fs
|
||||
return nil
|
||||
}
|
||||
|
||||
// Precision of the ModTimes in this Fs
|
||||
func (f *Limited) Precision() time.Duration {
|
||||
return f.fs.Precision()
|
||||
}
|
||||
|
||||
// Copy src to this remote using server side copy operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Limited) Copy(src Object, remote string) (Object, error) {
|
||||
fCopy, ok := f.fs.(Copier)
|
||||
if !ok {
|
||||
return nil, ErrorCantCopy
|
||||
}
|
||||
return fCopy.Copy(src, remote)
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var _ Fs = &Limited{}
|
||||
var _ Copier = &Limited{}
|
||||
256 fs/lister.go Normal file
@@ -0,0 +1,256 @@
|
||||
// This file implements the Lister object
|
||||
|
||||
package fs
|
||||
|
||||
import "sync"
|
||||
|
||||
// listerResult is returned by the lister methods
|
||||
type listerResult struct {
|
||||
Obj Object
|
||||
Dir *Dir
|
||||
Err error
|
||||
}
|
||||
|
||||
// Lister objects are used for controlling the listing of Fs objects
|
||||
type Lister struct {
|
||||
mu sync.RWMutex
|
||||
buffer int
|
||||
abort bool
|
||||
results chan listerResult
|
||||
finished sync.Once
|
||||
level int
|
||||
filter *Filter
|
||||
}
|
||||
|
||||
// NewLister creates a Lister object.
|
||||
//
|
||||
// The default channel buffer size will be Config.Checkers unless
|
||||
// overridden with SetBuffer. The default level will be infinite.
|
||||
func NewLister() *Lister {
|
||||
o := &Lister{}
|
||||
return o.SetLevel(-1).SetBuffer(Config.Checkers)
|
||||
}
|
||||
|
||||
// Start starts a goroutine listing the Fs passed in. It returns the
|
||||
// same Lister that was passed in for convenience.
|
||||
func (o *Lister) Start(f ListFser, dir string) *Lister {
|
||||
o.results = make(chan listerResult, o.buffer)
|
||||
go func() {
|
||||
f.List(o, dir)
|
||||
}()
|
||||
return o
|
||||
}
|
||||
|
||||
// SetLevel sets the level to recurse to. It returns same Lister that
|
||||
// was passed in for convenience. If Level is < 0 then it sets it to
|
||||
// infinite. Must be called before Start().
|
||||
func (o *Lister) SetLevel(level int) *Lister {
|
||||
if level < 0 {
|
||||
o.level = MaxLevel
|
||||
} else {
|
||||
o.level = level
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// SetFilter sets the Filter that is in use. It defaults to no
|
||||
// filtering. Must be called before Start().
|
||||
func (o *Lister) SetFilter(filter *Filter) *Lister {
|
||||
o.filter = filter
|
||||
return o
|
||||
}
|
||||
|
||||
// Level gets the recursion level for this listing.
|
||||
//
|
||||
// Fses may ignore this, but should implement it for improved efficiency if possible.
|
||||
//
|
||||
// Level 1 means list just the contents of the directory
|
||||
//
|
||||
// Each returned item must have fewer than level `/`s in it.
|
||||
func (o *Lister) Level() int {
|
||||
return o.level
|
||||
}
|
||||
|
||||
// SetBuffer sets the channel buffer size in use. Must be called
|
||||
// before Start().
|
||||
func (o *Lister) SetBuffer(buffer int) *Lister {
|
||||
if buffer < 1 {
|
||||
buffer = 1
|
||||
}
|
||||
o.buffer = buffer
|
||||
return o
|
||||
}
|
||||
|
||||
// Buffer gets the channel buffer size in use
|
||||
func (o *Lister) Buffer() int {
|
||||
return o.buffer
|
||||
}
|
||||
|
||||
// Add an object to the output.
|
||||
// If the function returns true, the operation has been aborted.
|
||||
// Multiple goroutines can safely add objects concurrently.
|
||||
func (o *Lister) Add(obj Object) (abort bool) {
|
||||
o.mu.RLock()
|
||||
defer o.mu.RUnlock()
|
||||
if o.abort {
|
||||
return true
|
||||
}
|
||||
o.results <- listerResult{Obj: obj}
|
||||
return false
|
||||
}
|
||||
|
||||
// AddDir adds a directory to the output.
|
||||
// If the function returns true, the operation has been aborted.
|
||||
// Multiple goroutines can safely add objects concurrently.
|
||||
func (o *Lister) AddDir(dir *Dir) (abort bool) {
|
||||
o.mu.RLock()
|
||||
defer o.mu.RUnlock()
|
||||
if o.abort {
|
||||
return true
|
||||
}
|
||||
o.results <- listerResult{Dir: dir}
|
||||
return false
|
||||
}
|
||||
|
||||
// IncludeDirectory returns whether this directory should be
|
||||
// included in the listing (and recursed into or not).
|
||||
func (o *Lister) IncludeDirectory(remote string) bool {
|
||||
if o.filter == nil {
|
||||
return true
|
||||
}
|
||||
return o.filter.IncludeDirectory(remote)
|
||||
}
|
||||
|
||||
// SetError will set an error state, and will cause the listing to
|
||||
// be aborted.
|
||||
// Multiple goroutines can set the error state concurrently,
|
||||
// but only the first will be returned to the caller.
|
||||
func (o *Lister) SetError(err error) {
|
||||
o.mu.RLock()
|
||||
if err != nil && !o.abort {
|
||||
o.results <- listerResult{Err: err}
|
||||
}
|
||||
o.mu.RUnlock()
|
||||
o.Finished()
|
||||
}
|
||||
|
||||
// Finished should be called when listing is finished
|
||||
func (o *Lister) Finished() {
|
||||
o.finished.Do(func() {
|
||||
o.mu.Lock()
|
||||
o.abort = true
|
||||
close(o.results)
|
||||
o.mu.Unlock()
|
||||
})
|
||||
}
|
||||
|
||||
// IsFinished returns whether the directory listing is finished or not
|
||||
func (o *Lister) IsFinished() bool {
|
||||
o.mu.RLock()
|
||||
defer o.mu.RUnlock()
|
||||
return o.abort
|
||||
}
|
||||
|
||||
// Get an object from the listing.
|
||||
// Will return either an object or a directory, never both.
|
||||
// Will return (nil, nil, nil) when all objects have been returned.
|
||||
func (o *Lister) Get() (Object, *Dir, error) {
|
||||
select {
|
||||
case r := <-o.results:
|
||||
return r.Obj, r.Dir, r.Err
|
||||
}
|
||||
}
|
||||
|
||||
// GetAll gets all the objects and dirs from the listing.
|
||||
func (o *Lister) GetAll() (objs []Object, dirs []*Dir, err error) {
|
||||
for {
|
||||
obj, dir, err := o.Get()
|
||||
switch {
|
||||
case err != nil:
|
||||
return nil, nil, err
|
||||
case obj != nil:
|
||||
objs = append(objs, obj)
|
||||
case dir != nil:
|
||||
dirs = append(dirs, dir)
|
||||
default:
|
||||
return objs, dirs, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetObject will return an object from the listing.
|
||||
// It will skip over any directories.
|
||||
// Will return (nil, nil) when all objects have been returned.
|
||||
func (o *Lister) GetObject() (Object, error) {
|
||||
for {
|
||||
obj, dir, err := o.Get()
|
||||
switch {
|
||||
case err != nil:
|
||||
return nil, err
|
||||
case obj != nil:
|
||||
return obj, nil
|
||||
case dir != nil:
|
||||
// ignore
|
||||
default:
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetObjects will return a slice of objects from the listing.
|
||||
// It will skip over any directories.
|
||||
func (o *Lister) GetObjects() (objs []Object, err error) {
|
||||
for {
|
||||
obj, dir, err := o.Get()
|
||||
switch {
|
||||
case err != nil:
|
||||
return nil, err
|
||||
case obj != nil:
|
||||
objs = append(objs, obj)
|
||||
case dir != nil:
|
||||
// ignore
|
||||
default:
|
||||
return objs, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetDir will return a directory from the listing.
|
||||
// It will skip over any objects.
|
||||
// Will return (nil, nil) when all objects have been returned.
|
||||
func (o *Lister) GetDir() (*Dir, error) {
|
||||
for {
|
||||
obj, dir, err := o.Get()
|
||||
switch {
|
||||
case err != nil:
|
||||
return nil, err
|
||||
case obj != nil:
|
||||
// ignore
|
||||
case dir != nil:
|
||||
return dir, nil
|
||||
default:
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetDirs will return a slice of directories from the listing.
|
||||
// It will skip over any objects.
|
||||
func (o *Lister) GetDirs() (dirs []*Dir, err error) {
|
||||
for {
|
||||
obj, dir, err := o.Get()
|
||||
switch {
|
||||
case err != nil:
|
||||
return nil, err
|
||||
case obj != nil:
|
||||
// ignore
|
||||
case dir != nil:
|
||||
dirs = append(dirs, dir)
|
||||
default:
|
||||
return dirs, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check interface
|
||||
var _ ListOpts = (*Lister)(nil)
|
||||
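A minimal usage sketch of the new Lister API above, not part of the commit; listShallow and the value passed for f are illustrative, and any ListFser (such as the mockFs defined in the test file below) would do.

// listShallow is a hypothetical helper showing typical use of the Lister
// API: configure it, start the listing goroutine, then drain the results.
func listShallow(f ListFser) (objs []Object, dirs []*Dir, err error) {
	lister := NewLister().
		SetLevel(1). // list just the contents of the root directory
		SetBuffer(8) // small channel buffer; the default is Config.Checkers
	lister.Start(f, "")
	// GetAll drains the results until Finished() or SetError() closes them.
	return lister.GetAll()
}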
fs/lister_test.go (new file, 350 lines)
@@ -0,0 +1,350 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestListerNew(t *testing.T) {
|
||||
o := NewLister()
|
||||
assert.Equal(t, Config.Checkers, o.buffer)
|
||||
assert.Equal(t, false, o.abort)
|
||||
assert.Equal(t, MaxLevel, o.level)
|
||||
}
|
||||
|
||||
var errNotImpl = errors.New("not implemented")
|
||||
|
||||
type mockObject string
|
||||
|
||||
func (o mockObject) String() string { return string(o) }
|
||||
func (o mockObject) Fs() Info { return nil }
|
||||
func (o mockObject) Remote() string { return string(o) }
|
||||
func (o mockObject) Hash(HashType) (string, error) { return "", errNotImpl }
|
||||
func (o mockObject) ModTime() (t time.Time) { return t }
|
||||
func (o mockObject) Size() int64 { return 0 }
|
||||
func (o mockObject) Storable() bool { return true }
|
||||
func (o mockObject) SetModTime(time.Time) error { return errNotImpl }
|
||||
func (o mockObject) Open() (io.ReadCloser, error) { return nil, errNotImpl }
|
||||
func (o mockObject) Update(in io.Reader, src ObjectInfo) error { return errNotImpl }
|
||||
func (o mockObject) Remove() error { return errNotImpl }
|
||||
|
||||
type mockFs struct {
|
||||
listFn func(o ListOpts, dir string)
|
||||
}
|
||||
|
||||
func (f *mockFs) List(o ListOpts, dir string) {
|
||||
defer o.Finished()
|
||||
f.listFn(o, dir)
|
||||
}
|
||||
|
||||
func TestListerStart(t *testing.T) {
|
||||
f := &mockFs{}
|
||||
ranList := false
|
||||
f.listFn = func(o ListOpts, dir string) {
|
||||
ranList = true
|
||||
}
|
||||
o := NewLister().Start(f, "")
|
||||
objs, dirs, err := o.GetAll()
|
||||
require.Nil(t, err)
|
||||
assert.Len(t, objs, 0)
|
||||
assert.Len(t, dirs, 0)
|
||||
assert.Equal(t, true, ranList)
|
||||
}
|
||||
|
||||
func TestListerSetLevel(t *testing.T) {
|
||||
o := NewLister()
|
||||
o.SetLevel(1)
|
||||
assert.Equal(t, 1, o.level)
|
||||
o.SetLevel(0)
|
||||
assert.Equal(t, 0, o.level)
|
||||
o.SetLevel(-1)
|
||||
assert.Equal(t, MaxLevel, o.level)
|
||||
}
|
||||
|
||||
func TestListerSetFilter(t *testing.T) {
|
||||
filter := &Filter{}
|
||||
o := NewLister().SetFilter(filter)
|
||||
assert.Equal(t, filter, o.filter)
|
||||
}
|
||||
|
||||
func TestListerLevel(t *testing.T) {
|
||||
o := NewLister()
|
||||
assert.Equal(t, MaxLevel, o.Level())
|
||||
o.SetLevel(123)
|
||||
assert.Equal(t, 123, o.Level())
|
||||
}
|
||||
|
||||
func TestListerSetBuffer(t *testing.T) {
|
||||
o := NewLister()
|
||||
o.SetBuffer(2)
|
||||
assert.Equal(t, 2, o.buffer)
|
||||
o.SetBuffer(1)
|
||||
assert.Equal(t, 1, o.buffer)
|
||||
o.SetBuffer(0)
|
||||
assert.Equal(t, 1, o.buffer)
|
||||
o.SetBuffer(-1)
|
||||
assert.Equal(t, 1, o.buffer)
|
||||
}
|
||||
|
||||
func TestListerBuffer(t *testing.T) {
|
||||
o := NewLister()
|
||||
assert.Equal(t, Config.Checkers, o.Buffer())
|
||||
o.SetBuffer(123)
|
||||
assert.Equal(t, 123, o.Buffer())
|
||||
}
|
||||
|
||||
func TestListerAdd(t *testing.T) {
|
||||
f := &mockFs{}
|
||||
objs := []Object{
|
||||
mockObject("1"),
|
||||
mockObject("2"),
|
||||
}
|
||||
f.listFn = func(o ListOpts, dir string) {
|
||||
for _, obj := range objs {
|
||||
assert.Equal(t, false, o.Add(obj))
|
||||
}
|
||||
}
|
||||
o := NewLister().Start(f, "")
|
||||
gotObjs, gotDirs, err := o.GetAll()
|
||||
require.Nil(t, err)
|
||||
assert.Equal(t, objs, gotObjs)
|
||||
assert.Len(t, gotDirs, 0)
|
||||
}
|
||||
|
||||
func TestListerAddDir(t *testing.T) {
|
||||
f := &mockFs{}
|
||||
dirs := []*Dir{
|
||||
&Dir{Name: "1"},
|
||||
&Dir{Name: "2"},
|
||||
}
|
||||
f.listFn = func(o ListOpts, dir string) {
|
||||
for _, dir := range dirs {
|
||||
assert.Equal(t, false, o.AddDir(dir))
|
||||
}
|
||||
}
|
||||
o := NewLister().Start(f, "")
|
||||
gotObjs, gotDirs, err := o.GetAll()
|
||||
require.Nil(t, err)
|
||||
assert.Len(t, gotObjs, 0)
|
||||
assert.Equal(t, dirs, gotDirs)
|
||||
}
|
||||
|
||||
func TestListerIncludeDirectory(t *testing.T) {
|
||||
o := NewLister()
|
||||
assert.Equal(t, true, o.IncludeDirectory("whatever"))
|
||||
filter, err := NewFilter()
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, filter)
|
||||
require.Nil(t, filter.AddRule("!"))
|
||||
require.Nil(t, filter.AddRule("+ potato/*"))
|
||||
require.Nil(t, filter.AddRule("- *"))
|
||||
o.SetFilter(filter)
|
||||
assert.Equal(t, false, o.IncludeDirectory("floop"))
|
||||
assert.Equal(t, true, o.IncludeDirectory("potato"))
|
||||
assert.Equal(t, false, o.IncludeDirectory("potato/sausage"))
|
||||
}
|
||||
|
||||
func TestListerSetError(t *testing.T) {
|
||||
f := &mockFs{}
|
||||
f.listFn = func(o ListOpts, dir string) {
|
||||
assert.Equal(t, false, o.Add(mockObject("1")))
|
||||
o.SetError(errNotImpl)
|
||||
assert.Equal(t, true, o.Add(mockObject("2")))
|
||||
o.SetError(errors.New("not signalled"))
|
||||
assert.Equal(t, true, o.AddDir(&Dir{Name: "2"}))
|
||||
}
|
||||
o := NewLister().Start(f, "")
|
||||
gotObjs, gotDirs, err := o.GetAll()
|
||||
assert.Equal(t, err, errNotImpl)
|
||||
assert.Nil(t, gotObjs)
|
||||
assert.Nil(t, gotDirs)
|
||||
}
|
||||
|
||||
func TestListerIsFinished(t *testing.T) {
|
||||
f := &mockFs{}
|
||||
f.listFn = func(o ListOpts, dir string) {
|
||||
assert.Equal(t, false, o.IsFinished())
|
||||
o.Finished()
|
||||
assert.Equal(t, true, o.IsFinished())
|
||||
}
|
||||
o := NewLister().Start(f, "")
|
||||
gotObjs, gotDirs, err := o.GetAll()
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, gotObjs, 0)
|
||||
assert.Len(t, gotDirs, 0)
|
||||
}
|
||||
|
||||
func testListerGet(t *testing.T) *Lister {
|
||||
f := &mockFs{}
|
||||
f.listFn = func(o ListOpts, dir string) {
|
||||
assert.Equal(t, false, o.Add(mockObject("1")))
|
||||
assert.Equal(t, false, o.AddDir(&Dir{Name: "2"}))
|
||||
}
|
||||
return NewLister().Start(f, "")
|
||||
}
|
||||
|
||||
func TestListerGet(t *testing.T) {
|
||||
o := testListerGet(t)
|
||||
obj, dir, err := o.Get()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, obj.Remote(), "1")
|
||||
assert.Nil(t, dir)
|
||||
obj, dir, err = o.Get()
|
||||
assert.Nil(t, err)
|
||||
assert.Nil(t, obj)
|
||||
assert.Equal(t, dir.Name, "2")
|
||||
obj, dir, err = o.Get()
|
||||
assert.Nil(t, err)
|
||||
assert.Nil(t, obj)
|
||||
assert.Nil(t, dir)
|
||||
assert.Equal(t, true, o.IsFinished())
|
||||
}
|
||||
|
||||
func TestListerGetObject(t *testing.T) {
|
||||
o := testListerGet(t)
|
||||
obj, err := o.GetObject()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, obj.Remote(), "1")
|
||||
obj, err = o.GetObject()
|
||||
assert.Nil(t, err)
|
||||
assert.Nil(t, obj)
|
||||
assert.Equal(t, true, o.IsFinished())
|
||||
}
|
||||
|
||||
func TestListerGetDir(t *testing.T) {
|
||||
o := testListerGet(t)
|
||||
dir, err := o.GetDir()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, dir.Name, "2")
|
||||
dir, err = o.GetDir()
|
||||
assert.Nil(t, err)
|
||||
assert.Nil(t, dir)
|
||||
assert.Equal(t, true, o.IsFinished())
|
||||
}
|
||||
|
||||
func testListerGetError(t *testing.T) *Lister {
|
||||
f := &mockFs{}
|
||||
f.listFn = func(o ListOpts, dir string) {
|
||||
o.SetError(errNotImpl)
|
||||
}
|
||||
return NewLister().Start(f, "")
|
||||
}
|
||||
|
||||
func TestListerGetError(t *testing.T) {
|
||||
o := testListerGetError(t)
|
||||
obj, dir, err := o.Get()
|
||||
assert.Equal(t, err, errNotImpl)
|
||||
assert.Nil(t, obj)
|
||||
assert.Nil(t, dir)
|
||||
obj, dir, err = o.Get()
|
||||
assert.Nil(t, err)
|
||||
assert.Nil(t, obj)
|
||||
assert.Nil(t, dir)
|
||||
assert.Equal(t, true, o.IsFinished())
|
||||
}
|
||||
|
||||
func TestListerGetObjectError(t *testing.T) {
|
||||
o := testListerGetError(t)
|
||||
obj, err := o.GetObject()
|
||||
assert.Equal(t, err, errNotImpl)
|
||||
assert.Nil(t, obj)
|
||||
obj, err = o.GetObject()
|
||||
assert.Nil(t, err)
|
||||
assert.Nil(t, obj)
|
||||
assert.Equal(t, true, o.IsFinished())
|
||||
}
|
||||
|
||||
func TestListerGetDirError(t *testing.T) {
|
||||
o := testListerGetError(t)
|
||||
dir, err := o.GetDir()
|
||||
assert.Equal(t, err, errNotImpl)
|
||||
assert.Nil(t, dir)
|
||||
dir, err = o.GetDir()
|
||||
assert.Nil(t, err)
|
||||
assert.Nil(t, dir)
|
||||
assert.Equal(t, true, o.IsFinished())
|
||||
}
|
||||
|
||||
func testListerGetAll(t *testing.T) (*Lister, []Object, []*Dir) {
|
||||
objs := []Object{
|
||||
mockObject("1f"),
|
||||
mockObject("2f"),
|
||||
mockObject("3f"),
|
||||
}
|
||||
dirs := []*Dir{
|
||||
&Dir{Name: "1d"},
|
||||
&Dir{Name: "2d"},
|
||||
}
|
||||
f := &mockFs{}
|
||||
f.listFn = func(o ListOpts, dir string) {
|
||||
assert.Equal(t, false, o.Add(objs[0]))
|
||||
assert.Equal(t, false, o.Add(objs[1]))
|
||||
assert.Equal(t, false, o.AddDir(dirs[0]))
|
||||
assert.Equal(t, false, o.Add(objs[2]))
|
||||
assert.Equal(t, false, o.AddDir(dirs[1]))
|
||||
}
|
||||
return NewLister().Start(f, ""), objs, dirs
|
||||
}
|
||||
|
||||
func TestListerGetAll(t *testing.T) {
|
||||
o, objs, dirs := testListerGetAll(t)
|
||||
gotObjs, gotDirs, err := o.GetAll()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, objs, gotObjs)
|
||||
assert.Equal(t, dirs, gotDirs)
|
||||
assert.Equal(t, true, o.IsFinished())
|
||||
}
|
||||
|
||||
func TestListerGetObjects(t *testing.T) {
|
||||
o, objs, _ := testListerGetAll(t)
|
||||
gotObjs, err := o.GetObjects()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, objs, gotObjs)
|
||||
assert.Equal(t, true, o.IsFinished())
|
||||
}
|
||||
|
||||
func TestListerGetDirs(t *testing.T) {
|
||||
o, _, dirs := testListerGetAll(t)
|
||||
gotDirs, err := o.GetDirs()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, dirs, gotDirs)
|
||||
assert.Equal(t, true, o.IsFinished())
|
||||
}
|
||||
|
||||
func testListerGetAllError(t *testing.T) *Lister {
|
||||
f := &mockFs{}
|
||||
f.listFn = func(o ListOpts, dir string) {
|
||||
o.SetError(errNotImpl)
|
||||
}
|
||||
return NewLister().Start(f, "")
|
||||
}
|
||||
|
||||
func TestListerGetAllError(t *testing.T) {
|
||||
o := testListerGetAllError(t)
|
||||
gotObjs, gotDirs, err := o.GetAll()
|
||||
assert.Equal(t, errNotImpl, err)
|
||||
assert.Len(t, gotObjs, 0)
|
||||
assert.Len(t, gotDirs, 0)
|
||||
assert.Equal(t, true, o.IsFinished())
|
||||
}
|
||||
|
||||
func TestListerGetObjectsError(t *testing.T) {
|
||||
o := testListerGetAllError(t)
|
||||
gotObjs, err := o.GetObjects()
|
||||
assert.Equal(t, errNotImpl, err)
|
||||
assert.Len(t, gotObjs, 0)
|
||||
assert.Equal(t, true, o.IsFinished())
|
||||
}
|
||||
|
||||
func TestListerGetDirsError(t *testing.T) {
|
||||
o := testListerGetAllError(t)
|
||||
gotDirs, err := o.GetDirs()
|
||||
assert.Equal(t, errNotImpl, err)
|
||||
assert.Len(t, gotDirs, 0)
|
||||
assert.Equal(t, true, o.IsFinished())
|
||||
}
|
||||
@@ -3,7 +3,6 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
)
|
||||
@@ -35,7 +34,7 @@ func (t *LoggedTransport) CancelRequest(req *http.Request) {
|
||||
if wrapped, ok := t.wrapped.(interface {
|
||||
CancelRequest(*http.Request)
|
||||
}); ok {
|
||||
log.Printf("CANCEL REQUEST %v", req)
|
||||
Debug(nil, "CANCEL REQUEST %v", req)
|
||||
wrapped.CancelRequest(req)
|
||||
}
|
||||
}
|
||||
@@ -43,15 +42,19 @@ func (t *LoggedTransport) CancelRequest(req *http.Request) {
|
||||
// RoundTrip implements the RoundTripper interface.
|
||||
func (t *LoggedTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
|
||||
buf, _ := httputil.DumpRequestOut(req, t.logBody)
|
||||
log.Println(separatorReq)
|
||||
log.Println("HTTP REQUEST")
|
||||
log.Println(string(buf))
|
||||
log.Println(separatorReq)
|
||||
Debug(nil, "%s", separatorReq)
|
||||
Debug(nil, "%s", "HTTP REQUEST")
|
||||
Debug(nil, "%s", string(buf))
|
||||
Debug(nil, "%s", separatorReq)
|
||||
resp, err = t.wrapped.RoundTrip(req)
|
||||
buf, _ = httputil.DumpResponse(resp, t.logBody)
|
||||
log.Println(separatorResp)
|
||||
log.Println("HTTP RESPONSE")
|
||||
log.Println(string(buf))
|
||||
log.Println(separatorResp)
|
||||
Debug(nil, "%s", separatorResp)
|
||||
Debug(nil, "%s", "HTTP RESPONSE")
|
||||
if err != nil {
|
||||
Debug(nil, "Error: %v", err)
|
||||
} else {
|
||||
buf, _ = httputil.DumpResponse(resp, t.logBody)
|
||||
Debug(nil, "%s", string(buf))
|
||||
}
|
||||
Debug(nil, "%s", separatorResp)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
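One detail in the hunk above worth spelling out: the dumped request and response are passed to Debug through an explicit "%s" verb instead of being used as the format string, so literal % characters inside the dumped HTTP data are never interpreted as formatting directives. A small sketch of the difference, assuming only the Debug signature used above:

// Safe: buf is supplied as an argument to the "%s" verb.
Debug(nil, "%s", string(buf))
// Risky: any "%" in buf (for example "progress=50%25") would be parsed as a verb.
// Debug(nil, string(buf))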
fs/make_test_files.go (new file, 146 lines)
@@ -0,0 +1,146 @@
|
||||
// +build ignore
|
||||
|
||||
// Build a directory structure with the required number of files in it.
|
||||
//
|
||||
// Run with go run make_test_files.go [flag] <directory>
|
||||
package main
|
||||
|
||||
import (
|
||||
cryptrand "crypto/rand"
|
||||
"flag"
|
||||
"io"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
var (
|
||||
// Flags
|
||||
numberOfFiles = flag.Int("n", 1000, "Number of files to create")
|
||||
averageFilesPerDirectory = flag.Int("files-per-directory", 10, "Average number of files per directory")
|
||||
maxDepth = flag.Int("max-depth", 10, "Maximum depth of directory hierarchy")
|
||||
minFileSize = flag.Int64("min-size", 0, "Minimum size of file to create")
|
||||
maxFileSize = flag.Int64("max-size", 100, "Maximum size of files to create")
|
||||
minFileNameLength = flag.Int("min-name-length", 4, "Minimum length of file names to create")
|
||||
maxFileNameLength = flag.Int("max-name-length", 12, "Maximum length of file names to create")
|
||||
|
||||
directoriesToCreate int
|
||||
totalDirectories int
|
||||
fileNames = map[string]struct{}{} // keep a note of which file name we've used already
|
||||
)
|
||||
|
||||
// randomString creates a random string for test purposes
|
||||
func randomString(n int) string {
|
||||
const (
|
||||
vowel = "aeiou"
|
||||
consonant = "bcdfghjklmnpqrstvwxyz"
|
||||
digit = "0123456789"
|
||||
)
|
||||
pattern := []string{consonant, vowel, consonant, vowel, consonant, vowel, consonant, digit}
|
||||
out := make([]byte, n)
|
||||
p := 0
|
||||
for i := range out {
|
||||
source := pattern[p]
|
||||
p = (p + 1) % len(pattern)
|
||||
out[i] = source[rand.Intn(len(source))]
|
||||
}
|
||||
return string(out)
|
||||
}
|
||||
|
||||
// fileName creates a unique random file or directory name
|
||||
func fileName() (name string) {
|
||||
for {
|
||||
length := rand.Intn(*maxFileNameLength-*minFileNameLength) + *minFileNameLength
|
||||
name = randomString(length)
|
||||
if _, found := fileNames[name]; !found {
|
||||
break
|
||||
}
|
||||
}
|
||||
fileNames[name] = struct{}{}
|
||||
return name
|
||||
}
|
||||
|
||||
// dir is a directory in the directory hierarchy being built up
|
||||
type dir struct {
|
||||
name string
|
||||
depth int
|
||||
children []*dir
|
||||
parent *dir
|
||||
}
|
||||
|
||||
// Create a random directory hierarchy under d
|
||||
func (d *dir) createDirectories() {
|
||||
for totalDirectories < directoriesToCreate {
|
||||
newDir := &dir{
|
||||
name: fileName(),
|
||||
depth: d.depth + 1,
|
||||
parent: d,
|
||||
}
|
||||
d.children = append(d.children, newDir)
|
||||
totalDirectories++
|
||||
switch rand.Intn(4) {
|
||||
case 0:
|
||||
if d.depth < *maxDepth {
|
||||
newDir.createDirectories()
|
||||
}
|
||||
case 1:
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// list the directory hierarchy
|
||||
func (d *dir) list(path string, output []string) []string {
|
||||
dirPath := path + "/" + d.name
|
||||
output = append(output, dirPath)
|
||||
for _, subDir := range d.children {
|
||||
output = subDir.list(dirPath, output)
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
// writeFile writes a random file at dir/name
|
||||
func writeFile(dir, name string) {
|
||||
err := os.MkdirAll(dir, 0777)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to make directory %q: %v", dir, err)
|
||||
}
|
||||
path := filepath.Join(dir, name)
|
||||
fd, err := os.Create(path)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to open file %q: %v", path, err)
|
||||
}
|
||||
size := rand.Int63n(*maxFileSize-*minFileSize) + *minFileSize
|
||||
_, err = io.CopyN(fd, cryptrand.Reader, size)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to write %v bytes to file %q: %v", size, path, err)
|
||||
}
|
||||
err = fd.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to close file %q: %v", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
args := flag.Args()
|
||||
if len(args) != 1 {
|
||||
log.Fatalf("Require 1 directory argument")
|
||||
}
|
||||
outputDirectory := args[0]
|
||||
log.Printf("Output dir %q", outputDirectory)
|
||||
|
||||
directoriesToCreate = *numberOfFiles / *averageFilesPerDirectory
|
||||
log.Printf("directoriesToCreate %v", directoriesToCreate)
|
||||
root := &dir{name: outputDirectory, depth: 1}
|
||||
for totalDirectories < directoriesToCreate {
|
||||
root.createDirectories()
|
||||
}
|
||||
dirs := root.list("", []string{})
|
||||
for i := 0; i < *numberOfFiles; i++ {
|
||||
dir := dirs[rand.Intn(len(dirs))]
|
||||
writeFile(dir, fileName())
|
||||
}
|
||||
}
|
||||
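For reference, two hypothetical invocations of the generator above; the flag values and target directories are only examples:

go run make_test_files.go /tmp/test-tree
go run make_test_files.go -n 5000 -files-per-directory 20 -max-depth 3 /tmp/big-tree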
fs/operations.go (992 lines): diff suppressed because it is too large
fs/sync.go (new file, 528 lines)
@@ -0,0 +1,528 @@
|
||||
// Implementation of sync/copy/move
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type syncCopyMove struct {
|
||||
// parameters
|
||||
fdst Fs
|
||||
fsrc Fs
|
||||
Delete bool
|
||||
DoMove bool
|
||||
dir string
|
||||
// internal state
|
||||
noTraverse bool // if set don't traverse the dst
|
||||
deleteBefore bool // set if we must delete objects before copying
|
||||
dstFiles map[string]Object // dst files, only used if Delete
|
||||
srcFiles map[string]Object // src files, only used if deleteBefore
|
||||
srcFilesChan chan Object // passes src objects
|
||||
srcFilesResult chan error // error result of src listing
|
||||
dstFilesResult chan error // error result of dst listing
|
||||
abort chan struct{} // signal to abort the copiers
|
||||
checkerWg sync.WaitGroup // wait for checkers
|
||||
toBeChecked ObjectPairChan // checkers channel
|
||||
copierWg sync.WaitGroup // wait for copiers
|
||||
toBeUploaded ObjectPairChan // copiers channel
|
||||
errorMu sync.Mutex // Mutex covering the errors variables
|
||||
err error // normal error from copy process
|
||||
noRetryErr error // error with NoRetry set
|
||||
fatalErr error // fatal error
|
||||
}
|
||||
|
||||
func newSyncCopyMove(fdst, fsrc Fs, Delete bool, DoMove bool) *syncCopyMove {
|
||||
s := &syncCopyMove{
|
||||
fdst: fdst,
|
||||
fsrc: fsrc,
|
||||
Delete: Delete,
|
||||
DoMove: DoMove,
|
||||
dir: "",
|
||||
srcFilesChan: make(chan Object, Config.Checkers+Config.Transfers),
|
||||
srcFilesResult: make(chan error, 1),
|
||||
dstFilesResult: make(chan error, 1),
|
||||
noTraverse: Config.NoTraverse,
|
||||
abort: make(chan struct{}),
|
||||
toBeChecked: make(ObjectPairChan, Config.Transfers),
|
||||
toBeUploaded: make(ObjectPairChan, Config.Transfers),
|
||||
deleteBefore: Delete && Config.DeleteBefore,
|
||||
}
|
||||
if s.noTraverse && s.Delete {
|
||||
Debug(s.fdst, "Ignoring --no-traverse with sync")
|
||||
s.noTraverse = false
|
||||
}
|
||||
return s
|
||||
|
||||
}
|
||||
|
||||
// Check to see if we have set the abort flag
|
||||
func (s *syncCopyMove) aborting() bool {
|
||||
select {
|
||||
case <-s.abort:
|
||||
return true
|
||||
default:
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// This reads the source files from s.srcFiles into srcFilesChan then
|
||||
// closes it
|
||||
//
|
||||
// It returns the final result of the read into s.srcFilesResult
|
||||
func (s *syncCopyMove) readSrcUsingMap() {
|
||||
outer:
|
||||
for _, o := range s.srcFiles {
|
||||
if s.aborting() {
|
||||
break outer
|
||||
}
|
||||
select {
|
||||
case s.srcFilesChan <- o:
|
||||
case <-s.abort:
|
||||
break outer
|
||||
}
|
||||
}
|
||||
close(s.srcFilesChan)
|
||||
s.srcFilesResult <- nil
|
||||
}
|
||||
|
||||
// This reads the source files into srcFilesChan then closes it
|
||||
//
|
||||
// It returns the final result of the read into s.srcFilesResult
|
||||
func (s *syncCopyMove) readSrcUsingChan() {
|
||||
err := readFilesFn(s.fsrc, false, s.dir, func(o Object) error {
|
||||
if s.aborting() {
|
||||
return ErrorListAborted
|
||||
}
|
||||
select {
|
||||
case s.srcFilesChan <- o:
|
||||
case <-s.abort:
|
||||
return ErrorListAborted
|
||||
}
|
||||
return nil
|
||||
})
|
||||
close(s.srcFilesChan)
|
||||
s.srcFilesResult <- err
|
||||
}
|
||||
|
||||
// This reads the destination files into dstFiles
|
||||
//
|
||||
// It returns the final result of the read into s.dstFilesResult
|
||||
func (s *syncCopyMove) readDstFiles() {
|
||||
var err error
|
||||
s.dstFiles, err = readFilesMap(s.fdst, Config.Filter.DeleteExcluded, s.dir)
|
||||
s.dstFilesResult <- err
|
||||
}
|
||||
|
||||
// Check to see if src needs to be copied to dst and if so puts it in out
|
||||
func (s *syncCopyMove) checkOne(pair ObjectPair, out ObjectPairChan) {
|
||||
src, dst := pair.src, pair.dst
|
||||
if dst == nil {
|
||||
Debug(src, "Couldn't find file - need to transfer")
|
||||
out <- pair
|
||||
return
|
||||
}
|
||||
// Check to see if we can store this
|
||||
if !src.Storable() {
|
||||
return
|
||||
}
|
||||
// If we should ignore existing files, don't transfer
|
||||
if Config.IgnoreExisting {
|
||||
Debug(src, "Destination exists, skipping")
|
||||
return
|
||||
}
|
||||
// If we should upload unconditionally
|
||||
if Config.IgnoreTimes {
|
||||
Debug(src, "Uploading unconditionally as --ignore-times is in use")
|
||||
out <- pair
|
||||
return
|
||||
}
|
||||
// If UpdateOlder is in effect, skip if dst is newer than src
|
||||
if Config.UpdateOlder {
|
||||
srcModTime := src.ModTime()
|
||||
dstModTime := dst.ModTime()
|
||||
dt := dstModTime.Sub(srcModTime)
|
||||
// If we have a mutually agreed precision then use that
|
||||
modifyWindow := Config.ModifyWindow
|
||||
if modifyWindow == ModTimeNotSupported {
|
||||
// Otherwise use 1 second as a safe default as
|
||||
// the resolution of the time a file was
|
||||
// uploaded.
|
||||
modifyWindow = time.Second
|
||||
}
|
||||
switch {
|
||||
case dt >= modifyWindow:
|
||||
Debug(src, "Destination is newer than source, skipping")
|
||||
return
|
||||
case dt <= -modifyWindow:
|
||||
Debug(src, "Destination is older than source, transferring")
|
||||
default:
|
||||
if src.Size() == dst.Size() {
|
||||
Debug(src, "Destination mod time is within %v of source and sizes identical, skipping", modifyWindow)
|
||||
return
|
||||
}
|
||||
Debug(src, "Destination mod time is within %v of source but sizes differ, transferring", modifyWindow)
|
||||
}
|
||||
} else {
|
||||
// Check to see if changed or not
|
||||
if Equal(src, dst) {
|
||||
Debug(src, "Unchanged skipping")
|
||||
return
|
||||
}
|
||||
}
|
||||
out <- pair
|
||||
}
|
||||
|
||||
// This checks the types of errors returned while copying files
|
||||
func (s *syncCopyMove) processError(err error) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
s.errorMu.Lock()
|
||||
defer s.errorMu.Unlock()
|
||||
switch {
|
||||
case IsFatalError(err):
|
||||
close(s.abort)
|
||||
s.fatalErr = err
|
||||
case IsNoRetryError(err):
|
||||
s.noRetryErr = err
|
||||
default:
|
||||
s.err = err
|
||||
}
|
||||
}
|
||||
|
||||
// pairChecker reads Objects on in and sends them to out if they need transferring.
|
||||
//
|
||||
// FIXME potentially doing lots of hashes at once
|
||||
func (s *syncCopyMove) pairChecker(in ObjectPairChan, out ObjectPairChan, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
for {
|
||||
if s.aborting() {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case pair, ok := <-in:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
src := pair.src
|
||||
Stats.Checking(src.Remote())
|
||||
s.checkOne(pair, out)
|
||||
Stats.DoneChecking(src.Remote())
|
||||
case <-s.abort:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// pairCopier reads Objects on in and copies them.
|
||||
func (s *syncCopyMove) pairCopier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
for {
|
||||
if s.aborting() {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case pair, ok := <-in:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
src := pair.src
|
||||
Stats.Transferring(src.Remote())
|
||||
if Config.DryRun {
|
||||
Log(src, "Not copying as --dry-run")
|
||||
} else {
|
||||
s.processError(Copy(fdst, pair.dst, src))
|
||||
}
|
||||
Stats.DoneTransferring(src.Remote())
|
||||
case <-s.abort:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// pairMover reads Objects on in and moves them if possible, or copies
|
||||
// them if not
|
||||
func (s *syncCopyMove) pairMover(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
// See if we have Move available
|
||||
fdstMover, haveMover := fdst.(Mover)
|
||||
for {
|
||||
if s.aborting() {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case pair, ok := <-in:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
src := pair.src
|
||||
dst := pair.dst
|
||||
Stats.Transferring(src.Remote())
|
||||
if Config.DryRun {
|
||||
Log(src, "Not moving as --dry-run")
|
||||
} else if haveMover && src.Fs().Name() == fdst.Name() {
|
||||
// Delete destination if it exists
|
||||
if dst != nil {
|
||||
s.processError(DeleteFile(dst))
|
||||
}
|
||||
// Move dst <- src
|
||||
_, err := fdstMover.Move(src, src.Remote())
|
||||
if err != nil {
|
||||
Stats.Error()
|
||||
ErrorLog(dst, "Couldn't move: %v", err)
|
||||
s.processError(err)
|
||||
} else {
|
||||
Debug(src, "Moved")
|
||||
}
|
||||
} else {
|
||||
// Copy dst <- src
|
||||
err := Copy(fdst, dst, src)
|
||||
s.processError(err)
|
||||
if err != nil {
|
||||
ErrorLog(src, "Not deleting as copy failed: %v", err)
|
||||
} else {
|
||||
// Delete src if no error on copy
|
||||
s.processError(DeleteFile(src))
|
||||
}
|
||||
}
|
||||
Stats.DoneTransferring(src.Remote())
|
||||
case <-s.abort:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This starts the background checkers.
|
||||
func (s *syncCopyMove) startCheckers() {
|
||||
s.checkerWg.Add(Config.Checkers)
|
||||
for i := 0; i < Config.Checkers; i++ {
|
||||
go s.pairChecker(s.toBeChecked, s.toBeUploaded, &s.checkerWg)
|
||||
}
|
||||
}
|
||||
|
||||
// This stops the background checkers
|
||||
func (s *syncCopyMove) stopCheckers() {
|
||||
close(s.toBeChecked)
|
||||
Log(s.fdst, "Waiting for checks to finish")
|
||||
s.checkerWg.Wait()
|
||||
}
|
||||
|
||||
// This starts the background transfers
|
||||
func (s *syncCopyMove) startTransfers() {
|
||||
s.copierWg.Add(Config.Transfers)
|
||||
for i := 0; i < Config.Transfers; i++ {
|
||||
if s.DoMove {
|
||||
go s.pairMover(s.toBeUploaded, s.fdst, &s.copierWg)
|
||||
} else {
|
||||
go s.pairCopier(s.toBeUploaded, s.fdst, &s.copierWg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This stops the background transfers
|
||||
func (s *syncCopyMove) stopTransfers() {
|
||||
close(s.toBeUploaded)
|
||||
Log(s.fdst, "Waiting for transfers to finish")
|
||||
s.copierWg.Wait()
|
||||
}
|
||||
|
||||
// This deletes the files in the dstFiles map. If checkSrcMap is set
// then it first checks whether each file exists in srcFiles, the
// source file map, and skips those that do; otherwise it deletes them
// unconditionally. If checkSrcMap is clear then it assumes that any
// source files that have been found have already been removed from
// dstFiles.
|
||||
func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error {
|
||||
if Stats.Errored() {
|
||||
ErrorLog(s.fdst, "%v", ErrorNotDeleting)
|
||||
return ErrorNotDeleting
|
||||
}
|
||||
|
||||
// Delete the spare files
|
||||
toDelete := make(ObjectsChan, Config.Transfers)
|
||||
go func() {
|
||||
for remote, o := range s.dstFiles {
|
||||
if checkSrcMap {
|
||||
_, exists := s.srcFiles[remote]
|
||||
if exists {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if s.aborting() {
|
||||
break
|
||||
}
|
||||
toDelete <- o
|
||||
}
|
||||
close(toDelete)
|
||||
}()
|
||||
return DeleteFiles(toDelete)
|
||||
}
|
||||
|
||||
// Syncs fsrc into fdst
|
||||
//
|
||||
// If Delete is true then it deletes any files in fdst that aren't in fsrc
|
||||
//
|
||||
// If DoMove is true then files will be moved instead of copied
|
||||
//
|
||||
// dir is the start directory, "" for root
|
||||
func (s *syncCopyMove) run() error {
|
||||
if Same(s.fdst, s.fsrc) {
|
||||
ErrorLog(s.fdst, "Nothing to do as source and destination are the same")
|
||||
return nil
|
||||
}
|
||||
|
||||
err := Mkdir(s.fdst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Start reading dstFiles if required
|
||||
if !s.noTraverse {
|
||||
go s.readDstFiles()
|
||||
}
|
||||
|
||||
// If s.deleteBefore then we need to read the whole source map first
|
||||
if s.deleteBefore {
|
||||
// Read source files into the map
|
||||
s.srcFiles, err = readFilesMap(s.fsrc, false, s.dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Pump the map into s.srcFilesChan
|
||||
go s.readSrcUsingMap()
|
||||
} else {
|
||||
go s.readSrcUsingChan()
|
||||
}
|
||||
|
||||
// Wait for dstFiles to finish reading if we were reading them
|
||||
// and report any errors
|
||||
if !s.noTraverse {
|
||||
err = <-s.dstFilesResult
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Delete files first if required
|
||||
// Have dstFiles and srcFiles complete at this point
|
||||
if s.deleteBefore {
|
||||
err = s.deleteFiles(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Start background checking and transferring pipeline
|
||||
s.startCheckers()
|
||||
s.startTransfers()
|
||||
|
||||
// Do the transfers
|
||||
for src := range s.srcFilesChan {
|
||||
remote := src.Remote()
|
||||
var dst Object
|
||||
if s.noTraverse {
|
||||
var err error
|
||||
dst, err = s.fdst.NewObject(remote)
|
||||
if err != nil {
|
||||
dst = nil
|
||||
if err != ErrorObjectNotFound {
|
||||
Debug(src, "Error making NewObject: %v", err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
dst = s.dstFiles[remote]
|
||||
// Remove file from s.dstFiles because it exists in srcFiles
|
||||
delete(s.dstFiles, remote)
|
||||
}
|
||||
if dst != nil {
|
||||
s.toBeChecked <- ObjectPair{src, dst}
|
||||
} else {
|
||||
// No need to check since it doesn't exist
|
||||
s.toBeUploaded <- ObjectPair{src, nil}
|
||||
}
|
||||
}
|
||||
|
||||
// Stop background checking and transferring pipeline
|
||||
s.stopCheckers()
|
||||
s.stopTransfers()
|
||||
|
||||
// Retrieve the delayed error from the source listing goroutine
|
||||
err = <-s.srcFilesResult
|
||||
|
||||
// Delete files during or after
|
||||
if s.Delete && (Config.DeleteDuring || Config.DeleteAfter) {
|
||||
if err != nil {
|
||||
ErrorLog(s.fdst, "%v", ErrorNotDeleting)
|
||||
} else {
|
||||
err = s.deleteFiles(false)
|
||||
}
|
||||
}
|
||||
|
||||
// Return errors in the precedence
|
||||
// fatalErr
|
||||
// error from above
|
||||
// error from a copy
|
||||
// noRetryErr
|
||||
s.processError(err)
|
||||
if s.fatalErr != nil {
|
||||
return s.fatalErr
|
||||
}
|
||||
if s.err != nil {
|
||||
return s.err
|
||||
}
|
||||
return s.noRetryErr
|
||||
}
|
||||
|
||||
// Sync fsrc into fdst
|
||||
func Sync(fdst, fsrc Fs) error {
|
||||
return newSyncCopyMove(fdst, fsrc, true, false).run()
|
||||
}
|
||||
|
||||
// CopyDir copies fsrc into fdst
|
||||
func CopyDir(fdst, fsrc Fs) error {
|
||||
return newSyncCopyMove(fdst, fsrc, false, false).run()
|
||||
}
|
||||
|
||||
// moveDir moves fsrc into fdst
|
||||
func moveDir(fdst, fsrc Fs) error {
|
||||
return newSyncCopyMove(fdst, fsrc, false, true).run()
|
||||
}
|
||||
|
||||
// MoveDir moves fsrc into fdst
|
||||
func MoveDir(fdst, fsrc Fs) error {
|
||||
if Same(fdst, fsrc) {
|
||||
ErrorLog(fdst, "Nothing to do as source and destination are the same")
|
||||
return nil
|
||||
}
|
||||
// The two remotes mustn't overlap
|
||||
if Overlapping(fdst, fsrc) {
|
||||
err := ErrorCantMoveOverlapping
|
||||
ErrorLog(fdst, "%v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// First attempt to use DirMover if it exists, the Fses are the same and no filters are active
|
||||
if fdstDirMover, ok := fdst.(DirMover); ok && fsrc.Name() == fdst.Name() && Config.Filter.InActive() {
|
||||
if Config.DryRun {
|
||||
Log(fdst, "Not doing server side directory move as --dry-run")
|
||||
return nil
|
||||
}
|
||||
Debug(fdst, "Using server side directory move")
|
||||
err := fdstDirMover.DirMove(fsrc)
|
||||
switch err {
|
||||
case ErrorCantDirMove, ErrorDirExists:
|
||||
Debug(fdst, "Server side directory move failed - fallback to file moves: %v", err)
|
||||
case nil:
|
||||
Debug(fdst, "Server side directory move succeeded")
|
||||
return nil
|
||||
default:
|
||||
Stats.Error()
|
||||
ErrorLog(fdst, "Server side directory move failed: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise move the files one by one
|
||||
return moveDir(fdst, fsrc)
|
||||
}
|
||||
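A minimal, hypothetical sketch of how the exported entry points above are meant to be called; the remote paths are placeholders, and CopyDir and MoveDir take the same pair of arguments:

// syncExample makes fdst identical to fsrc, deleting extra files on the
// destination. Both paths below are placeholders.
func syncExample() error {
	fdst, err := NewFs("remote:backup")
	if err != nil {
		return err
	}
	fsrc, err := NewFs("/home/user/files")
	if err != nil {
		return err
	}
	return Sync(fdst, fsrc)
}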
fs/sync_test.go (new file, 662 lines)
@@ -0,0 +1,662 @@
|
||||
// Test sync/copy/move
|
||||
|
||||
package fs_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Check dry run is working
|
||||
func TestCopyWithDryRun(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
|
||||
fs.Config.DryRun = true
|
||||
err := fs.CopyDir(r.fremote, r.flocal)
|
||||
fs.Config.DryRun = false
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
fstest.CheckItems(t, r.fremote)
|
||||
}
|
||||
|
||||
// Now without dry run
|
||||
func TestCopy(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
|
||||
err := fs.CopyDir(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
// Now with --no-traverse
|
||||
func TestCopyNoTraverse(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
|
||||
fs.Config.NoTraverse = true
|
||||
defer func() { fs.Config.NoTraverse = false }()
|
||||
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
|
||||
err := fs.CopyDir(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
// Now with --no-traverse
|
||||
func TestSyncNoTraverse(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
|
||||
fs.Config.NoTraverse = true
|
||||
defer func() { fs.Config.NoTraverse = false }()
|
||||
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
// Test copy with depth
|
||||
func TestCopyWithDepth(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
file2 := r.WriteFile("hello world2", "hello world2", t2)
|
||||
|
||||
// Check the MaxDepth too
|
||||
fs.Config.MaxDepth = 1
|
||||
defer func() { fs.Config.MaxDepth = -1 }()
|
||||
|
||||
err := fs.CopyDir(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1, file2)
|
||||
fstest.CheckItems(t, r.fremote, file2)
|
||||
}
|
||||
|
||||
// Test a server side copy if possible, or the backup path if not
|
||||
func TestServerSideCopy(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteObject("sub dir/hello world", "hello world", t1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
|
||||
fremoteCopy, _, finaliseCopy, err := fstest.RandomRemote(*RemoteName, *SubDir)
|
||||
require.NoError(t, err)
|
||||
defer finaliseCopy()
|
||||
t.Logf("Server side copy (if possible) %v -> %v", r.fremote, fremoteCopy)
|
||||
|
||||
err = fs.CopyDir(fremoteCopy, r.fremote)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, fremoteCopy, file1)
|
||||
}
|
||||
|
||||
// Check that if the local file doesn't exist when we copy it up,
|
||||
// nothing happens to the remote file
|
||||
func TestCopyAfterDelete(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteObject("sub dir/hello world", "hello world", t1)
|
||||
fstest.CheckItems(t, r.flocal)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
|
||||
err := fs.Mkdir(r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = fs.CopyDir(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.flocal)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
// Check the copy downloading a file
|
||||
func TestCopyRedownload(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteObject("sub dir/hello world", "hello world", t1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
|
||||
err := fs.CopyDir(r.flocal, r.fremote)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
}
|
||||
|
||||
// Create a file and sync it. Change the last modified date and resync.
|
||||
// If we're only doing sync by size and checksum, we expect nothing to
|
||||
// be transferred on the second sync.
|
||||
func TestSyncBasedOnCheckSum(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
fs.Config.CheckSum = true
|
||||
defer func() { fs.Config.CheckSum = false }()
|
||||
|
||||
file1 := r.WriteFile("check sum", "", t1)
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly one file.
|
||||
assert.Equal(t, int64(1), fs.Stats.GetTransfers())
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
|
||||
// Change last modified date only
|
||||
file2 := r.WriteFile("check sum", "", t2)
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err = fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred no files
|
||||
assert.Equal(t, int64(0), fs.Stats.GetTransfers())
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
// Create a file and sync it. Change the last modified date and the
|
||||
// file contents but not the size. If we're only doing sync by size
|
||||
// we expect nothing to be transferred on the second sync.
|
||||
func TestSyncSizeOnly(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
fs.Config.SizeOnly = true
|
||||
defer func() { fs.Config.SizeOnly = false }()
|
||||
|
||||
file1 := r.WriteFile("sizeonly", "potato", t1)
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly one file.
|
||||
assert.Equal(t, int64(1), fs.Stats.GetTransfers())
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
|
||||
// Update mtime, md5sum but not length of file
|
||||
file2 := r.WriteFile("sizeonly", "POTATO", t2)
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err = fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred no files
|
||||
assert.Equal(t, int64(0), fs.Stats.GetTransfers())
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
// Create a file and sync it. Keep the last modified date but change
|
||||
// the size. With --ignore-size we expect nothing to be
|
||||
// transferred on the second sync.
|
||||
func TestSyncIgnoreSize(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
fs.Config.IgnoreSize = true
|
||||
defer func() { fs.Config.IgnoreSize = false }()
|
||||
|
||||
file1 := r.WriteFile("ignore-size", "contents", t1)
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly one file.
|
||||
assert.Equal(t, int64(1), fs.Stats.GetTransfers())
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
|
||||
// Update size but not date of file
|
||||
file2 := r.WriteFile("ignore-size", "longer contents but same date", t1)
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err = fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred no files
|
||||
assert.Equal(t, int64(0), fs.Stats.GetTransfers())
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
func TestSyncIgnoreTimes(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteBoth("existing", "potato", t1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly 0 files because the
|
||||
// files were identical.
|
||||
assert.Equal(t, int64(0), fs.Stats.GetTransfers())
|
||||
|
||||
fs.Config.IgnoreTimes = true
|
||||
defer func() { fs.Config.IgnoreTimes = false }()
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err = fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly one file even though the
|
||||
// files were identical.
|
||||
assert.Equal(t, int64(1), fs.Stats.GetTransfers())
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
func TestSyncIgnoreExisting(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("existing", "potato", t1)
|
||||
|
||||
fs.Config.IgnoreExisting = true
|
||||
defer func() { fs.Config.IgnoreExisting = false }()
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
|
||||
// Change everything
|
||||
r.WriteFile("existing", "newpotatoes", t2)
|
||||
fs.Stats.ResetCounters()
|
||||
err = fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
// Items should not change
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
func TestSyncAfterChangingModtimeOnly(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("empty space", "", t2)
|
||||
file2 := r.WriteObject("empty space", "", t1)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
fstest.CheckItems(t, r.fremote, file2)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
func TestSyncAfterChangingModtimeOnlyWithNoUpdateModTime(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
fs.Config.NoUpdateModTime = true
|
||||
defer func() {
|
||||
fs.Config.NoUpdateModTime = false
|
||||
}()
|
||||
|
||||
file1 := r.WriteFile("empty space", "", t2)
|
||||
file2 := r.WriteObject("empty space", "", t1)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
fstest.CheckItems(t, r.fremote, file2)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
fstest.CheckItems(t, r.fremote, file2)
|
||||
}
|
||||
|
||||
func TestSyncAfterAddingAFile(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteBoth("empty space", "", t2)
|
||||
file2 := r.WriteFile("potato", "------------------------------------------------------------", t3)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1, file2)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.flocal, file1, file2)
|
||||
fstest.CheckItems(t, r.fremote, file1, file2)
|
||||
}
|
||||
|
||||
func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteObject("potato", "------------------------------------------------------------", t3)
|
||||
file2 := r.WriteFile("potato", "smaller but same date", t3)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
fstest.CheckItems(t, r.fremote, file2)
|
||||
}
|
||||
|
||||
// Sync after changing a file's contents, changing modtime but length
|
||||
// remaining the same
|
||||
func TestSyncAfterChangingContentsOnly(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
var file1 fstest.Item
|
||||
if r.fremote.Precision() == fs.ModTimeNotSupported {
|
||||
t.Logf("ModTimeNotSupported so forcing file to be a different size")
|
||||
file1 = r.WriteObject("potato", "different size to make sure it syncs", t3)
|
||||
} else {
|
||||
file1 = r.WriteObject("potato", "smaller but same date", t3)
|
||||
}
|
||||
file2 := r.WriteFile("potato", "SMALLER BUT SAME DATE", t2)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
fstest.CheckItems(t, r.fremote, file2)
|
||||
}
|
||||
|
||||
// Sync after removing a file and adding a file --dry-run
|
||||
func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("potato2", "------------------------------------------------------------", t1)
|
||||
file2 := r.WriteObject("potato", "SMALLER BUT SAME DATE", t2)
|
||||
file3 := r.WriteBoth("empty space", "", t2)
|
||||
|
||||
fs.Config.DryRun = true
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
fs.Config.DryRun = false
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file3, file1)
|
||||
fstest.CheckItems(t, r.fremote, file3, file2)
|
||||
}
|
||||
|
||||
// Sync after removing a file and adding a file
|
||||
func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("potato2", "------------------------------------------------------------", t1)
|
||||
file2 := r.WriteObject("potato", "SMALLER BUT SAME DATE", t2)
|
||||
file3 := r.WriteBoth("empty space", "", t2)
|
||||
fstest.CheckItems(t, r.fremote, file2, file3)
|
||||
fstest.CheckItems(t, r.flocal, file1, file3)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.flocal, file1, file3)
|
||||
fstest.CheckItems(t, r.fremote, file1, file3)
|
||||
}
|
||||
|
||||
// Sync after removing a file and adding a file with IO Errors
|
||||
func TestSyncAfterRemovingAFileAndAddingAFileWithErrors(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("potato2", "------------------------------------------------------------", t1)
|
||||
file2 := r.WriteObject("potato", "SMALLER BUT SAME DATE", t2)
|
||||
file3 := r.WriteBoth("empty space", "", t2)
|
||||
fstest.CheckItems(t, r.fremote, file2, file3)
|
||||
fstest.CheckItems(t, r.flocal, file1, file3)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
fs.Stats.Error()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
assert.Equal(t, fs.ErrorNotDeleting, err)
|
||||
fstest.CheckItems(t, r.flocal, file1, file3)
|
||||
fstest.CheckItems(t, r.fremote, file1, file2, file3)
|
||||
}
|
||||
|
||||
// Sync test delete during
|
||||
func TestSyncDeleteDuring(t *testing.T) {
|
||||
// This is the default so it has already been tested above;
// just check that it is still the default.
|
||||
if !(!fs.Config.DeleteBefore && fs.Config.DeleteDuring && !fs.Config.DeleteAfter) {
|
||||
t.Fatalf("Didn't default to --delete-during")
|
||||
}
|
||||
}
|
||||
|
||||
// Sync test delete before
|
||||
func TestSyncDeleteBefore(t *testing.T) {
|
||||
fs.Config.DeleteBefore = true
|
||||
fs.Config.DeleteDuring = false
|
||||
fs.Config.DeleteAfter = false
|
||||
defer func() {
|
||||
fs.Config.DeleteBefore = false
|
||||
fs.Config.DeleteDuring = true
|
||||
fs.Config.DeleteAfter = false
|
||||
}()
|
||||
|
||||
TestSyncAfterRemovingAFileAndAddingAFile(t)
|
||||
}
|
||||
|
||||
// Sync test delete after
|
||||
func TestSyncDeleteAfter(t *testing.T) {
|
||||
fs.Config.DeleteBefore = false
|
||||
fs.Config.DeleteDuring = false
|
||||
fs.Config.DeleteAfter = true
|
||||
defer func() {
|
||||
fs.Config.DeleteBefore = false
|
||||
fs.Config.DeleteDuring = true
|
||||
fs.Config.DeleteAfter = false
|
||||
}()
|
||||
|
||||
TestSyncAfterRemovingAFileAndAddingAFile(t)
|
||||
}
|
||||
|
||||
// Test with exclude
|
||||
func TestSyncWithExclude(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1)
|
||||
file2 := r.WriteBoth("empty space", "", t2)
|
||||
file3 := r.WriteFile("enormous", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
|
||||
fstest.CheckItems(t, r.fremote, file1, file2)
|
||||
fstest.CheckItems(t, r.flocal, file1, file2, file3)
|
||||
|
||||
fs.Config.Filter.MaxSize = 40
|
||||
defer func() {
|
||||
fs.Config.Filter.MaxSize = -1
|
||||
}()
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.fremote, file2, file1)
|
||||
|
||||
// Now sync the other way round and check enormous doesn't get
|
||||
// deleted as it is excluded from the sync
|
||||
fs.Stats.ResetCounters()
|
||||
err = fs.Sync(r.flocal, r.fremote)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.flocal, file2, file1, file3)
|
||||
}
|
||||
|
||||
// Test with exclude and delete excluded
|
||||
func TestSyncWithExcludeAndDeleteExcluded(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1) // 60 bytes
|
||||
file2 := r.WriteBoth("empty space", "", t2)
|
||||
file3 := r.WriteBoth("enormous", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
|
||||
fstest.CheckItems(t, r.fremote, file1, file2, file3)
|
||||
fstest.CheckItems(t, r.flocal, file1, file2, file3)
|
||||
|
||||
fs.Config.Filter.MaxSize = 40
|
||||
fs.Config.Filter.DeleteExcluded = true
|
||||
defer func() {
|
||||
fs.Config.Filter.MaxSize = -1
|
||||
fs.Config.Filter.DeleteExcluded = false
|
||||
}()
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.fremote, file2)
|
||||
|
||||
// Check sync the other way round to make sure enormous gets
|
||||
// deleted even though it is excluded
|
||||
fs.Stats.ResetCounters()
|
||||
err = fs.Sync(r.flocal, r.fremote)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
}
|
||||
|
||||
// Test with UpdateOlder set
|
||||
func TestSyncWithUpdateOlder(t *testing.T) {
|
||||
if fs.Config.ModifyWindow == fs.ModTimeNotSupported {
|
||||
t.Skip("Can't run this test on fs which doesn't support mod time")
|
||||
}
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
t2plus := t2.Add(time.Second / 2)
|
||||
t2minus := t2.Add(-time.Second / 2)
|
||||
oneF := r.WriteFile("one", "one", t1)
|
||||
twoF := r.WriteFile("two", "two", t3)
|
||||
threeF := r.WriteFile("three", "three", t2)
|
||||
fourF := r.WriteFile("four", "four", t2)
|
||||
fiveF := r.WriteFile("five", "five", t2)
|
||||
fstest.CheckItems(t, r.flocal, oneF, twoF, threeF, fourF, fiveF)
|
||||
oneO := r.WriteObject("one", "ONE", t2)
|
||||
twoO := r.WriteObject("two", "TWO", t2)
|
||||
threeO := r.WriteObject("three", "THREE", t2plus)
|
||||
fourO := r.WriteObject("four", "FOURFOUR", t2minus)
|
||||
fstest.CheckItems(t, r.fremote, oneO, twoO, threeO, fourO)
|
||||
|
||||
fs.Config.UpdateOlder = true
|
||||
oldModifyWindow := fs.Config.ModifyWindow
|
||||
fs.Config.ModifyWindow = fs.ModTimeNotSupported
|
||||
defer func() {
|
||||
fs.Config.UpdateOlder = false
|
||||
fs.Config.ModifyWindow = oldModifyWindow
|
||||
}()
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.fremote, oneO, twoF, threeO, fourF, fiveF)
|
||||
}
|
||||
|
||||
// Test a server side move if possible, or the backup path if not
func testServerSideMove(t *testing.T, r *Run, fremoteMove fs.Fs, withFilter bool) {
	file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1)
	file2 := r.WriteBoth("empty space", "", t2)

	fstest.CheckItems(t, r.fremote, file2, file1)

	t.Logf("Server side move (if possible) %v -> %v", r.fremote, fremoteMove)

	// Write just one file in the new remote
	r.WriteObjectTo(fremoteMove, "empty space", "", t2, false)
	fstest.CheckItems(t, fremoteMove, file2)

	// Do server side move
	fs.Stats.ResetCounters()
	err := fs.MoveDir(fremoteMove, r.fremote)
	require.NoError(t, err)

	fstest.CheckItems(t, r.fremote, file2)
	fstest.CheckItems(t, fremoteMove, file2, file1)

	// Purge the original before moving
	require.NoError(t, fs.Purge(r.fremote))

	// Move it back again, dst does not exist this time
	fs.Stats.ResetCounters()
	err = fs.MoveDir(r.fremote, fremoteMove)
	require.NoError(t, err)

	if withFilter {
		fstest.CheckItems(t, r.fremote, file1)
		fstest.CheckItems(t, fremoteMove, file2)
	} else {
		fstest.CheckItems(t, r.fremote, file2, file1)
		fstest.CheckItems(t, fremoteMove)
	}
}
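// (Editorial note: as the comment above says, fs.MoveDir does a server side move
// where the backend supports it and otherwise falls back to copying then deleting,
// so this helper exercises whichever path the remote under test provides.)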

// Test a server side move if possible, or the backup path if not
func TestServerSideMove(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	fremoteMove, _, finaliseMove, err := fstest.RandomRemote(*RemoteName, *SubDir)
	require.NoError(t, err)
	defer finaliseMove()
	testServerSideMove(t, r, fremoteMove, false)
}

// Test a server side move if possible, or the backup path if not
func TestServerSideMoveWithFilter(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()

	fs.Config.Filter.MinSize = 40
	defer func() {
		fs.Config.Filter.MinSize = -1
	}()

	fremoteMove, _, finaliseMove, err := fstest.RandomRemote(*RemoteName, *SubDir)
	require.NoError(t, err)
	defer finaliseMove()
	testServerSideMove(t, r, fremoteMove, true)
}

// Test a server side move with overlap
func TestServerSideMoveOverlap(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	subRemoteName := r.fremoteName + "/rclone-move-test"
	fremoteMove, err := fs.NewFs(subRemoteName)
	require.NoError(t, err)

	file1 := r.WriteObject("potato2", "------------------------------------------------------------", t1)
	fstest.CheckItems(t, r.fremote, file1)

	// Subdir move with no filters should return ErrorCantMoveOverlapping
	err = fs.MoveDir(fremoteMove, r.fremote)
	assert.EqualError(t, err, fs.ErrorCantMoveOverlapping.Error())

	// Now try with a filter which should also fail with ErrorCantMoveOverlapping
	fs.Config.Filter.MinSize = 40
	defer func() {
		fs.Config.Filter.MinSize = -1
	}()
	err = fs.MoveDir(fremoteMove, r.fremote)
	assert.EqualError(t, err, fs.ErrorCantMoveOverlapping.Error())
}
262
fs/test_all.go
Normal file
@@ -0,0 +1,262 @@
// +build ignore

// Run tests for all the remotes
//
// Run with go run test_all.go
package main

import (
	"flag"
	"log"
	"os"
	"os/exec"
	"regexp"
	"runtime"
	"strings"
	"time"

	"github.com/ncw/rclone/fs"
	_ "github.com/ncw/rclone/fs/all" // import all fs
	"github.com/ncw/rclone/fstest"
)

var (
	remotes = []string{
		"TestSwift:",
		"TestS3:",
		"TestDrive:",
		"TestGoogleCloudStorage:",
		"TestDropbox:",
		"TestAmazonCloudDrive:",
		"TestOneDrive:",
		"TestHubic:",
		"TestB2:",
		"TestYandex:",
	}
	binary = "fs.test"
	// Flags
	maxTries = flag.Int("maxtries", 5, "Number of times to try each test")
	runTests = flag.String("remotes", "", "Comma separated list of remotes to test, eg 'TestSwift:,TestS3'")
	verbose  = flag.Bool("verbose", false, "Run the tests with -v")
	clean    = flag.Bool("clean", false, "Instead of testing, clean all left over test directories")
	runOnly  = flag.String("run-only", "", "Run only those tests matching the regexp supplied")
)

// test holds info about a running test
type test struct {
	remote      string
	subdir      bool
	cmdLine     []string
	cmdString   string
	try         int
	err         error
	output      []byte
	failedTests []string
	runFlag     string
}

// newTest creates a new test
func newTest(remote string, subdir bool) *test {
	t := &test{
		remote:  remote,
		subdir:  subdir,
		cmdLine: []string{"./" + binary, "-remote", remote},
		try:     1,
	}
	if *verbose {
		t.cmdLine = append(t.cmdLine, "-test.v")
	}
	if *runOnly != "" {
		t.cmdLine = append(t.cmdLine, "-test.run", *runOnly)
	}
	if subdir {
		t.cmdLine = append(t.cmdLine, "-subdir")
	}
	t.cmdString = strings.Join(t.cmdLine, " ")
	return t
}
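// (Illustrative example: newTest("TestS3:", true) with -verbose set produces the
// command line "./fs.test -remote TestS3: -test.v -subdir".)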

// dumpOutput prints the error output
func (t *test) dumpOutput() {
	log.Println("------------------------------------------------------------")
	log.Printf("---- %q ----", t.cmdString)
	log.Println(string(t.output))
	log.Println("------------------------------------------------------------")
}

var failRe = regexp.MustCompile(`(?m)^--- FAIL: (Test\w*) \(`)

// findFailures looks for all the tests which failed
func (t *test) findFailures() {
	oldFailedTests := t.failedTests
	t.failedTests = nil
	for _, matches := range failRe.FindAllSubmatch(t.output, -1) {
		t.failedTests = append(t.failedTests, string(matches[1]))
	}
	if len(t.failedTests) != 0 {
		t.runFlag = "^(" + strings.Join(t.failedTests, "|") + ")$"
	} else {
		t.runFlag = ""
	}
	if t.passed() && len(t.failedTests) != 0 {
		log.Printf("%q - Expecting no errors but got: %v", t.cmdString, t.failedTests)
		t.dumpOutput()
	} else if !t.passed() && len(t.failedTests) == 0 {
		log.Printf("%q - Expecting errors but got none: %v", t.cmdString, t.failedTests)
		t.dumpOutput()
		t.failedTests = oldFailedTests
	}
}
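// (Illustrative example: if the output contains "--- FAIL: TestCopy (" and
// "--- FAIL: TestSync (", findFailures sets runFlag to "^(TestCopy|TestSync)$",
// which trial then passes to the test binary as -test.run on the next attempt.)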

// trial runs a single test
func (t *test) trial() {
	cmdLine := t.cmdLine[:]
	if t.runFlag != "" {
		cmdLine = append(cmdLine, "-test.run", t.runFlag)
	}
	cmdString := strings.Join(cmdLine, " ")
	log.Printf("%q - Starting (try %d/%d)", cmdString, t.try, *maxTries)
	cmd := exec.Command(cmdLine[0], cmdLine[1:]...)
	start := time.Now()
	t.output, t.err = cmd.CombinedOutput()
	duration := time.Since(start)
	t.findFailures()
	if t.passed() {
		log.Printf("%q - Finished OK in %v (try %d/%d)", cmdString, duration, t.try, *maxTries)
	} else {
		log.Printf("%q - Finished ERROR in %v (try %d/%d): %v: Failed %v", cmdString, duration, t.try, *maxTries, t.err, t.failedTests)
	}
}

// cleanFs runs a single clean fs for left over directories
func (t *test) cleanFs() error {
	f, err := fs.NewFs(t.remote)
	if err != nil {
		return err
	}
	dirs, err := fs.NewLister().SetLevel(1).Start(f, "").GetDirs()
	for _, dir := range dirs {
		if fstest.MatchTestRemote.MatchString(dir.Name) {
			log.Printf("Purging %s%s", t.remote, dir.Name)
			dir, err := fs.NewFs(t.remote + dir.Name)
			if err != nil {
				return err
			}
			err = fs.Purge(dir)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

// clean runs a single clean on a fs for left over directories
func (t *test) clean() {
	log.Printf("%q - Starting clean (try %d/%d)", t.remote, t.try, *maxTries)
	start := time.Now()
	t.err = t.cleanFs()
	if t.err != nil {
		log.Printf("%q - Failed to purge %v", t.remote, t.err)
	}
	duration := time.Since(start)
	if t.passed() {
		log.Printf("%q - Finished OK in %v (try %d/%d)", t.cmdString, duration, t.try, *maxTries)
	} else {
		log.Printf("%q - Finished ERROR in %v (try %d/%d): %v", t.cmdString, duration, t.try, *maxTries, t.err)
	}
}

// passed returns true if the test passed
func (t *test) passed() bool {
	return t.err == nil
}

// run runs all the trials for this test
func (t *test) run(result chan<- *test) {
	for t.try = 1; t.try <= *maxTries; t.try++ {
		if *clean {
			if !t.subdir {
				t.clean()
			}
		} else {
			t.trial()
		}
		if t.passed() {
			break
		}
	}
	if !t.passed() {
		t.dumpOutput()
	}
	result <- t
}

// makeTestBinary makes the binary we will run
func makeTestBinary() {
	if runtime.GOOS == "windows" {
		binary += ".exe"
	}
	log.Printf("Making test binary %q", binary)
	err := exec.Command("go", "test", "-c", "-o", binary).Run()
	if err != nil {
		log.Fatalf("Failed to make test binary: %v", err)
	}
	if _, err := os.Stat(binary); err != nil {
		log.Fatalf("Couldn't find test binary %q", binary)
	}
}

// removeTestBinary removes the binary made in makeTestBinary
func removeTestBinary() {
	err := os.Remove(binary) // Delete the binary when finished
	if err != nil {
		log.Printf("Error removing test binary %q: %v", binary, err)
	}
}

func main() {
	flag.Parse()
	if *runTests != "" {
		remotes = strings.Split(*runTests, ",")
	}
	log.Printf("Testing remotes: %s", strings.Join(remotes, ", "))

	start := time.Now()
	if *clean {
		fs.LoadConfig()
	} else {
		makeTestBinary()
		defer removeTestBinary()
	}

	// start the tests
	results := make(chan *test, 8)
	awaiting := 0
	for _, remote := range remotes {
		awaiting += 2
		go newTest(remote, false).run(results)
		go newTest(remote, true).run(results)
	}

	// Wait for the tests to finish
	var failed []*test
	for ; awaiting > 0; awaiting-- {
		t := <-results
		if !t.passed() {
			failed = append(failed, t)
		}
	}
	duration := time.Since(start)

	// Summarise results
	if len(failed) == 0 {
		log.Printf("PASS: All tests finished OK in %v", duration)
	} else {
		log.Printf("FAIL: %d tests failed in %v", len(failed), duration)
		for _, t := range failed {
			log.Printf(" * %s", t.cmdString)
		}
		os.Exit(1)
	}
}
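For orientation, a hedged usage sketch of the driver above, using only the flags it declares (the remote names are illustrative):

    go run test_all.go -remotes TestSwift:,TestS3: -verbose -maxtries 3
    go run test_all.go -clean

The first form builds and runs the fs.test binary against the listed remotes, retrying failed tests up to -maxtries times; the second purges left over rclone-test-* directories instead of running tests.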
@@ -1,34 +0,0 @@
#!/bin/bash

go install

REMOTES="
TestSwift:
TestS3:
TestDrive:
TestGoogleCloudStorage:
TestDropbox:
TestAmazonCloudDrive:
"

REMOTES="
TestAmazonCloudDrive:
"

function test_remote {
    args=$@
    echo "@go test $args"
    go test $args || {
        echo "*** test $args FAILED ***"
        exit 1
    }
}

test_remote
test_remote --subdir
for remote in $REMOTES; do
    test_remote --remote $remote
    test_remote --remote $remote --subdir
done

echo "All OK"
4
fs/testdata/enc-invalid.conf
vendored
Normal file
@@ -0,0 +1,4 @@
# Encrypted rclone configuration File

RCLONE_ENCRYPT_V0:
b5Uk6mE3cUn5Wb8xiWYnVBAxXUirAaEG1PO/GIDiO9274AOæøå+Yj790BwJA4d2y7lNkmHt4nJwIsoueFvUYmm7RDyzER8IA3XOCrjzl3OUcczZqcplk5JfBdhxMZpt1aGYWUdle1IgO/kAFne6sLD6IuxPySEb
4
fs/testdata/enc-short.conf
vendored
Normal file
@@ -0,0 +1,4 @@
# Encrypted rclone configuration File

RCLONE_ENCRYPT_V0:
b5Uk6mE3cUn5Wb8xi
4
fs/testdata/enc-too-new.conf
vendored
Normal file
@@ -0,0 +1,4 @@
# Encrypted rclone configuration File

RCLONE_ENCRYPT_V1:
b5Uk6mE3cUn5Wb8xiWYnVBAxXUirAaEG1PO/GIDiO9274AO+Yj790BwJA4d2y7lNkmHt4nJwIsoueFvUYmm7RDyzER8IA3XOCrjzl3OUcczZqcplk5JfBdhxMZpt1aGYWUdle1IgO/kAFne6sLD6IuxPySEb
4
fs/testdata/encrypted.conf
vendored
Normal file
@@ -0,0 +1,4 @@
# Encrypted rclone configuration File

RCLONE_ENCRYPT_V0:
b5Uk6mE3cUn5Wb8xiWYnVBAxXUirAaEG1PO/GIDiO9274AO+Yj790BwJA4d2y7lNkmHt4nJwIsoueFvUYmm7RDyzER8IA3XOCrjzl3OUcczZqcplk5JfBdhxMZpt1aGYWUdle1IgO/kAFne6sLD6IuxPySEb
12
fs/testdata/plain.conf
vendored
Normal file
@@ -0,0 +1,12 @@
[RCLONE_ENCRYPT_V0]
type = local
nounc = true

[nounc]
type = local
nounc = true


[unc]
type = local
nounc = false
@@ -1,4 +1,4 @@
package fs

// Version of rclone
const Version = "v1.23"
var Version = "v1.32"
154
fstest/fstest.go
@@ -4,16 +4,29 @@ package fstest
// FIXME put name of test FS in Fs structure

import (
	"bytes"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"math/rand"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"testing"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var (
	// MatchTestRemote matches the remote names used for testing
	MatchTestRemote = regexp.MustCompile(`^rclone-test-[abcdefghijklmnopqrstuvwxyz0123456789]{24}$`)
	listRetries     = flag.Int("list-retries", 6, "Number or times to retry listing")
)

// Seed the random number generator
@@ -25,12 +38,29 @@ func init() {
// Item represents an item for checking
type Item struct {
	Path string
	Md5sum string
	Hashes map[fs.HashType]string
	ModTime time.Time
	Size int64
	WinPath string
}

// NewItem creates an item from a string content
func NewItem(Path, Content string, modTime time.Time) Item {
	i := Item{
		Path:    Path,
		ModTime: modTime,
		Size:    int64(len(Content)),
	}
	hash := fs.NewMultiHasher()
	buf := bytes.NewBufferString(Content)
	_, err := io.Copy(hash, buf)
	if err != nil {
		log.Fatalf("Failed to create item: %v", err)
	}
	i.Hashes = hash.Sums()
	return i
}

// CheckTimeEqualWithPrecision checks the times are equal within the
// precision, returns the delta and a flag
func CheckTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
@@ -44,27 +74,25 @@ func CheckTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (tim
// CheckModTime checks the mod time to the given precision
func (i *Item) CheckModTime(t *testing.T, obj fs.Object, modTime time.Time, precision time.Duration) {
	dt, ok := CheckTimeEqualWithPrecision(modTime, i.ModTime, precision)
	if !ok {
		t.Errorf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", obj.Remote(), dt, precision, modTime, i.ModTime, precision)
	assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", obj.Remote(), dt, precision, modTime, i.ModTime, precision))
}

// CheckHashes checks all the hashes the object supports are correct
func (i *Item) CheckHashes(t *testing.T, obj fs.Object) {
	require.NotNil(t, obj)
	types := obj.Fs().Hashes().Array()
	for _, hash := range types {
		// Check attributes
		sum, err := obj.Hash(hash)
		require.NoError(t, err)
		assert.True(t, fs.HashEquals(i.Hashes[hash], sum), fmt.Sprintf("%s/%s: %v hash incorrect - expecting %q got %q", obj.Fs().String(), obj.Remote(), hash, i.Hashes[hash], sum))
	}
}

// Check checks all the attributes of the object are correct
func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) {
	if obj == nil {
		t.Fatalf("Object is nil")
	}
	// Check attributes
	Md5sum, err := obj.Md5sum()
	if err != nil {
		t.Fatalf("Failed to read md5sum for %q: %v", obj.Remote(), err)
	}
	if !fs.Md5sumsEqual(i.Md5sum, Md5sum) {
		t.Errorf("%s: Md5sum incorrect - expecting %q got %q", obj.Remote(), i.Md5sum, Md5sum)
	}
	if i.Size != obj.Size() {
		t.Errorf("%s: Size incorrect - expecting %d got %d", obj.Remote(), i.Size, obj.Size())
	}
	i.CheckHashes(t, obj)
	assert.Equal(t, i.Size, obj.Size())
	i.CheckModTime(t, obj, obj.ModTime(), precision)
}
@@ -95,24 +123,23 @@ func (is *Items) Find(t *testing.T, obj fs.Object, precision time.Duration) {
	i, ok := is.byName[obj.Remote()]
	if !ok {
		i, ok = is.byNameAlt[obj.Remote()]
		if !ok {
			t.Errorf("Unexpected file %q", obj.Remote())
			return
		}
		assert.True(t, ok, fmt.Sprintf("Unexpected file %q", obj.Remote()))
	}
	if i != nil {
		delete(is.byName, i.Path)
		delete(is.byName, i.WinPath)
		i.Check(t, obj, precision)
	}
	delete(is.byName, i.Path)
	delete(is.byName, i.WinPath)
	i.Check(t, obj, precision)
}

// Done checks all finished
func (is *Items) Done(t *testing.T) {
	if len(is.byName) != 0 {
		for name := range is.byName {
			log.Printf("Not found %q", name)
			t.Logf("Not found %q", name)
		}
		t.Errorf("%d objects not found", len(is.byName))
	}
	assert.Equal(t, 0, len(is.byName), fmt.Sprintf("%d objects not found", len(is.byName)))
}

// CheckListingWithPrecision checks the fs to see if it has the
@@ -121,22 +148,30 @@ func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, precision ti
	is := NewItems(items)
	oldErrors := fs.Stats.GetErrors()
	var objs []fs.Object
	for i := 1; i <= 5; i++ {
		objs = nil
		for obj := range f.List() {
			objs = append(objs, obj)
	var err error
	var retries = *listRetries
	sleep := time.Second / 2
	for i := 1; i <= retries; i++ {
		objs, err = fs.NewLister().Start(f, "").GetObjects()
		if err != nil && err != fs.ErrorDirNotFound {
			t.Fatalf("Error listing: %v", err)
		}
		if len(objs) == len(items) {
			// Put an extra sleep in if we did any retries just to make sure it really
			// is consistent (here is looking at you Amazon Drive!)
			if i != 1 {
				extraSleep := 5*time.Second + sleep
				t.Logf("Sleeping for %v just to make sure", extraSleep)
				time.Sleep(extraSleep)
			}
			break
		}
		t.Logf("Sleeping for 1 second for list eventual consistency: %d/5", i)
		time.Sleep(1 * time.Second)
		sleep *= 2
		t.Logf("Sleeping for %v for list eventual consistency: %d/%d", sleep, i, retries)
		time.Sleep(sleep)
	}
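	// (Editorial note: in the new code sleep starts at half a second and is doubled
	// before each wait, so with the default -list-retries=6 the waits between
	// attempts are 1s, 2s, 4s, 8s, 16s and 32s.)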
	for _, obj := range objs {
		if obj == nil {
			t.Errorf("Unexpected nil in List()")
			continue
		}
		require.NotNil(t, obj)
		is.Find(t, obj, precision)
	}
	is.Done(t)
@@ -152,6 +187,12 @@ func CheckListing(t *testing.T, f fs.Fs, items []Item) {
	CheckListingWithPrecision(t, f, items, precision)
}

// CheckItems checks the fs to see if it has only the items passed in
// using a precision of fs.Config.ModifyWindow
func CheckItems(t *testing.T, f fs.Fs, items ...Item) {
	CheckListingWithPrecision(t, f, items, fs.Config.ModifyWindow)
}

// Time parses a time string or logs a fatal error
func Time(timeString string) time.Time {
	t, err := time.Parse(time.RFC3339Nano, timeString)
@@ -163,9 +204,17 @@ func Time(timeString string) time.Time {
// RandomString create a random string for test purposes
func RandomString(n int) string {
	source := "abcdefghijklmnopqrstuvwxyz0123456789"
	const (
		vowel     = "aeiou"
		consonant = "bcdfghjklmnpqrstvwxyz"
		digit     = "0123456789"
	)
	pattern := []string{consonant, vowel, consonant, vowel, consonant, vowel, consonant, digit}
	out := make([]byte, n)
	p := 0
	for i := range out {
		source := pattern[p]
		p = (p + 1) % len(pattern)
		out[i] = source[rand.Intn(len(source))]
	}
	return string(out)
@@ -199,7 +248,10 @@ func RandomRemoteName(remoteName string) (string, string, error) {
		if !strings.HasSuffix(remoteName, ":") {
			remoteName += "/"
		}
		leafName = RandomString(32)
		leafName = "rclone-test-" + RandomString(24)
		if !MatchTestRemote.MatchString(leafName) {
			log.Fatalf("%q didn't match the test remote name regexp", leafName)
		}
		remoteName += leafName
	}
	return remoteName, leafName, nil
@@ -209,26 +261,28 @@ func RandomRemoteName(remoteName string) (string, string, error) {
//
// Call the finalise function returned to Purge the fs at the end (and
// the parent if necessary)
func RandomRemote(remoteName string, subdir bool) (fs.Fs, func(), error) {
//
// Returns the remote, its url, a finaliser and an error
func RandomRemote(remoteName string, subdir bool) (fs.Fs, string, func(), error) {
	var err error
	var parentRemote fs.Fs

	remoteName, _, err = RandomRemoteName(remoteName)
	if err != nil {
		return nil, nil, err
		return nil, "", nil, err
	}

	if subdir {
		parentRemote, err = fs.NewFs(remoteName)
		if err != nil {
			return nil, nil, err
			return nil, "", nil, err
		}
		remoteName += "/" + RandomString(8)
		remoteName += "/rclone-test-subdir-" + RandomString(8)
	}

	remote, err := fs.NewFs(remoteName)
	if err != nil {
		return nil, nil, err
		return nil, "", nil, err
	}

	finalise := func() {
@@ -241,31 +295,25 @@ func RandomRemote(remoteName string, subdir bool) (fs.Fs, func(), error) {
		}
	}

	return remote, finalise, nil
	return remote, remoteName, finalise, nil
}

// TestMkdir tests Mkdir works
func TestMkdir(t *testing.T, remote fs.Fs) {
	err := fs.Mkdir(remote)
	if err != nil {
		t.Fatalf("Mkdir failed: %v", err)
	}
	require.NoError(t, err)
	CheckListing(t, remote, []Item{})
}

// TestPurge tests Purge works
func TestPurge(t *testing.T, remote fs.Fs) {
	err := fs.Purge(remote)
	if err != nil {
		t.Fatalf("Purge failed: %v", err)
	}
	require.NoError(t, err)
	CheckListing(t, remote, []Item{})
}

// TestRmdir tests Rmdir works
func TestRmdir(t *testing.T, remote fs.Fs) {
	err := fs.Rmdir(remote)
	if err != nil {
		t.Fatalf("Rmdir failed: %v", err)
	}
	require.NoError(t, err)
}
Some files were not shown because too many files have changed in this diff