Mirror of https://github.com/rclone/rclone.git (synced 2026-02-04 02:33:44 +00:00)

Compare commits: hensur-bug ... onedrive-s (253 commits)
The 253 commits in this range (SHA1):

108ec53c0c 64e303321b 7d87386d58 beb773d20c 5babf2dc5c 9012d7c6c1 df1faa9a8f
3de7ad5223 9cb3a68c38 c1dd76788d 5ee1816a71 63b51c6742 e7684b7ed5 dda23baf42
8575abf599 feea0532cd d3e8ae1820 91a9a959a2 04eae51d11 8fb707e16d 4138d5aa75
fc654a4cec 26b5f55cba 3f572e6bf2 941ad6bc62 5d1d93e163 35fba5bfdd 887834da91
107293c80e e3c4ebd59a d99ffde7c0 198c34ce21 0eba88bbfe aeea4430d5 4b15c4215c
50452207d9 01fcad9b9c eb41253764 89625e54cf 58f7141c96 e56c6402a7 d0eb8ddc30
a6c28a5faa d35bd15762 8b8220c4f7 5fe3b0ad71 4c8c87a935 bb10a51b39 df01f7a4eb
e84790ef79 369a8ee17b 84e21ade6b 703b0535a4 155264ae12 31e2ce03c3 e969505ae4
26e2f1a998 2682d5a9cf 2191592e80 18f758294e f95c1c61dd 8c8dcdd521 141c133818
0f03e55cd1 9e6ba92a11 762561f88e 084fe38922 63a2a935fc 64fce8438b f92beb4e14
f7ce2e8d95 3975d82b3b d87aa33ec5 1b78f4d1ea b3704597f3 16f797a7d7 ee700ec01a
9b3c951ab7 22d17e79e3 6d3088a00b 84202c7471 96a05516f9 4f6a942595 c4b0a37b21
9322f4baef fa0a1e7261 4ad08794c9 c0f600764b f139e07380 c6786eeb2d 57b85b8155
2b1194c57e e6dd121f52 e600217666 bc17ca7ed9 1916410316 dddfbec92a 75a88de55c
2466f4d152 39283c8a35 46c2f55545 fc2afcbcbd fa0a9653d2 181267e20e 75e8ea383c
8c8b58a7de b961e07c57 0b80d1481a 89550e7121 370c218c63 b972dcb0ae 0bfa9811f7
aa9b2c31f4 cff75db6a4 75252e4a89 2089405e1b a379eec9d9 45d5339fcb bb5637d46a
1f05d5bf4a ff87da9c3b 3d81b75f44 baba6d67e6 04c0564fe2 91cfdb81f5 deae7bf33c
04a0da1f92 9486df0226 948a5d25c2 f7c31cd210 696e7b2833 e76cf1217f 543e37f662
c514cb752d c0ca93ae6f 38a89d49ae 6531126eb2 25d0e59ef8 b0db08fd2b 07addf74fd
52c7c738ca 5c32b32011 fe61cff079 fbab1e55bb 1bfd07567e f97c4c8d9d a3c55462a8
bbb9a504a8 dedc7d885c c5ac96e9e7 9959c5f17f e8d0a363fc 935b7c1c0f 15ce0ae57c
67703a73de f96ce5674b 7f0b204292 83b1ae4833 753cc63d96 5dac8e055f c3a8eb1c10
0f2a5403db dcce84714e eb8130f48a aa58f66806 a3dc591b8e 5ee1bd7ba4 dbedf33b9f
0f02c9540c 06922674c8 8ad7da066c e1503add41 6fea75afde 6a773289e7 ade252f13b
bb2e361004 b24facb73d 014d58a757 1d16e16b30 249a523dd3 8d72ef8d1e bc8f0208aa
ee25b6106a 5c1b135304 2f2029fed5 57273d364b 84289d1d69 98e2746e31 c00ec0cbe4
1a40bceb1d 411a6cc472 1e2676df26 364fca5cea 87e1efa997 6709084e2f 6b1f915ebc
78b9bd77f5 a9273c5da5 14128656db 1557287c64 e7e467fb3a 5fde7d8b12 3c086f5f7f
c0084f43dd ddbd4fd881 7826e39fcf 06ae4258be d9037fe2be 1d14972e41 05fa9cb379
59e14c25df fc640d3a09 e1f67295b4 22ac80e83a c7aa6b587b 8d1848bebe 527c0af1c3
a20fae0364 15b1a1f909 80b25daac7 70b30d5ca4 0b2fc621fc 171e39b230 690a44e40e
d9a3b26e47 1eec59e091 96ce49ec4e ae63e4b4f0 e2fb588eb9 382a6863b5 7b975bc1ff
467fe30a5e 4415aa5c2e 17ab38502d 9fa8c959ee f29c6049fc e44fa5db8e 03ea05b860
b8678c9d4b 13823a7743 b94d87ae2d e0c5f7ff1b b22ecbe174 c41be436c6 e022ffce0f
cfe65f1e72 b18595ae07 d27630626a c473c7cb53 ef3526b3b8 d4ee7277c0 4a3efa5d45
a14f0d46d7
31  .github/ISSUE_TEMPLATE.md  (vendored, new file)
@@ -0,0 +1,31 @@
<!--

Welcome :-) We understand you are having a problem with rclone; we want to help you with that!

If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:

https://forum.rclone.org/

instead of filing an issue for a quick response.

If you are reporting a bug or asking for a new feature then please use one of the templates here:

https://github.com/ncw/rclone/issues/new

otherwise fill in the form below.

Thank you

The Rclone Developers

-->

#### Output of `rclone version`

#### Describe the issue
@@ -1,14 +1,17 @@
+---
+name: Bug report
+about: Report a problem with rclone
+---

<!--

-Hi!
+Welcome :-) We understand you are having a problem with rclone; we want to help you with that!

-We understand you are having a problem with rclone or have an idea for an improvement - we want to help you with that!

-If you've just got a question or aren't sure if you've found a bug then please use the rclone forum
+If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:

https://forum.rclone.org/

-instead of filing an issue. We'll reply quickly and it won't increase our massive issue backlog.
+instead of filing an issue for a quick response.

If you think you might have found a bug, please can you try to replicate it with the latest beta?

@@ -16,9 +19,7 @@ If you think you might have found a bug, please can you try to replicate it with

If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)

-If you have an idea for an improvement, then please search the old issues first and if you don't find your idea, make a new issue.

-Thanks
+Thank you

The Rclone Developers

@@ -27,17 +28,23 @@ The Rclone Developers

#### What is the problem you are having with rclone?

-#### What is your rclone version (eg output from `rclone -V`)
+#### What is your rclone version (output from `rclone version`)

#### Which OS you are using and how many bits (eg Windows 7, 64 bit)

#### Which cloud storage system are you using? (eg Google Drive)

#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)

#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
36  .github/ISSUE_TEMPLATE/Feature.md  (vendored, new file)
@@ -0,0 +1,36 @@
---
name: Feature request
about: Suggest a new feature or enhancement for rclone
---

<!--

Welcome :-)

So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.

Here is a checklist of things to do:

1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum first: https://forum.rclone.org/
3. Make a feature request issue (this is the right place!).
4. Be prepared to get involved making the feature :-)

Looking forward to your great idea!

The Rclone Developers

-->

#### What is your current rclone version (output from `rclone version`)?

#### What problem are you trying to solve?

#### How do you think rclone should be changed to solve that?
29  .github/PULL_REQUEST_TEMPLATE.md  (vendored, new file)
@@ -0,0 +1,29 @@
<!--
Thank you very much for contributing code or documentation to rclone! Please
fill out the following questions to make it easier for us to review your
changes.

You do not need to check all the boxes below all at once, feel free to take
your time and add more commits. If you're done and ready for review, please
check the last box.
-->

#### What is the purpose of this change?

<!--
Describe the changes here
-->

#### Was the change discussed in an issue or in the forum before?

<!--
Link issues and relevant forum posts here.
-->

#### Checklist

- [ ] I have read the [contribution guidelines](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
- [ ] I have added tests for all changes in this PR if appropriate.
- [ ] I have added documentation for the changes if appropriate.
- [ ] All commit messages are in [house style](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#commit-messages).
- [ ] I'm done, this Pull Request is ready for review :-)
@@ -4,7 +4,6 @@ dist: trusty
os:
- linux
go:
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
@@ -21,14 +21,14 @@ with the [latest beta of rclone](https://beta.rclone.org/):
## Submitting a pull request ##

If you find a bug that you'd like to fix, or a new feature that you'd
-like to implement then please submit a pull request via Github.
+like to implement then please submit a pull request via GitHub.

If it is a big feature then make an issue first so it can be discussed.

You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info.

-First in your web browser press the fork button on [rclone's Github
+First in your web browser press the fork button on [rclone's GitHub
page](https://github.com/ncw/rclone).

Now in your terminal
@@ -64,22 +64,23 @@ packages which you can install with

Make sure you

-* Add documentation for a new feature (see below for where)
-* Add unit tests for a new feature
+* Add [documentation](#writing-documentation) for a new feature.
+* Follow the [commit message guidelines](#commit-messages).
+* Add [unit tests](#testing) for a new feature
* squash commits down to one per feature
-* rebase to master `git rebase master`
+* rebase to master with `git rebase master`

When you are done with that

    git push origin my-new-feature

-Go to the Github website and click [Create pull
+Go to the GitHub website and click [Create pull
request](https://help.github.com/articles/creating-a-pull-request/).

Your patch will get reviewed and you might get asked to fix some stuff.

If so, then make the changes in the same branch, squash the commits,
-rebase it to master then push it to Github with `--force`.
+rebase it to master then push it to GitHub with `--force`.
@@ -122,6 +123,13 @@ but they can be run against any of the remotes.

    cd fs/operations
    go test -v -remote TestDrive:

+If you want to use the integration test framework to run these tests
+all together with an HTML report and test retries then from the
+project root:
+
+    go install github.com/ncw/rclone/fstest/test_all
+    test_all -backend drive

If you want to run all the integration tests against all the remotes,
then change into the project root and run
@@ -181,10 +189,14 @@ with modules beneath.

If you are adding a new feature then please update the documentation.

-If you add a new flag, then if it is a general flag, document it in
+If you add a new general flag (not for a backend), then document it in
`docs/content/docs.md` - the flags there are supposed to be in
-alphabetical order. If it is a remote specific flag, then document it
-in `docs/content/remote.md`.
+alphabetical order.
+
+If you add a new backend option/flag, then it should be documented in
+the source file in the `Help:` field. The first line of this is used
+for the flag help, the remainder is shown to the user in `rclone
+config` and is added to the docs with `make backenddocs`.

The only documentation you need to edit are the `docs/content/*.md`
files. The MANUAL.*, rclone.1, web site etc are all auto generated
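To make the `Help:` convention concrete, here is a minimal sketch of a hypothetical backend option registration. The backend and option names are invented for illustration; the pattern matches the `fs.Option` blocks visible in the backend diffs further down this page:

    // a sketch, not a real backend - "mybackend" and "shiny_option" are invented
    fs.Register(&fs.RegInfo{
        Name:        "mybackend",
        Description: "Example backend",
        NewFs:       NewFs,
        Options: []fs.Option{{
            Name: "shiny_option",
            Help: `One line summary used for the generated flag help.

The rest of this text is shown to the user in "rclone config"
and is pulled into the docs by "make backenddocs".`,
            Default:  false,
            Advanced: true,
        }},
    })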
@@ -203,14 +215,20 @@ file.

## Commit messages ##

Please make the first line of your commit message a summary of the
-change, and prefix it with the directory of the change followed by a
-colon. The changelog gets made by looking at just these first lines
-so make it good!
+change that a user (not a developer) of rclone would like to read, and
+prefix it with the directory of the change followed by a colon. The
+changelog gets made by looking at just these first lines so make it
+good!

If you have more to say about the commit, then enter a blank line and
carry on the description. Remember to say why the change was needed -
the commit itself shows what was changed.

+Writing more is better than less. Comparing the behaviour before the
+change to that after the change is very useful. Imagine you are
+writing to yourself in 12 months time when you've forgotten everything
+about what you just did and you need to get up to speed quickly.

If the change fixes an issue then write `Fixes #1234` in the commit
message. This can be on the subject line if it will fit. If you
don't want to close the associated issue just put `#1234` and the
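A hypothetical commit message following these rules (the subject, body and issue number are all invented for illustration) might look like:

    docs: explain where to document new backend flags

    Before this change it wasn't clear whether backend flags belonged in
    docs/content/docs.md or in the backend source. Now the Help: field in
    the source file is the single place to document them.

    Fixes #0000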
@@ -258,9 +276,8 @@ To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency, add it to
`go.mod` and `go.sum` and vendor it for older go versions.

-    export GO111MODULE=on
-    go get github.com/ncw/new_dependency
-    go mod vendor
+    GO111MODULE=on go get github.com/ncw/new_dependency
+    GO111MODULE=on go mod vendor

You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.
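If you do need a constraint, Go modules accept a version suffix on `go get`; a sketch with an invented version number:

    GO111MODULE=on go get github.com/ncw/new_dependency@v1.2.3
    GO111MODULE=on go mod vendor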
@@ -275,9 +292,8 @@ in `vendor`.

If you need to update a dependency then run

-    export GO111MODULE=on
-    go get -u github.com/pkg/errors
-    go mod vendor
+    GO111MODULE=on go get -u github.com/pkg/errors
+    GO111MODULE=on go mod vendor

Check it in as a single commit as above.
@@ -334,7 +350,7 @@ Unit tests

Integration tests

-* Add your fs to `fstest/test_all/test_all.go`
+* Add your backend to `fstest/test_all/config.yaml`
* Make sure integration tests pass with
  * `cd fs/operations`
  * `go test -v -remote TestRemote:`
@@ -349,8 +365,8 @@ See the [testing](#testing) section for more information on integration tests.

Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.

-* `README.md` - main Github page
-* `docs/content/remote.md` - main docs page
+* `README.md` - main GitHub page
+* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/about.md` - front page of rclone.org
@@ -58,7 +58,7 @@ Close tickets as soon as you can - make sure they are tagged with a release. Po

Try to process pull requests promptly!

-Merging pull requests on Github itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
+Merging pull requests on GitHub itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.

After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.
4411  MANUAL.html  (diff suppressed because it is too large)
4484  MANUAL.txt  (diff suppressed because it is too large)

21  Makefile
@@ -50,10 +50,9 @@ version:

# Full suite of integration tests
test: rclone
-	go install github.com/ncw/rclone/fstest/test_all
-	-go test -v -count 1 $(BUILDTAGS) $(GO_FILES) 2>&1 | tee test.log
-	-test_all github.com/ncw/rclone/fs/operations github.com/ncw/rclone/fs/sync 2>&1 | tee fs/test_all.log
-	@echo "Written logs in test.log and fs/test_all.log"
+	go install --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/ncw/rclone/fstest/test_all
+	-test_all 2>&1 | tee test_all.log
+	@echo "Written logs in test_all.log"

# Quick test
quicktest:

@@ -88,7 +87,7 @@ build_dep:
ifdef FULL_TESTS
	go get -u github.com/kisielk/errcheck
	go get -u golang.org/x/tools/cmd/goimports
-	go get -u github.com/golang/lint/golint
+	go get -u golang.org/x/lint/golint
endif

# Get the release dependencies

@@ -99,15 +98,15 @@ release_dep:
# Update dependencies
update:
	GO111MODULE=on go get -u ./...
-	GO111MODULE=on go tidy
-	GO111MODULE=on go vendor
+	GO111MODULE=on go mod tidy
+	GO111MODULE=on go mod vendor

doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs

rclone.1: MANUAL.md
	pandoc -s --from markdown --to man MANUAL.md -o rclone.1

-MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs
+MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs
	./bin/make_manual.py

MANUAL.html: MANUAL.md

@@ -117,7 +116,10 @@ MANUAL.txt: MANUAL.md
	pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt

commanddocs: rclone
-	rclone gendocs docs/content/commands/
+	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/commands/
+
+backenddocs: rclone bin/make_backend_docs.py
+	./bin/make_backend_docs.py

rcdocs: rclone
	bin/make_rc_docs.sh

@@ -229,4 +231,3 @@ startdev:

winzip:
	zip -9 rclone-$(TAG).zip rclone.exe
100  README.md
@@ -2,10 +2,11 @@

[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
[Download](https://rclone.org/downloads/) |
[Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |
-[Forum](https://forum.rclone.org/)
+[Forum](https://forum.rclone.org/) |
+[G+](https://google.com/+RcloneOrg)

https://travis-ci.org/ncw/rclone (badge link; image not preserved)
@@ -13,50 +14,81 @@
https://circleci.com/gh/ncw/rclone/tree/master (badge link; image not preserved)
https://godoc.org/github.com/ncw/rclone (badge link; image not preserved)

-Rclone is a command line program to sync files and directories to and from
+# Rclone
+
+Rclone *("rsync for cloud storage")* is a command line program to sync files and directories to and from different cloud storage providers.

-* Amazon Drive ([See note](https://rclone.org/amazonclouddrive/#status))
-* Amazon S3 / Dreamhost / Ceph / Minio / Wasabi
-* Backblaze B2
-* Box
-* Dropbox
-* FTP
-* Google Cloud Storage
-* Google Drive
-* HTTP
-* Hubic
-* Jottacloud
-* Mega
-* Microsoft Azure Blob Storage
-* Microsoft OneDrive
-* OpenDrive
-* Openstack Swift / Rackspace cloud files / Memset Memstore / OVH / Oracle Cloud Storage
-* pCloud
-* QingStor
-* SFTP
-* Webdav / Owncloud / Nextcloud
-* Yandex Disk
-* The local filesystem

-Features
+## Storage providers

+* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
+* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
+* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
+* Box [:page_facing_up:](https://rclone.org/box/)
+* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
+* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
+* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
+* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
+* FTP [:page_facing_up:](https://rclone.org/ftp/)
+* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
+* Google Drive [:page_facing_up:](https://rclone.org/drive/)
+* HTTP [:page_facing_up:](https://rclone.org/http/)
+* Hubic [:page_facing_up:](https://rclone.org/hubic/)
+* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
+* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
+* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
+* Mega [:page_facing_up:](https://rclone.org/mega/)
+* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
+* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
+* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
+* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
+* OVH [:page_facing_up:](https://rclone.org/swift/)
+* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
+* Openstack Swift [:page_facing_up:](https://rclone.org/swift/)
+* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
+* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
+* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
+* put.io [:page_facing_up:](https://rclone.org/webdav/#put-io)
+* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
+* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
+* SFTP [:page_facing_up:](https://rclone.org/sftp/)
+* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
+* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
+* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
+* The local filesystem [:page_facing_up:](https://rclone.org/local/)
+
+Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
+
+## Features

* MD5/SHA1 hashes checked at all times for file integrity
* Timestamps preserved on files
* Partial syncs supported on a whole file basis
-* Copy mode to just copy new/changed files
-* Sync (one way) mode to make a directory identical
-* Check mode to check for file hash equality
+* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
+* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
+* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, eg two different cloud accounts
-* Optional encryption (Crypt)
-* Optional FUSE mount
+* Optional encryption ([Crypt](https://rclone.org/crypt/))
+* Optional cache ([Cache](https://rclone.org/cache/))
+* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))

-See the home page for installation, usage, documentation, changelog
-and configuration walkthroughs.
+## Installation & documentation

-* https://rclone.org/
+Please see the [rclone website](https://rclone.org/) for:
+
+* [Installation](https://rclone.org/install/)
+* [Documentation & configuration](https://rclone.org/docs/)
+* [Changelog](https://rclone.org/changelog/)
+* [FAQ](https://rclone.org/faq/)
+* [Storage providers](https://rclone.org/overview/)
+* [Forum](https://forum.rclone.org/)
+* ...and more
+
+## Downloads
+
+* https://rclone.org/downloads/

License
-------

This is free software under the terms of the MIT license (check the
-COPYING file included in this package).
+[COPYING file](/rclone/COPYING) included in this package).
15  RELEASE.md
@@ -32,6 +32,21 @@ Early in the next release cycle update the vendored dependencies

* git add new files
* git commit -a -v

+If `make update` fails with errors like this:
+
+```
+# github.com/cpuguy83/go-md2man/md2man
+../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
+../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
+```
+
+it can be fixed with:
+
+* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
+* GO111MODULE=on go mod tidy
+* GO111MODULE=on go mod vendor

Making a point release. If rclone needs a point release due to some
horrendous bug, then

* git branch v1.XX v1.XX-fixes
@@ -2,13 +2,12 @@ package alias

import (
	"errors"
-	"path"
-	"path/filepath"
	"strings"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
+	"github.com/ncw/rclone/fs/fspath"
)

// Register with Fs
@@ -47,13 +46,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	if strings.HasPrefix(opt.Remote, name+":") {
		return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
	}
-	_, configName, fsPath, err := fs.ParseRemote(opt.Remote)
+	fsInfo, configName, fsPath, config, err := fs.ConfigFs(opt.Remote)
	if err != nil {
		return nil, err
	}
-	root = path.Join(fsPath, filepath.ToSlash(root))
-	if configName == "local" {
-		return fs.NewFs(root)
-	}
-	return fs.NewFs(configName + ":" + root)
+	return fsInfo.NewFs(configName, fspath.JoinRootPath(fsPath, root), config)
}
@@ -21,7 +21,7 @@ import (
	"strings"
	"time"

-	"github.com/ncw/go-acd"
+	acd "github.com/ncw/go-acd"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/configmap"
@@ -97,13 +97,42 @@ func init() {
			Hide:     fs.OptionHideBoth,
			Advanced: true,
		}, {
			Name: "upload_wait_per_gb",
-			Help: "Additional time per GB to wait after a failed complete upload to see if it appears.",
+			Help: `Additional time per GB to wait after a failed complete upload to see if it appears.
+
+Sometimes Amazon Drive gives an error when a file has been fully
+uploaded but the file appears anyway after a little while. This
+happens sometimes for files over 1GB in size and nearly every time for
+files bigger than 10GB. This parameter controls the time rclone waits
+for the file to appear.
+
+The default value for this parameter is 3 minutes per GB, so by
+default it will wait 3 minutes for every GB uploaded to see if the
+file appears.
+
+You can disable this feature by setting it to 0. This may cause
+conflict errors as rclone retries the failed upload but the file will
+most likely appear correctly eventually.
+
+These values were determined empirically by observing lots of uploads
+of big files for a range of file sizes.
+
+Upload with the "-v" flag to see more info about what rclone is doing
+in this situation.`,
			Default:  fs.Duration(180 * time.Second),
			Advanced: true,
		}, {
			Name: "templink_threshold",
-			Help: "Files >= this size will be downloaded via their tempLink.",
+			Help: `Files >= this size will be downloaded via their tempLink.
+
+Files this size or more will be downloaded via their "tempLink". This
+is to work around a problem with Amazon Drive which blocks downloads
+of files bigger than about 10GB. The default for this is 9GB which
+shouldn't need to be changed.
+
+To download files above this threshold, rclone requests a "tempLink"
+which downloads the file through a temporary URL directly from the
+underlying S3 storage.`,
			Default:  defaultTempLinkThreshold,
			Advanced: true,
		}},
@@ -235,7 +264,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	}
	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
	if err != nil {
-		log.Fatalf("Failed to configure Amazon Drive: %v", err)
+		return nil, errors.Wrap(err, "failed to configure Amazon Drive")
	}

	c := acd.NewClient(oAuthClient)
@@ -283,16 +312,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
-		newF := *f
-		newF.dirCache = dircache.New(newRoot, f.trueRootID, &newF)
-		newF.root = newRoot
+		tempF := *f
+		tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
+		tempF.root = newRoot
		// Make new Fs which is the parent
-		err = newF.dirCache.FindRoot(false)
+		err = tempF.dirCache.FindRoot(false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
-		_, err := newF.newObjectWithInfo(remote, nil)
+		_, err := tempF.newObjectWithInfo(remote, nil)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
@@ -300,8 +329,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
			}
			return nil, err
		}
+		// XXX: update the old f here instead of returning tempF, since
+		// `features` were already filled with functions having *f as a receiver.
+		// See https://github.com/ncw/rclone/issues/2182
+		f.dirCache = tempF.dirCache
+		f.root = tempF.root
		// return an error with an fs which points to the parent
-		return &newF, fs.ErrorIsFile
+		return f, fs.ErrorIsFile
	}
	return f, nil
}
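The XXX comment above marks a subtle Go point: function values already stored in `features` stay bound to the original `*f`, so the copy's state must be folded back into `f` rather than returning the copy. A standalone sketch of the pitfall, with types invented purely for illustration:

	package main

	import "fmt"

	type Example struct{ root string }

	func (e *Example) Root() string { return e.root }

	func main() {
		e := &Example{root: "old"}
		rootFn := e.Root // method value bound to e, like the functions filled into `features`
		temp := *e       // shallow copy, like tempF := *f
		temp.root = "new"
		fmt.Println(rootFn()) // "old" - holders of rootFn never see the copy
		e.root = temp.root    // so the fix folds the copy's state back into e
		fmt.Println(rootFn()) // "new"
	}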
@@ -1240,24 +1274,38 @@
// Automatically restarts itself in case of unexpected behaviour of the remote.
//
// Close the returned channel to stop being notified.
-func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
+func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
	checkpoint := f.opt.Checkpoint

-	quit := make(chan bool)
	go func() {
+		var ticker *time.Ticker
+		var tickerC <-chan time.Time
		for {
-			checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
-			if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
-				fs.Debugf(f, "Unable to save checkpoint: %v", err)
-			}
			select {
-			case <-quit:
-				return
-			case <-time.After(pollInterval):
+			case pollInterval, ok := <-pollIntervalChan:
+				if !ok {
+					if ticker != nil {
+						ticker.Stop()
+					}
+					return
+				}
+				if pollInterval == 0 {
+					if ticker != nil {
+						ticker.Stop()
+						ticker, tickerC = nil, nil
+					}
+				} else {
+					ticker = time.NewTicker(pollInterval)
+					tickerC = ticker.C
+				}
+			case <-tickerC:
+				checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
+				if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
+					fs.Debugf(f, "Unable to save checkpoint: %v", err)
+				}
			}
		}
	}()
-	return quit
}

func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string {
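A minimal sketch of how a caller might drive this channel-based API, going by the implementation above (the callback body is illustrative and `f` is assumed to be an already-constructed Fs):

	pollChan := make(chan time.Duration)
	f.ChangeNotify(func(path string, entryType fs.EntryType) {
		fs.Debugf(nil, "change detected at %q", path)
	}, pollChan)
	pollChan <- 30 * time.Second // start polling every 30s
	pollChan <- 0                // pause polling without stopping the goroutine
	close(pollChan)              // stop the notifier goroutine for good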
@@ -1,6 +1,6 @@
// Package azureblob provides an interface to the Microsoft Azure blob object storage system

-// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
+// +build !plan9,!solaris,go1.8

package azureblob
@@ -22,12 +22,14 @@ import (
	"sync"
	"time"

-	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
+	"github.com/Azure/azure-pipeline-go/pipeline"
+	"github.com/Azure/azure-storage-blob-go/azblob"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/accounting"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/fserrors"
	"github.com/ncw/rclone/fs/fshttp"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/fs/walk"
	"github.com/ncw/rclone/lib/pacer"
@@ -45,11 +47,12 @@ const (
	maxTotalParts         = 50000 // in multipart upload
	storageDefaultBaseURL = "blob.core.windows.net"
	// maxUncommittedSize = 9 << 30 // can't upload bigger than this
-	defaultChunkSize    = 4 * 1024 * 1024
-	maxChunkSize        = 100 * 1024 * 1024
-	defaultUploadCutoff = 256 * 1024 * 1024
-	maxUploadCutoff     = 256 * 1024 * 1024
+	defaultChunkSize    = 4 * fs.MebiByte
+	maxChunkSize        = 100 * fs.MebiByte
+	defaultUploadCutoff = 256 * fs.MebiByte
+	maxUploadCutoff     = 256 * fs.MebiByte
	defaultAccessTier   = azblob.AccessTierNone
+	maxTryTimeout       = time.Hour * 24 * 365 // max time of an azure web request response window (whether or not data is flowing)
)

// Register with Fs
@@ -73,23 +76,44 @@ func init() {
			Advanced: true,
		}, {
			Name:     "upload_cutoff",
-			Help:     "Cutoff for switching to chunked upload.",
+			Help:     "Cutoff for switching to chunked upload (<= 256MB).",
			Default:  fs.SizeSuffix(defaultUploadCutoff),
			Advanced: true,
		}, {
			Name: "chunk_size",
-			Help: "Upload chunk size. Must fit in memory.",
+			Help: `Upload chunk size (<= 100MB).
+
+Note that this is stored in memory and there may be up to
+"--transfers" chunks stored at once in memory.`,
			Default:  fs.SizeSuffix(defaultChunkSize),
			Advanced: true,
		}, {
			Name: "list_chunk",
-			Help: "Size of blob list.",
+			Help: `Size of blob list.
+
+This sets the number of blobs requested in each listing chunk. Default
+is the maximum, 5000. "List blobs" requests are permitted 2 minutes
+per megabyte to complete. If an operation is taking longer than 2
+minutes per megabyte on average, it will time out (
+[source](https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-blob-service-operations#exceptions-to-default-timeout-interval)
+). This can be used to limit the number of blobs items to return, to
+avoid the time out.`,
			Default:  maxListChunkSize,
			Advanced: true,
		}, {
			Name: "access_tier",
-			Help: "Access tier of blob, supports hot, cool and archive tiers.\nArchived blobs can be restored by setting access tier to hot or cool." +
-				" Leave blank if you intend to use default access tier, which is set at account level",
+			Help: `Access tier of blob: hot, cool or archive.
+
+Archived blobs can be restored by setting access tier to hot or
+cool. Leave blank if you intend to use default access tier, which is
+set at account level
+
+If there is no "access tier" specified, rclone doesn't apply any tier.
+rclone performs "Set Tier" operation on blobs while uploading, if objects
+are not modified, specifying "access tier" to new one will have no effect.
+If blobs are in "archive tier" at remote, trying to perform data transfer
+operations from remote will not be allowed. User should first restore by
+tiering blob to "Hot" or "Cool".`,
			Advanced: true,
		}},
	})
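As a worked example of the memory note in the `chunk_size` help above: with the 4MB default chunk size and rclone's default of 4 transfers, chunked uploads can hold roughly 16MB of chunk buffers in memory at once.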
@@ -113,6 +137,7 @@ type Fs struct {
	root      string               // the path we are working on if any
	opt       Options              // parsed config options
	features  *fs.Features         // optional features
+	client    *http.Client         // http client we are using
	svcURL    *azblob.ServiceURL   // reference to serviceURL
	cntURL    *azblob.ContainerURL // reference to containerURL
	container string               // the container we are working on
@@ -191,19 +216,6 @@ func validateAccessTier(tier string) bool {
	}
}

-// validAccessTiers returns list of supported storage tiers on azureblob fs
-func validAccessTiers() []string {
-	validTiers := [...]azblob.AccessTierType{azblob.AccessTierHot, azblob.AccessTierCool,
-		azblob.AccessTierArchive}
-
-	var tiers [len(validTiers)]string
-
-	for i, tier := range validTiers {
-		tiers[i] = string(tier)
-	}
-	return tiers[:]
-}
-
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	401, // Unauthorized (eg "Token has expired")
@@ -229,6 +241,72 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
	return fserrors.ShouldRetry(err), err
}

+func checkUploadChunkSize(cs fs.SizeSuffix) error {
+	const minChunkSize = fs.Byte
+	if cs < minChunkSize {
+		return errors.Errorf("%s is less than %s", cs, minChunkSize)
+	}
+	if cs > maxChunkSize {
+		return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
+	}
+	return nil
+}
+
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+	err = checkUploadChunkSize(cs)
+	if err == nil {
+		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+	}
+	return
+}
+
+func checkUploadCutoff(cs fs.SizeSuffix) error {
+	if cs > maxUploadCutoff {
+		return errors.Errorf("%v must be less than or equal to %v", cs, maxUploadCutoff)
+	}
+	return nil
+}
+
+func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+	err = checkUploadCutoff(cs)
+	if err == nil {
+		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
+	}
+	return
+}
+
+// httpClientFactory creates a Factory object that sends HTTP requests
+// to rclone's http.Client.
+//
+// copied from azblob.newDefaultHTTPClientFactory
+func httpClientFactory(client *http.Client) pipeline.Factory {
+	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
+		return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+			r, err := client.Do(request.WithContext(ctx))
+			if err != nil {
+				err = pipeline.NewError(err, "HTTP request failed")
+			}
+			return pipeline.NewHTTPResponse(r), err
+		}
+	})
+}
+
+// newPipeline creates a Pipeline using the specified credentials and options.
+//
+// this code was copied from azblob.NewPipeline
+func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline.Pipeline {
+	// Closest to API goes first; closest to the wire goes last
+	factories := []pipeline.Factory{
+		azblob.NewTelemetryPolicyFactory(o.Telemetry),
+		azblob.NewUniqueRequestIDPolicyFactory(),
+		azblob.NewRetryPolicyFactory(o.Retry),
+		c,
+		pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked
+		azblob.NewRequestLogPolicyFactory(o.RequestLog),
+	}
+	return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log})
+}
+
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
@@ -238,11 +316,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		return nil, err
	}

-	if opt.UploadCutoff > maxUploadCutoff {
-		return nil, errors.Errorf("azure: upload cutoff (%v) must be less than or equal to %v", opt.UploadCutoff, maxUploadCutoff)
+	err = checkUploadCutoff(opt.UploadCutoff)
+	if err != nil {
+		return nil, errors.Wrap(err, "azure: upload cutoff")
	}
-	if opt.ChunkSize > maxChunkSize {
-		return nil, errors.Errorf("azure: chunk size can't be greater than %v - was %v", maxChunkSize, opt.ChunkSize)
+	err = checkUploadChunkSize(opt.ChunkSize)
+	if err != nil {
+		return nil, errors.Wrap(err, "azure: chunk size")
	}
	if opt.ListChunkSize > maxListChunkSize {
		return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
@@ -262,6 +342,23 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
			string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
	}

+	f := &Fs{
+		name:        name,
+		opt:         *opt,
+		container:   container,
+		root:        directory,
+		pacer:       pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant).SetPacer(pacer.S3Pacer),
+		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
+		client:      fshttp.NewClient(fs.Config),
+	}
+	f.features = (&fs.Features{
+		ReadMimeType:  true,
+		WriteMimeType: true,
+		BucketBased:   true,
+		SetTier:       true,
+		GetTier:       true,
+	}).Fill(f)
+
	var (
		u          *url.URL
		serviceURL azblob.ServiceURL
@@ -278,7 +375,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		if err != nil {
			return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
		}
-		pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
+		pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
		serviceURL = azblob.NewServiceURL(*u, pipeline)
		containerURL = serviceURL.NewContainerURL(container)
	case opt.SASURL != "":
@@ -287,7 +384,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
			return nil, errors.Wrapf(err, "failed to parse SAS URL")
		}
		// use anonymous credentials in case of sas url
-		pipeline := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
+		pipeline := f.newPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
		// Check if we have container level SAS or account level sas
		parts := azblob.NewBlobURLParts(*u)
		if parts.ContainerName != "" {
@@ -304,25 +401,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	default:
		return nil, errors.New("Need account+key or connectionString or sasURL")
	}
+	f.svcURL = &serviceURL
+	f.cntURL = &containerURL

-	f := &Fs{
-		name:        name,
-		opt:         *opt,
-		container:   container,
-		root:        directory,
-		svcURL:      &serviceURL,
-		cntURL:      &containerURL,
-		pacer:       pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
-		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
-	}
-	f.features = (&fs.Features{
-		ReadMimeType:  true,
-		WriteMimeType: true,
-		BucketBased:   true,
-		SetTier:       true,
-		GetTier:       true,
-		ListTiers:     true,
-	}).Fill(f)
	if f.root != "" {
		f.root += "/"
		// Check to see if the (container,directory) is actually an existing file
@@ -428,6 +509,7 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
		MaxResults: int32(maxResults),
	}
	ctx := context.Background()
+	directoryMarkers := map[string]struct{}{}
	for marker := (azblob.Marker{}); marker.NotDone(); {
		var response *azblob.ListBlobsHierarchySegmentResponse
		err := f.pacer.Call(func() (bool, error) {
@@ -457,13 +539,29 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
				continue
			}
			remote := file.Name[len(f.root):]
-			// Check for directory
-			isDirectory := strings.HasSuffix(remote, "/")
-			if isDirectory {
-				remote = remote[:len(remote)-1]
+			// is this a directory marker?
+			if *file.Properties.ContentLength == 0 {
+				// Note that metadata with hdi_isfolder = true seems to be a
+				// defacto standard for marking blobs as directories.
+				endsWithSlash := strings.HasSuffix(remote, "/")
+				if endsWithSlash || remote == "" || file.Metadata["hdi_isfolder"] == "true" {
+					if endsWithSlash {
+						remote = remote[:len(remote)-1]
+					}
+					err = fn(remote, file, true)
+					if err != nil {
+						return err
+					}
+					// Keep track of directory markers. If recursing then
+					// there will be no Prefixes so no need to keep track
+					if !recurse {
+						directoryMarkers[remote] = struct{}{}
+					}
+					continue // skip directory marker
+				}
			}
			// Send object
-			err = fn(remote, file, isDirectory)
+			err = fn(remote, file, false)
			if err != nil {
				return err
			}
@@ -476,6 +574,10 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
				continue
			}
			remote = remote[len(f.root):]
+			// Don't send if already sent as a directory marker
+			if _, found := directoryMarkers[remote]; found {
+				continue
+			}
			// Send object
			err = fn(remote, nil, true)
			if err != nil {
@@ -662,6 +764,11 @@ func (f *Fs) Mkdir(dir string) error {
			f.containerOK = true
			return false, nil
		case azblob.ServiceCodeContainerBeingDeleted:
+			// From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
+			// When a container is deleted, a container with the same name cannot be created
+			// for at least 30 seconds; the container may not be available for more than 30
+			// seconds if the service is still processing the request.
+			time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
			f.containerDeleted = true
			return true, err
		}
@@ -679,7 +786,7 @@ func (f *Fs) Mkdir(dir string) error {
// isEmpty checks to see if a given directory is empty and returns an error if not
func (f *Fs) isEmpty(dir string) (err error) {
	empty := true
-	err = f.list("", true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
+	err = f.list(dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
		empty = false
		return nil
	})
@@ -1247,11 +1354,20 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption
		Metadata:        o.meta,
		BlobHTTPHeaders: httpHeaders,
	}
+	// FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
+	// is merged the SDK can't upload a single blob of exactly the chunk
+	// size, so upload with a multipart upload to work around.
+	// See: https://github.com/ncw/rclone/issues/2653
+	multipartUpload := size >= int64(o.fs.opt.UploadCutoff)
+	if size == int64(o.fs.opt.ChunkSize) {
+		multipartUpload = true
+		fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
+	}

	ctx := context.Background()
	// Don't retry, return a retry error instead
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-		if size >= int64(o.fs.opt.UploadCutoff) {
+		if multipartUpload {
			// If a large file upload in chunks
			err = o.uploadMultipart(in, size, &blob, &httpHeaders)
		} else {
@@ -1316,7 +1432,7 @@ func (o *Object) SetTier(tier string) error {
	blob := o.getBlobReference()
	ctx := context.Background()
	err := o.fs.pacer.Call(func() (bool, error) {
-		_, err := blob.SetTier(ctx, desiredAccessTier)
+		_, err := blob.SetTier(ctx, desiredAccessTier, azblob.LeaseAccessConditions{})
		return o.fs.shouldRetry(err)
	})
@@ -1337,11 +1453,6 @@ func (o *Object) GetTier() string {
	return string(o.accessTier)
}

-// ListTiers returns list of storage tiers supported on this object
-func (o *Object) ListTiers() []string {
-	return validAccessTiers()
-}
-
// Check the interfaces are satisfied
var (
	_ fs.Fs = &Fs{}
@@ -1,4 +1,4 @@
-// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
+// +build !plan9,!solaris,go1.8

package azureblob
@@ -11,9 +11,7 @@ import (
func (f *Fs) InternalTest(t *testing.T) {
	// Check first feature flags are set on this
	// remote
-	enabled := f.Features().ListTiers
-	assert.True(t, enabled)
-	enabled = f.Features().SetTier
+	enabled := f.Features().SetTier
	assert.True(t, enabled)
	enabled = f.Features().GetTier
	assert.True(t, enabled)
@@ -1,20 +1,37 @@
// Test AzureBlob filesystem interface

-// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
+// +build !plan9,!solaris,go1.8

-package azureblob_test
+package azureblob

import (
	"testing"

-	"github.com/ncw/rclone/backend/azureblob"
+	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
-		RemoteName: "TestAzureBlob:",
-		NilObject:  (*azureblob.Object)(nil),
+		RemoteName:  "TestAzureBlob:",
+		NilObject:   (*Object)(nil),
+		TiersToTest: []string{"Hot", "Cool"},
+		ChunkedUpload: fstests.ChunkedUploadConfig{
+			MaxChunkSize: maxChunkSize,
+		},
	})
}

+func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+	return f.setUploadChunkSize(cs)
+}
+
+func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+	return f.setUploadCutoff(cs)
+}
+
+var (
+	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
+	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
+)
@@ -1,6 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files"

-// +build freebsd netbsd openbsd plan9 solaris !go1.8
+// +build plan9 solaris !go1.8

package azureblob
@@ -136,6 +136,7 @@ type AuthorizeAccountResponse struct {
	AccountID string `json:"accountId"` // The identifier for the account.
	Allowed   struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
		BucketID     string      `json:"bucketId"`     // When present, access is restricted to one bucket.
+		BucketName   string      `json:"bucketName"`   // When present, name of bucket - may be empty
		Capabilities []string    `json:"capabilities"` // A list of strings, each one naming a capability the key has.
		NamePrefix   interface{} `json:"namePrefix"`   // When present, access is restricted to files whose names start with the prefix
	} `json:"allowed"`
122  backend/b2/b2.go
@@ -48,9 +48,9 @@ const (
	decayConstant = 1 // bigger for slower decay, exponential
	maxParts      = 10000
	maxVersions   = 100 // maximum number of versions we search in --b2-versions mode
-	minChunkSize        = 5E6
-	defaultChunkSize    = 96 * 1024 * 1024
-	defaultUploadCutoff = 200E6
+	minChunkSize        = 5 * fs.MebiByte
+	defaultChunkSize    = 96 * fs.MebiByte
+	defaultUploadCutoff = 200 * fs.MebiByte
)

// Globals
@@ -77,14 +77,24 @@ func init() {
			Help:     "Endpoint for the service.\nLeave blank normally.",
			Advanced: true,
		}, {
			Name: "test_mode",
-			Help: "A flag string for X-Bz-Test-Mode header for debugging.",
+			Help: `A flag string for X-Bz-Test-Mode header for debugging.
+
+This is for debugging purposes only. Setting it to one of the strings
+below will cause b2 to return specific errors:
+
+  * "fail_some_uploads"
+  * "expire_some_account_authorization_tokens"
+  * "force_cap_exceeded"
+
+These will be set in the "X-Bz-Test-Mode" header which is documented
+in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`,
			Default:  "",
			Hide:     fs.OptionHideConfigurator,
			Advanced: true,
		}, {
			Name:     "versions",
-			Help:     "Include old versions in directory listings.",
+			Help:     "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
			Default:  false,
			Advanced: true,
		}, {
@@ -92,13 +102,22 @@ func init() {
		Help:    "Permanently delete files on remote removal, otherwise hide files.",
		Default: false,
	}, {
-		Name: "upload_cutoff",
-		Help: "Cutoff for switching to chunked upload.",
+		Name: "upload_cutoff",
+		Help: `Cutoff for switching to chunked upload.
+
+Files above this size will be uploaded in chunks of "--b2-chunk-size".
+
+This value should be set no larger than 4.657GiB (== 5GB).`,
		Default:  fs.SizeSuffix(defaultUploadCutoff),
		Advanced: true,
	}, {
-		Name: "chunk_size",
-		Help: "Upload chunk size. Must fit in memory.",
+		Name: "chunk_size",
+		Help: `Upload chunk size. Must fit in memory.
+
+When uploading large files, chunk the file into this size. Note that
+these chunks are buffered in memory and there might be a maximum of
+"--transfers" chunks in progress at once. 5,000,000 Bytes is the
+minimum size.`,
		Default:  fs.SizeSuffix(defaultChunkSize),
		Advanced: true,
	}},
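For scale (an illustrative calculation, not a measured figure): since up to "--transfers" chunks may be buffered at once, the chunk buffers alone can reach transfers × chunk_size of memory, e.g. 4 × 96 MB = 384 MB with rclone's default of 4 transfers.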
@@ -263,6 +282,37 @@ func errorHandler(resp *http.Response) error {
	return errResponse
}

+func checkUploadChunkSize(cs fs.SizeSuffix) error {
+	if cs < minChunkSize {
+		return errors.Errorf("%s is less than %s", cs, minChunkSize)
+	}
+	return nil
+}
+
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+	err = checkUploadChunkSize(cs)
+	if err == nil {
+		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+		f.fillBufferTokens() // reset the buffer tokens
+	}
+	return
+}
+
+func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
+	if cs < opt.ChunkSize {
+		return errors.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
+	}
+	return nil
+}
+
+func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+	err = checkUploadCutoff(&f.opt, cs)
+	if err == nil {
+		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
+	}
+	return
+}
+
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
@@ -271,11 +321,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	if err != nil {
		return nil, err
	}
-	if opt.UploadCutoff < opt.ChunkSize {
-		return nil, errors.Errorf("b2: upload cutoff (%v) must be greater than or equal to chunk size (%v)", opt.UploadCutoff, opt.ChunkSize)
+	err = checkUploadCutoff(opt, opt.UploadCutoff)
+	if err != nil {
+		return nil, errors.Wrap(err, "b2: upload cutoff")
	}
-	if opt.ChunkSize < minChunkSize {
-		return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, opt.ChunkSize)
+	err = checkUploadChunkSize(opt.ChunkSize)
+	if err != nil {
+		return nil, errors.Wrap(err, "b2: chunk size")
	}
	bucket, directory, err := parsePath(root)
	if err != nil {
@@ -291,13 +343,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		opt.Endpoint = defaultEndpoint
	}
	f := &Fs{
-		name:         name,
-		opt:          *opt,
-		bucket:       bucket,
-		root:         directory,
-		srv:          rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
-		pacer:        pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
-		bufferTokens: make(chan []byte, fs.Config.Transfers),
+		name:   name,
+		opt:    *opt,
+		bucket: bucket,
+		root:   directory,
+		srv:    rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
+		pacer:  pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
	}
	f.features = (&fs.Features{
		ReadMimeType: true,
@@ -310,16 +361,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		f.srv.SetHeader(testModeHeader, testMode)
		fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
	}
-	// Fill up the buffer tokens
-	for i := 0; i < fs.Config.Transfers; i++ {
-		f.bufferTokens <- nil
-	}
+	f.fillBufferTokens()
	err = f.authorizeAccount()
	if err != nil {
		return nil, errors.Wrap(err, "failed to authorize account")
	}
+	// If this is a key limited to a single bucket, it must exist already
+	if f.bucket != "" && f.info.Allowed.BucketID != "" {
+		allowedBucket := f.info.Allowed.BucketName
+		if allowedBucket == "" {
+			return nil, errors.New("bucket that application key is restricted to no longer exists")
+		}
+		if allowedBucket != f.bucket {
+			return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
+		}
+		f.markBucketOK()
+		f.setBucketID(f.info.Allowed.BucketID)
+	}
@@ -421,6 +476,14 @@ func (f *Fs) clearUploadURL() {
	f.uploadMu.Unlock()
}

+// Fill up (or reset) the buffer tokens
+func (f *Fs) fillBufferTokens() {
+	f.bufferTokens = make(chan []byte, fs.Config.Transfers)
+	for i := 0; i < fs.Config.Transfers; i++ {
+		f.bufferTokens <- nil
+	}
+}
+
// getUploadBlock gets a block from the pool of size chunkSize
func (f *Fs) getUploadBlock() []byte {
	buf := <-f.bufferTokens
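A self-contained sketch of the buffer-token pool that fillBufferTokens sets up, assuming a fixed transfer count (the names and sizes here are illustrative, not rclone's): a nil token means "allocate a fresh buffer", and returning a buffer recycles it for the next upload.

    package main

    import "fmt"

    func main() {
        const transfers = 4
        const chunkSize = 1024

        tokens := make(chan []byte, transfers)
        for i := 0; i < transfers; i++ {
            tokens <- nil // all tokens start empty
        }

        // get blocks until a token is free; nil tokens become new buffers
        get := func() []byte {
            buf := <-tokens
            if buf == nil {
                buf = make([]byte, chunkSize)
            }
            return buf
        }
        // put returns the buffer (and its token) to the pool for reuse
        put := func(buf []byte) { tokens <- buf }

        buf := get()
        fmt.Println("got buffer of", len(buf), "bytes")
        put(buf)
    }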
@@ -924,6 +987,12 @@ func (f *Fs) purge(oldOnly bool) error {
			errReturn = err
		}
	}
+	var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
+		if time.Since(time.Time(timestamp)).Hours() > 24 {
+			return true
+		}
+		return false
+	}

	// Delete Config.Transfers in parallel
	toBeDeleted := make(chan *api.File, fs.Config.Transfers)
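A minimal sketch of the staleness rule introduced above, under the assumption that an unfinished upload counts as stale once its start timestamp is more than 24 hours old:

    package main

    import (
        "fmt"
        "time"
    )

    // isStale reports whether an upload that started at the given time
    // should be treated as abandoned (older than 24 hours).
    func isStale(started time.Time) bool {
        return time.Since(started) > 24*time.Hour
    }

    func main() {
        fmt.Println(isStale(time.Now().Add(-25 * time.Hour))) // true
        fmt.Println(isStale(time.Now().Add(-1 * time.Hour)))  // false
    }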
@@ -947,6 +1016,9 @@ func (f *Fs) purge(oldOnly bool) error {
	if object.Action == "hide" {
		fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
		toBeDeleted <- object
+	} else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
+		fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
+		toBeDeleted <- object
	} else {
		fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
	}
@@ -1,10 +1,10 @@
// Test B2 filesystem interface
-package b2_test
+package b2

import (
	"testing"

-	"github.com/ncw/rclone/backend/b2"
+	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)
@@ -12,6 +12,23 @@ import (
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestB2:",
-		NilObject:  (*b2.Object)(nil),
+		NilObject:  (*Object)(nil),
+		ChunkedUpload: fstests.ChunkedUploadConfig{
+			MinChunkSize:       minChunkSize,
+			NeedMultipleChunks: true,
+		},
	})
}

+func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+	return f.setUploadChunkSize(cs)
+}
+
+func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+	return f.setUploadCutoff(cs)
+}
+
+var (
+	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
+	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
+)
@@ -85,7 +85,7 @@ func init() {
		Help: "Box App Client Secret\nLeave blank normally.",
	}, {
		Name:     "upload_cutoff",
-		Help:     "Cutoff for switching to multipart upload.",
+		Help:     "Cutoff for switching to multipart upload (>= 50MB).",
		Default:  fs.SizeSuffix(defaultUploadCutoff),
		Advanced: true,
	}, {
@@ -252,7 +252,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	root = parsePath(root)
	oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
	if err != nil {
-		log.Fatalf("Failed to configure Box: %v", err)
+		return nil, errors.Wrap(err, "failed to configure Box")
	}

	f := &Fs{
@@ -283,16 +283,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
-		newF := *f
-		newF.dirCache = dircache.New(newRoot, rootID, &newF)
-		newF.root = newRoot
+		tempF := *f
+		tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
+		tempF.root = newRoot
		// Make new Fs which is the parent
-		err = newF.dirCache.FindRoot(false)
+		err = tempF.dirCache.FindRoot(false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
-		_, err := newF.newObjectWithInfo(remote, nil)
+		_, err := tempF.newObjectWithInfo(remote, nil)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
@@ -300,9 +300,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
			}
			return nil, err
		}
-		f.features.Fill(&newF)
+		f.features.Fill(&tempF)
+		// XXX: update the old f here instead of returning tempF, since
+		// `features` were already filled with functions having *f as a receiver.
+		// See https://github.com/ncw/rclone/issues/2182
+		f.dirCache = tempF.dirCache
+		f.root = tempF.root
		// return an error with an fs which points to the parent
-		return &newF, fs.ErrorIsFile
+		return f, fs.ErrorIsFile
	}
	return f, nil
}
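The XXX comment above describes a subtle Go pitfall: the feature functions were created with *f as their receiver, so returning a copy of the struct would leave those functions pointing at the old value. A self-contained sketch of the effect (the types here are illustrative, not Box's):

    package main

    import "fmt"

    type Fs struct{ root string }

    type Features struct{ List func() string }

    func main() {
        f := &Fs{root: "old"}
        // The feature closure captures f itself, not a snapshot of it.
        feat := &Features{List: func() string { return f.root }}

        // Copying the Fs does not re-bind the closure: it still reads *f.
        tempF := *f
        tempF.root = "new"
        fmt.Println(feat.List()) // "old" — the feature still sees f

        // The fix above mutates f in place instead of returning the copy.
        f.root = tempF.root
        fmt.Println(feat.List()) // "new"
    }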
backend/cache/cache.go
@@ -11,6 +11,7 @@ import (
	"os/signal"
	"path"
+	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"sync"
@@ -23,6 +24,7 @@ import (
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/config/obscure"
+	"github.com/ncw/rclone/fs/fspath"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/fs/rc"
	"github.com/ncw/rclone/fs/walk"
@@ -86,8 +88,12 @@ func init() {
		Help:     "Skip all certificate verifications when connecting to the Plex server",
		Advanced: true,
	}, {
-		Name: "chunk_size",
-		Help: "The size of a chunk. Lower value good for slow connections but can affect seamless reading.",
+		Name: "chunk_size",
+		Help: `The size of a chunk (partial file data).
+
+Use lower numbers for slower connections. If the chunk size is
+changed, any downloaded chunks will be invalid and cache-chunk-path
+will need to be cleared or unexpected EOF errors will occur.`,
		Default: DefCacheChunkSize,
		Examples: []fs.OptionExample{{
			Value: "1m",
@@ -100,8 +106,10 @@ func init() {
			Help:  "10 MB",
		}},
	}, {
-		Name: "info_age",
-		Help: "How much time should object info (file size, file hashes etc) be stored in cache.\nUse a very high value if you don't plan on changing the source FS from outside the cache.\nAccepted units are: \"s\", \"m\", \"h\".",
+		Name: "info_age",
+		Help: `How long to cache file structure information (directory listings, file size, times etc).
+If all write operations are done through the cache then you can safely make
+this value very large as the cache store will also be updated in real time.`,
		Default: DefCacheInfoAge,
		Examples: []fs.OptionExample{{
			Value: "1h",
@@ -114,8 +122,11 @@ func init() {
			Help:  "48 hours",
		}},
	}, {
-		Name: "chunk_total_size",
-		Help: "The maximum size of stored chunks. When the storage grows beyond this size, the oldest chunks will be deleted.",
+		Name: "chunk_total_size",
+		Help: `The total size that the chunks can take up on the local disk.
+
+If the cache exceeds this value then it will start to delete the
+oldest chunks until it goes under this value.`,
		Default: DefCacheTotalChunkSize,
		Examples: []fs.OptionExample{{
			Value: "500M",
@@ -130,63 +141,143 @@ func init() {
	}, {
		Name:     "db_path",
		Default:  filepath.Join(config.CacheDir, "cache-backend"),
-		Help:     "Directory to cache DB",
+		Help:     "Directory to store file structure metadata DB.\nThe remote name is used as the DB file name.",
		Advanced: true,
	}, {
-		Name:    "chunk_path",
-		Default: filepath.Join(config.CacheDir, "cache-backend"),
-		Help:    "Directory to cache chunk files",
+		Name:    "chunk_path",
+		Default: filepath.Join(config.CacheDir, "cache-backend"),
+		Help: `Directory to cache chunk files.
+
+Path to where partial file data (chunks) are stored locally. The remote
+name is appended to the final path.
+
+This config follows the "--cache-db-path". If you specify a custom
+location for "--cache-db-path" and don't specify one for "--cache-chunk-path"
+then "--cache-chunk-path" will use the same path as "--cache-db-path".`,
		Advanced: true,
	}, {
		Name:    "db_purge",
		Default: false,
-		Help:    "Purge the cache DB before",
+		Help:    "Clear all the cached data for this remote on start.",
+		Hide:    fs.OptionHideConfigurator,
		Advanced: true,
	}, {
-		Name:    "chunk_clean_interval",
-		Default: DefCacheChunkCleanInterval,
-		Help:    "Interval at which chunk cleanup runs",
+		Name:    "chunk_clean_interval",
+		Default: DefCacheChunkCleanInterval,
+		Help: `How often should the cache perform cleanups of the chunk storage.
+The default value should be ok for most people. If you find that the
+cache goes over "cache-chunk-total-size" too often then try to lower
+this value to force it to perform cleanups more often.`,
		Advanced: true,
	}, {
-		Name:    "read_retries",
-		Default: DefCacheReadRetries,
-		Help:    "How many times to retry a read from a cache storage",
+		Name:    "read_retries",
+		Default: DefCacheReadRetries,
+		Help: `How many times to retry a read from a cache storage.
+
+Since reading from a cache stream is independent from downloading file
+data, readers can get to a point where there's no more data in the
+cache. Most of the time this can indicate a connectivity issue if
+cache isn't able to provide file data anymore.
+
+For really slow connections, increase this to a point where the stream is
+able to provide data but your experience will be very stuttering.`,
		Advanced: true,
	}, {
Name: "workers",
|
||||
Default: DefCacheTotalWorkers,
|
||||
Help: "How many workers should run in parallel to download chunks",
|
||||
Name: "workers",
|
||||
Default: DefCacheTotalWorkers,
|
||||
Help: `How many workers should run in parallel to download chunks.
|
||||
|
||||
Higher values will mean more parallel processing (better CPU needed)
|
||||
and more concurrent requests on the cloud provider. This impacts
|
||||
several aspects like the cloud provider API limits, more stress on the
|
||||
hardware that rclone runs on but it also means that streams will be
|
||||
more fluid and data will be available much more faster to readers.
|
||||
|
||||
**Note**: If the optional Plex integration is enabled then this
|
||||
setting will adapt to the type of reading performed and the value
|
||||
specified here will be used as a maximum number of workers to use.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_no_memory",
|
||||
Default: DefCacheChunkNoMemory,
|
||||
Help: "Disable the in-memory cache for storing chunks during streaming",
|
||||
Name: "chunk_no_memory",
|
||||
Default: DefCacheChunkNoMemory,
|
||||
Help: `Disable the in-memory cache for storing chunks during streaming.
|
||||
|
||||
By default, cache will keep file data during streaming in RAM as well
|
||||
to provide it to readers as fast as possible.
|
||||
|
||||
This transient data is evicted as soon as it is read and the number of
|
||||
chunks stored doesn't exceed the number of workers. However, depending
|
||||
on other settings like "cache-chunk-size" and "cache-workers" this footprint
|
||||
can increase if there are parallel streams too (multiple files being read
|
||||
at the same time).
|
||||
|
||||
If the hardware permits it, use this feature to provide an overall better
|
||||
performance during streaming but it can also be disabled if RAM is not
|
||||
available on the local machine.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "rps",
|
||||
Default: int(DefCacheRps),
|
||||
Help: "Limits the number of requests per second to the source FS. -1 disables the rate limiter",
|
||||
Name: "rps",
|
||||
Default: int(DefCacheRps),
|
||||
Help: `Limits the number of requests per second to the source FS (-1 to disable)
|
||||
|
||||
This setting places a hard limit on the number of requests per second
|
||||
that cache will be doing to the cloud provider remote and try to
|
||||
respect that value by setting waits between reads.
|
||||
|
||||
If you find that you're getting banned or limited on the cloud
|
||||
provider through cache and know that a smaller number of requests per
|
||||
second will allow you to work with it then you can use this setting
|
||||
for that.
|
||||
|
||||
A good balance of all the other settings should make this setting
|
||||
useless but it is available to set for more special cases.
|
||||
|
||||
**NOTE**: This will limit the number of requests during streams but
|
||||
other API calls to the cloud provider like directory listings will
|
||||
still pass.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "writes",
|
||||
Default: DefCacheWrites,
|
||||
Help: "Will cache file data on writes through the FS",
|
||||
Name: "writes",
|
||||
Default: DefCacheWrites,
|
||||
Help: `Cache file data on writes through the FS
|
||||
|
||||
If you need to read files immediately after you upload them through
|
||||
cache you can enable this flag to have their data stored in the
|
||||
cache store at the same time during upload.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "tmp_upload_path",
|
||||
Default: "",
|
||||
Help: "Directory to keep temporary files until they are uploaded to the cloud storage",
|
||||
Name: "tmp_upload_path",
|
||||
Default: "",
|
||||
Help: `Directory to keep temporary files until they are uploaded.
|
||||
|
||||
This is the path where cache will use as a temporary storage for new
|
||||
files that need to be uploaded to the cloud provider.
|
||||
|
||||
Specifying a value will enable this feature. Without it, it is
|
||||
completely disabled and files will be uploaded directly to the cloud
|
||||
provider`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "tmp_wait_time",
|
||||
Default: DefCacheTmpWaitTime,
|
||||
Help: "How long should files be stored in local cache before being uploaded",
|
||||
Name: "tmp_wait_time",
|
||||
Default: DefCacheTmpWaitTime,
|
||||
Help: `How long should files be stored in local cache before being uploaded
|
||||
|
||||
This is the duration that a file must wait in the temporary location
|
||||
_cache-tmp-upload-path_ before it is selected for upload.
|
||||
|
||||
Note that only one file is uploaded at a time and it can take longer
|
||||
to start the upload if a queue formed for this purpose.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "db_wait_time",
|
||||
Default: DefCacheDbWaitTime,
|
||||
Help: "How long to wait for the DB to be available - 0 is unlimited",
|
||||
Name: "db_wait_time",
|
||||
Default: DefCacheDbWaitTime,
|
||||
Help: `How long to wait for the DB to be available - 0 is unlimited
|
||||
|
||||
Only one process can have the DB open at any one time, so rclone waits
|
||||
for this duration for the DB to become available before it gives an
|
||||
error.
|
||||
|
||||
If you set it to 0 then it will wait forever.`,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
@@ -273,7 +364,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
		return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", opt.Remote)
	}

-	remotePath := path.Join(wPath, rootPath)
+	remotePath := fspath.JoinRootPath(wPath, rootPath)
	wrappedFs, wrapErr := wInfo.NewFs(wName, remotePath, wConfig)
	if wrapErr != nil && wrapErr != fs.ErrorIsFile {
		return nil, errors.Wrapf(wrapErr, "failed to make remote %s:%s to wrap", wName, remotePath)
@@ -380,7 +471,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
	fs.Infof(name, "Chunk Clean Interval: %v", f.opt.ChunkCleanInterval)
	fs.Infof(name, "Workers: %v", f.opt.TotalWorkers)
	fs.Infof(name, "File Age: %v", f.opt.InfoAge)
-	if !f.opt.StoreWrites {
+	if f.opt.StoreWrites {
		fs.Infof(name, "Cache Writes: enabled")
	}
@@ -415,7 +506,9 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
	}()

	if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
-		doChangeNotify(f.receiveChangeNotify, time.Duration(f.opt.ChunkCleanInterval))
+		pollInterval := make(chan time.Duration, 1)
+		pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
+		doChangeNotify(f.receiveChangeNotify, pollInterval)
	}

	f.features = (&fs.Features{
@@ -780,12 +873,15 @@ func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) {
// ChangeNotify can subscribe multiple callers
// this is coupled with the wrapped fs ChangeNotify (if it supports it)
// and also notifies other caches (i.e. VFS) to clear out whenever something changes
-func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
+func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
	f.parentsForgetMu.Lock()
	defer f.parentsForgetMu.Unlock()
	fs.Debugf(f, "subscribing to ChangeNotify")
	f.parentsForgetFn = append(f.parentsForgetFn, notifyFunc)
-	return make(chan bool)
+	go func() {
+		for range pollInterval {
+		}
+	}()
}
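A sketch of the new ChangeNotify contract, assuming the interval now arrives on a channel so callers can retune polling at runtime and stop it by closing the channel (this replaces the old fixed time.Duration argument); the timer logic here is illustrative, not rclone's implementation:

    package main

    import (
        "fmt"
        "time"
    )

    // changeNotify polls at whatever interval was last received; closing
    // the channel stops the polling goroutine.
    func changeNotify(poll func(), pollInterval <-chan time.Duration) {
        go func() {
            interval := time.Duration(0)
            timer := time.NewTimer(time.Hour)
            timer.Stop() // no polling until an interval arrives
            for {
                select {
                case d, ok := <-pollInterval:
                    if !ok {
                        timer.Stop()
                        return
                    }
                    interval = d
                    timer.Reset(interval)
                case <-timer.C:
                    poll()
                    timer.Reset(interval)
                }
            }
        }()
    }

    func main() {
        ch := make(chan time.Duration, 1)
        ch <- 50 * time.Millisecond
        changeNotify(func() { fmt.Println("poll") }, ch)
        time.Sleep(120 * time.Millisecond)
        close(ch)
    }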
// Name of the remote (as passed into NewFs)

@@ -886,7 +982,6 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
		fs.Debugf(dir, "list: cached entries: %v", entries)
		return entries, nil
	}
-	// FIXME need to clean existing cached listing

	// we first search any temporary files stored locally
	var cachedEntries fs.DirEntries
@@ -912,27 +1007,42 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
	}

	// search from the source
-	entries, err = f.Fs.List(dir)
+	sourceEntries, err := f.Fs.List(dir)
	if err != nil {
		return nil, err
	}
-	fs.Debugf(dir, "list: read %v from source", len(entries))
-	fs.Debugf(dir, "list: source entries: %v", entries)
+	fs.Debugf(dir, "list: read %v from source", len(sourceEntries))
+	fs.Debugf(dir, "list: source entries: %v", sourceEntries)
+
+	sort.Sort(sourceEntries)
+	for _, entry := range entries {
+		entryRemote := entry.Remote()
+		i := sort.Search(len(sourceEntries), func(i int) bool { return sourceEntries[i].Remote() >= entryRemote })
+		if i < len(sourceEntries) && sourceEntries[i].Remote() == entryRemote {
+			continue
+		}
+		fp := path.Join(f.Root(), entryRemote)
+		switch entry.(type) {
+		case fs.Object:
+			_ = f.cache.RemoveObject(fp)
+		case fs.Directory:
+			_ = f.cache.RemoveDir(fp)
+		}
+		fs.Debugf(dir, "list: remove entry: %v", entryRemote)
+	}
+	entries = nil

	// and then iterate over the ones from source (temp Objects will override source ones)
	var batchDirectories []*Directory
-	for _, entry := range entries {
+	sort.Sort(cachedEntries)
+	tmpCnt := len(cachedEntries)
+	for _, entry := range sourceEntries {
		switch o := entry.(type) {
		case fs.Object:
			// skip over temporary objects (might be uploading)
-			found := false
-			for _, t := range cachedEntries {
-				if t.Remote() == o.Remote() {
-					found = true
-					break
-				}
-			}
-			if found {
+			oRemote := o.Remote()
+			i := sort.Search(tmpCnt, func(i int) bool { return cachedEntries[i].Remote() >= oRemote })
+			if i < tmpCnt && cachedEntries[i].Remote() == oRemote {
				continue
			}
			co := ObjectFromOriginal(f, o).persist()
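The rewrite above replaces a linear scan per entry with sort-once-then-binary-search. A minimal self-contained sketch of that sort.Search membership test:

    package main

    import (
        "fmt"
        "sort"
    )

    func main() {
        entries := []string{"b.txt", "a.txt", "c.txt"}
        sort.Strings(entries) // sort once up front

        // Each lookup is then O(log n) instead of a full scan.
        for _, want := range []string{"b.txt", "z.txt"} {
            i := sort.Search(len(entries), func(i int) bool { return entries[i] >= want })
            found := i < len(entries) && entries[i] == want
            fmt.Println(want, "found:", found)
        }
    }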
backend/cache/cache_internal_test.go
@@ -4,6 +4,9 @@ package cache_test

import (
	"bytes"
+	"encoding/base64"
+	goflag "flag"
+	"fmt"
	"io"
	"io/ioutil"
	"log"
@@ -12,21 +15,12 @@ import (
	"path"
	"path/filepath"
	"runtime"
+	"runtime/debug"
	"strconv"
	"strings"
	"testing"
	"time"

-	"github.com/pkg/errors"
-
-	"encoding/base64"
-	goflag "flag"
-	"fmt"
-	"runtime/debug"
-
-	"encoding/json"
-	"net/http"
-
	"github.com/ncw/rclone/backend/cache"
	"github.com/ncw/rclone/backend/crypt"
	_ "github.com/ncw/rclone/backend/drive"
@@ -36,10 +30,11 @@ import (
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/object"
	"github.com/ncw/rclone/fs/rc"
+	"github.com/ncw/rclone/fs/rc/rcflags"
	"github.com/ncw/rclone/fstest"
	"github.com/ncw/rclone/vfs"
	"github.com/ncw/rclone/vfs/vfsflags"
+	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
@@ -695,8 +690,8 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
}

func TestInternalChangeSeenAfterRc(t *testing.T) {
	rcflags.Opt.Enabled = true
	rc.Start(&rcflags.Opt)
	cacheExpire := rc.Calls.Get("cache/expire")
	assert.NotNil(t, cacheExpire)

	id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
@@ -729,13 +724,8 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
	require.NoError(t, err)
	require.NotEqual(t, o.ModTime().String(), co.ModTime().String())

-	m := make(map[string]string)
-	res, err := http.Post(fmt.Sprintf("http://localhost:5572/cache/expire?remote=%s", "data.bin"), "application/json; charset=utf-8", strings.NewReader(""))
-	require.NoError(t, err)
-	defer func() {
-		_ = res.Body.Close()
-	}()
-	_ = json.NewDecoder(res.Body).Decode(&m)
+	// Call the rc function
+	m, err := cacheExpire.Fn(rc.Params{"remote": "data.bin"})
	require.Contains(t, m, "status")
	require.Contains(t, m, "message")
	require.Equal(t, "ok", m["status"])
@@ -755,13 +745,8 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
	li1, err = runInstance.list(t, rootFs, "")
	require.Len(t, li1, 1)

-	m = make(map[string]string)
-	res2, err := http.Post("http://localhost:5572/cache/expire?remote=/", "application/json; charset=utf-8", strings.NewReader(""))
-	require.NoError(t, err)
-	defer func() {
-		_ = res2.Body.Close()
-	}()
-	_ = json.NewDecoder(res2.Body).Decode(&m)
+	// Call the rc function
+	m, err = cacheExpire.Fn(rc.Params{"remote": "/"})
	require.Contains(t, m, "status")
	require.Contains(t, m, "message")
	require.Equal(t, "ok", m["status"])
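The test now invokes the registered rc function in-process instead of POSTing to a local HTTP server. A self-contained sketch of that registry-and-call idea (the types here are simplified stand-ins for rclone's rc package):

    package main

    import (
        "errors"
        "fmt"
    )

    type Params map[string]interface{}

    type Call struct {
        Name string
        Fn   func(Params) (Params, error)
    }

    var registry = map[string]*Call{}

    func main() {
        // Register a function, as rc.Calls would hold it.
        registry["cache/expire"] = &Call{
            Name: "cache/expire",
            Fn: func(in Params) (Params, error) {
                remote, ok := in["remote"].(string)
                if !ok {
                    return nil, errors.New("remote missing")
                }
                return Params{"status": "ok", "message": "expired " + remote}, nil
            },
        }

        // Call it directly — no HTTP round trip needed.
        out, err := registry["cache/expire"].Fn(Params{"remote": "data.bin"})
        fmt.Println(out, err)
    }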
backend/cache/cache_upload_test.go
@@ -3,6 +3,7 @@
package cache_test

import (
+	"fmt"
	"math/rand"
	"os"
	"path"
@@ -10,8 +11,6 @@ import (
	"testing"
	"time"

-	"fmt"
-
	"github.com/ncw/rclone/backend/cache"
	_ "github.com/ncw/rclone/backend/drive"
	"github.com/ncw/rclone/fs"
backend/cache/directory.go
@@ -3,9 +3,8 @@
package cache

import (
-	"time"
-
	"path"
+	"time"

	"github.com/ncw/rclone/fs"
)
backend/cache/handle.go
@@ -5,12 +5,11 @@ package cache
import (
	"fmt"
	"io"
-	"sync"
-	"time"
-
	"path"
	"runtime"
	"strings"
+	"sync"
+	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/operations"
backend/cache/plex.go
@@ -3,21 +3,19 @@
package cache

import (
+	"bytes"
	"crypto/tls"
	"encoding/json"
	"fmt"
+	"io/ioutil"
	"net/http"
	"net/url"
	"strings"
+	"sync"
	"time"

-	"sync"
-
-	"bytes"
-	"io/ioutil"
-
	"github.com/ncw/rclone/fs"
-	"github.com/patrickmn/go-cache"
+	cache "github.com/patrickmn/go-cache"
	"golang.org/x/net/websocket"
)
backend/cache/storage_memory.go
@@ -8,7 +8,7 @@ import (
	"time"

	"github.com/ncw/rclone/fs"
-	"github.com/patrickmn/go-cache"
+	cache "github.com/patrickmn/go-cache"
	"github.com/pkg/errors"
)
backend/cache/storage_persistent.go
@@ -3,20 +3,17 @@
package cache

import (
-	"time"
-
	"bytes"
	"encoding/binary"
	"encoding/json"
+	"fmt"
+	"io/ioutil"
	"os"
	"path"
	"strconv"
	"strings"
	"sync"
-
-	"io/ioutil"
-
-	"fmt"
+	"time"

	bolt "github.com/coreos/bbolt"
	"github.com/ncw/rclone/fs"
@@ -17,11 +17,9 @@ import (
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/accounting"
	"github.com/pkg/errors"
-
+	"github.com/rfjakob/eme"
	"golang.org/x/crypto/nacl/secretbox"
	"golang.org/x/crypto/scrypt"
-
-	"github.com/rfjakob/eme"
)

// Constants
@@ -43,6 +41,7 @@ var (
	ErrorBadDecryptControlChar   = errors.New("bad decryption - contains control chars")
	ErrorNotAMultipleOfBlocksize = errors.New("not a multiple of blocksize")
	ErrorTooShortAfterDecode     = errors.New("too short after base32 decode")
+	ErrorTooLongAfterDecode      = errors.New("too long after base32 decode")
	ErrorEncryptedFileTooShort   = errors.New("file is too short to be encrypted")
	ErrorEncryptedFileBadHeader  = errors.New("file has truncated block header")
	ErrorEncryptedBadMagic       = errors.New("not an encrypted file - bad magic string")
@@ -286,6 +285,9 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
		// not possible if decodeFilename() working correctly
		return "", ErrorTooShortAfterDecode
	}
+	if len(rawCiphertext) > 2048 {
+		return "", ErrorTooLongAfterDecode
+	}
	paddedPlaintext := eme.Transform(c.block, c.nameTweak[:], rawCiphertext, eme.DirectionDecrypt)
	plaintext, err := pkcs7.Unpad(nameCipherBlockSize, paddedPlaintext)
	if err != nil {
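A minimal sketch of the new length guard, assuming the check runs after base32 decoding and before the EME transform (the standard-library HexEncoding here stands in for rclone's own base32 alphabet):

    package main

    import (
        "encoding/base32"
        "errors"
        "fmt"
    )

    var errTooLong = errors.New("too long after base32 decode")

    // checkDecoded rejects absurdly long filename ciphertext before any
    // expensive decryption work is attempted.
    func checkDecoded(name string) error {
        enc := base32.HexEncoding.WithPadding(base32.NoPadding)
        raw, err := enc.DecodeString(name)
        if err != nil {
            return err
        }
        if len(raw) > 2048 {
            return errTooLong
        }
        return nil
    }

    func main() {
        fmt.Println(checkDecoded("AAAAAAAA")) // <nil>
    }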
@@ -194,6 +194,10 @@ func TestEncryptSegment(t *testing.T) {

func TestDecryptSegment(t *testing.T) {
	// We've tested the forwards above, now concentrate on the errors
+	longName := make([]byte, 3328)
+	for i := range longName {
+		longName[i] = 'a'
+	}
	c, _ := newCipher(NameEncryptionStandard, "", "", true)
	for _, test := range []struct {
		in string
@@ -201,6 +205,7 @@ func TestDecryptSegment(t *testing.T) {
	}{
		{"64=", ErrorBadBase32Encoding},
		{"!", base32.CorruptInputError(0)},
+		{string(longName), ErrorTooLongAfterDecode},
		{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
		{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
		{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
@@ -4,7 +4,6 @@ package crypt
import (
	"fmt"
	"io"
-	"path"
	"strings"
	"time"
@@ -13,6 +12,7 @@ import (
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/config/obscure"
+	"github.com/ncw/rclone/fs/fspath"
	"github.com/ncw/rclone/fs/hash"
	"github.com/pkg/errors"
)
@@ -67,8 +67,16 @@ func init() {
		Help:       "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
		IsPassword: true,
	}, {
-		Name: "show_mapping",
-		Help: "For all files listed show how the names encrypt.",
+		Name: "show_mapping",
+		Help: `For all files listed show how the names encrypt.
+
+If this flag is set then for each file that the remote is asked to
+list, it will log (at level INFO) a line stating the decrypted file
+name and the encrypted file name.
+
+This is so you can work out which encrypted names are which decrypted
+names just in case you need to do something with the encrypted file
+names, or for debugging purposes.`,
		Default:  false,
+		Hide:     fs.OptionHideConfigurator,
		Advanced: true,
@@ -135,11 +143,11 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
		return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
	}
	// Look for a file first
-	remotePath := path.Join(wPath, cipher.EncryptFileName(rpath))
+	remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath))
	wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
	// if that didn't produce a file, look for a directory
	if err != fs.ErrorIsFile {
-		remotePath = path.Join(wPath, cipher.EncryptDirName(rpath))
+		remotePath = fspath.JoinRootPath(wPath, cipher.EncryptDirName(rpath))
		wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig)
	}
	if err != fs.ErrorIsFile && err != nil {
@@ -165,7 +173,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {

	doChangeNotify := wrappedFs.Features().ChangeNotify
	if doChangeNotify != nil {
-		f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
+		f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
			wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
				decrypted, err := f.DecryptFileName(path)
				if err != nil {
@@ -174,7 +182,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
				}
				notifyFunc(decrypted, entryType)
			}
-			return doChangeNotify(wrappedNotifyFunc, pollInterval)
+			doChangeNotify(wrappedNotifyFunc, pollInterval)
		}
	}
@@ -7,13 +7,30 @@ import (
	"testing"

	"github.com/ncw/rclone/backend/crypt"
+	_ "github.com/ncw/rclone/backend/drive" // for integration tests
	_ "github.com/ncw/rclone/backend/local"
+	_ "github.com/ncw/rclone/backend/swift" // for integration tests
	"github.com/ncw/rclone/fs/config/obscure"
+	"github.com/ncw/rclone/fstest"
	"github.com/ncw/rclone/fstest/fstests"
)

+// TestIntegration runs integration tests against the remote
+func TestIntegration(t *testing.T) {
+	if *fstest.RemoteName == "" {
+		t.Skip("Skipping as -remote not set")
+	}
+	fstests.Run(t, &fstests.Opt{
+		RemoteName: *fstest.RemoteName,
+		NilObject:  (*crypt.Object)(nil),
+	})
+}
+
// TestStandard runs integration tests against the remote
func TestStandard(t *testing.T) {
+	if *fstest.RemoteName != "" {
+		t.Skip("Skipping as -remote set")
+	}
	tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
	name := "TestCrypt"
	fstests.Run(t, &fstests.Opt{
@@ -30,6 +47,9 @@ func TestStandard(t *testing.T) {

// TestOff runs integration tests against the remote
func TestOff(t *testing.T) {
+	if *fstest.RemoteName != "" {
+		t.Skip("Skipping as -remote set")
+	}
	tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off")
	name := "TestCrypt2"
	fstests.Run(t, &fstests.Opt{
@@ -46,6 +66,9 @@ func TestOff(t *testing.T) {

// TestObfuscate runs integration tests against the remote
func TestObfuscate(t *testing.T) {
+	if *fstest.RemoteName != "" {
+		t.Skip("Skipping as -remote set")
+	}
	tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
	name := "TestCrypt3"
	fstests.Run(t, &fstests.Opt{
(File diff suppressed because it is too large.)
@@ -1,62 +1,57 @@
+// +build go1.9
+
package drive

import (
+	"bytes"
	"encoding/json"
+	"io"
+	"io/ioutil"
	"mime"
+	"path/filepath"
+	"strings"
	"testing"

-	"google.golang.org/api/drive/v3"
-
+	_ "github.com/ncw/rclone/backend/local"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
+	"github.com/ncw/rclone/fstest/fstests"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"google.golang.org/api/drive/v3"
)
-const exampleExportFormats = `{
-	"application/vnd.google-apps.document": [
-		"application/rtf",
-		"application/vnd.oasis.opendocument.text",
-		"text/html",
-		"application/pdf",
-		"application/epub+zip",
-		"application/zip",
-		"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
-		"text/plain"
-	],
-	"application/vnd.google-apps.spreadsheet": [
-		"application/x-vnd.oasis.opendocument.spreadsheet",
-		"text/tab-separated-values",
-		"application/pdf",
-		"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
-		"text/csv",
-		"application/zip",
-		"application/vnd.oasis.opendocument.spreadsheet"
-	],
-	"application/vnd.google-apps.jam": [
-		"application/pdf"
-	],
-	"application/vnd.google-apps.script": [
-		"application/vnd.google-apps.script+json"
-	],
-	"application/vnd.google-apps.presentation": [
-		"application/vnd.oasis.opendocument.presentation",
-		"application/pdf",
-		"application/vnd.openxmlformats-officedocument.presentationml.presentation",
-		"text/plain"
-	],
-	"application/vnd.google-apps.form": [
-		"application/zip"
-	],
-	"application/vnd.google-apps.drawing": [
-		"image/svg+xml",
-		"image/png",
-		"application/pdf",
-		"image/jpeg"
-	]
-}`
+/*
+var additionalMimeTypes = map[string]string{
+	"application/vnd.ms-excel.sheet.macroenabled.12":                          ".xlsm",
+	"application/vnd.ms-excel.template.macroenabled.12":                       ".xltm",
+	"application/vnd.ms-powerpoint.presentation.macroenabled.12":              ".pptm",
+	"application/vnd.ms-powerpoint.slideshow.macroenabled.12":                 ".ppsm",
+	"application/vnd.ms-powerpoint.template.macroenabled.12":                  ".potm",
+	"application/vnd.ms-powerpoint":                                           ".ppt",
+	"application/vnd.ms-word.document.macroenabled.12":                        ".docm",
+	"application/vnd.ms-word.template.macroenabled.12":                        ".dotm",
+	"application/vnd.openxmlformats-officedocument.presentationml.template":   ".potx",
+	"application/vnd.openxmlformats-officedocument.spreadsheetml.template":    ".xltx",
+	"application/vnd.openxmlformats-officedocument.wordprocessingml.template": ".dotx",
+	"application/vnd.sun.xml.writer":                                          ".sxw",
+	"text/richtext":                                                           ".rtf",
+}
+*/

// Load the example export formats into exportFormats for testing
-func TestInternalLoadExampleExportFormats(t *testing.T) {
-	exportFormatsOnce.Do(func() {})
-	assert.NoError(t, json.Unmarshal([]byte(exampleExportFormats), &_exportFormats))
+func TestInternalLoadExampleFormats(t *testing.T) {
+	fetchFormatsOnce.Do(func() {})
+	buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
+	var about struct {
+		ExportFormats map[string][]string `json:"exportFormats,omitempty"`
+		ImportFormats map[string][]string `json:"importFormats,omitempty"`
+	}
+	require.NoError(t, err)
+	require.NoError(t, json.Unmarshal(buf, &about))
+	_exportFormats = fixMimeTypeMap(about.ExportFormats)
+	_importFormats = fixMimeTypeMap(about.ImportFormats)
}

func TestInternalParseExtensions(t *testing.T) {
@@ -65,27 +60,24 @@ func TestInternalParseExtensions(t *testing.T) {
		want    []string
		wantErr error
	}{
-		{"doc", []string{"doc"}, nil},
-		{" docx ,XLSX, pptx,svg", []string{"docx", "xlsx", "pptx", "svg"}, nil},
-		{"docx,svg,Docx", []string{"docx", "svg"}, nil},
-		{"docx,potato,docx", []string{"docx"}, errors.New(`couldn't find mime type for extension "potato"`)},
+		{"doc", []string{".doc"}, nil},
+		{" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
+		{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
+		{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
	} {
-		f := new(Fs)
-		gotErr := f.parseExtensions(test.in)
+		extensions, _, gotErr := parseExtensions(test.in)
		if test.wantErr == nil {
			assert.NoError(t, gotErr)
		} else {
			assert.EqualError(t, gotErr, test.wantErr.Error())
		}
-		assert.Equal(t, test.want, f.extensions)
+		assert.Equal(t, test.want, extensions)
	}

	// Test it is appending
-	f := new(Fs)
-	assert.Nil(t, f.parseExtensions("docx,svg"))
-	assert.Nil(t, f.parseExtensions("docx,svg,xlsx"))
-	assert.Equal(t, []string{"docx", "svg", "xlsx"}, f.extensions)
+	extensions, _, gotErr := parseExtensions("docx,svg", "docx,svg,xlsx")
+	assert.NoError(t, gotErr)
+	assert.Equal(t, []string{".docx", ".svg", ".xlsx"}, extensions)
}

func TestInternalFindExportFormat(t *testing.T) {
@@ -99,17 +91,17 @@ func TestInternalFindExportFormat(t *testing.T) {
		wantMimeType string
	}{
		{[]string{}, "", ""},
-		{[]string{"pdf"}, "pdf", "application/pdf"},
-		{[]string{"pdf", "rtf", "xls"}, "pdf", "application/pdf"},
-		{[]string{"xls", "rtf", "pdf"}, "rtf", "application/rtf"},
-		{[]string{"xls", "csv", "svg"}, "", ""},
+		{[]string{".pdf"}, ".pdf", "application/pdf"},
+		{[]string{".pdf", ".rtf", ".xls"}, ".pdf", "application/pdf"},
+		{[]string{".xls", ".rtf", ".pdf"}, ".rtf", "application/rtf"},
+		{[]string{".xls", ".csv", ".svg"}, "", ""},
	} {
		f := new(Fs)
-		f.extensions = test.extensions
+		f.exportExtensions = test.extensions
		gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
		assert.Equal(t, test.wantExtension, gotExtension)
		if test.wantExtension != "" {
-			assert.Equal(t, item.Name+"."+gotExtension, gotFilename)
+			assert.Equal(t, item.Name+gotExtension, gotFilename)
		} else {
			assert.Equal(t, "", gotFilename)
		}
@@ -117,3 +109,155 @@ func TestInternalFindExportFormat(t *testing.T) {
		assert.Equal(t, true, gotIsDocument)
	}
}

+func TestMimeTypesToExtension(t *testing.T) {
+	for mimeType, extension := range _mimeTypeToExtension {
+		extensions, err := mime.ExtensionsByType(mimeType)
+		assert.NoError(t, err)
+		assert.Contains(t, extensions, extension)
+	}
+}
+
+func TestExtensionToMimeType(t *testing.T) {
+	for mimeType, extension := range _mimeTypeToExtension {
+		gotMimeType := mime.TypeByExtension(extension)
+		mediatype, _, err := mime.ParseMediaType(gotMimeType)
+		assert.NoError(t, err)
+		assert.Equal(t, mimeType, mediatype)
+	}
+}
+
+func TestExtensionsForExportFormats(t *testing.T) {
+	if _exportFormats == nil {
+		t.Error("exportFormats == nil")
+	}
+	for fromMT, toMTs := range _exportFormats {
+		for _, toMT := range toMTs {
+			if !isInternalMimeType(toMT) {
+				extensions, err := mime.ExtensionsByType(toMT)
+				assert.NoError(t, err, "invalid MIME type %q", toMT)
+				assert.NotEmpty(t, extensions, "No extension found for %q (from: %q)", fromMT, toMT)
+			}
+		}
+	}
+}
+
+func TestExtensionsForImportFormats(t *testing.T) {
+	t.Skip()
+	if _importFormats == nil {
+		t.Error("_importFormats == nil")
+	}
+	for fromMT := range _importFormats {
+		if !isInternalMimeType(fromMT) {
+			extensions, err := mime.ExtensionsByType(fromMT)
+			assert.NoError(t, err, "invalid MIME type %q", fromMT)
+			assert.NotEmpty(t, extensions, "No extension found for %q", fromMT)
+		}
+	}
+}
+func (f *Fs) InternalTestDocumentImport(t *testing.T) {
+	oldAllow := f.opt.AllowImportNameChange
+	f.opt.AllowImportNameChange = true
+	defer func() {
+		f.opt.AllowImportNameChange = oldAllow
+	}()
+
+	testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
+	require.NoError(t, err)
+
+	testFilesFs, err := fs.NewFs(testFilesPath)
+	require.NoError(t, err)
+
+	_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
+	require.NoError(t, err)
+
+	err = operations.CopyFile(f, testFilesFs, "example2.doc", "example2.doc")
+	require.NoError(t, err)
+}
+
+func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
+	testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
+	require.NoError(t, err)
+
+	testFilesFs, err := fs.NewFs(testFilesPath)
+	require.NoError(t, err)
+
+	_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
+	require.NoError(t, err)
+
+	err = operations.CopyFile(f, testFilesFs, "example2.xlsx", "example1.ods")
+	require.NoError(t, err)
+}
+
+func (f *Fs) InternalTestDocumentExport(t *testing.T) {
+	var buf bytes.Buffer
+	var err error
+
+	f.exportExtensions, _, err = parseExtensions("txt")
+	require.NoError(t, err)
+
+	obj, err := f.NewObject("example2.txt")
+	require.NoError(t, err)
+
+	rc, err := obj.Open()
+	require.NoError(t, err)
+	defer func() { require.NoError(t, rc.Close()) }()
+
+	_, err = io.Copy(&buf, rc)
+	require.NoError(t, err)
+	text := buf.String()
+
+	for _, excerpt := range []string{
+		"Lorem ipsum dolor sit amet, consectetur",
+		"porta at ultrices in, consectetur at augue.",
+	} {
+		require.Contains(t, text, excerpt)
+	}
+}
+func (f *Fs) InternalTestDocumentLink(t *testing.T) {
+	var buf bytes.Buffer
+	var err error
+
+	f.exportExtensions, _, err = parseExtensions("link.html")
+	require.NoError(t, err)
+
+	obj, err := f.NewObject("example2.link.html")
+	require.NoError(t, err)
+
+	rc, err := obj.Open()
+	require.NoError(t, err)
+	defer func() { require.NoError(t, rc.Close()) }()
+
+	_, err = io.Copy(&buf, rc)
+	require.NoError(t, err)
+	text := buf.String()
+
+	require.True(t, strings.HasPrefix(text, "<html>"))
+	require.True(t, strings.HasSuffix(text, "</html>\n"))
+	for _, excerpt := range []string{
+		`<meta http-equiv="refresh"`,
+		`Loading <a href="`,
+	} {
+		require.Contains(t, text, excerpt)
+	}
+}
+
+func (f *Fs) InternalTest(t *testing.T) {
+	// These tests all depend on each other so run them as nested tests
+	t.Run("DocumentImport", func(t *testing.T) {
+		f.InternalTestDocumentImport(t)
+		t.Run("DocumentUpdate", func(t *testing.T) {
+			f.InternalTestDocumentUpdate(t)
+			t.Run("DocumentExport", func(t *testing.T) {
+				f.InternalTestDocumentExport(t)
+				t.Run("DocumentLink", func(t *testing.T) {
+					f.InternalTestDocumentLink(t)
+				})
+			})
+		})
+	})
+}
+
+var _ fstests.InternalTester = (*Fs)(nil)
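InternalTest chains the document tests as nested subtests, so a fatal failure (t.Fatal or a require assertion) in an outer step prevents the nested steps from running at all. A minimal sketch of that pattern (the package name is hypothetical):

    package sketch

    import "testing"

    // TestPipeline shows the nested t.Run pattern: each later step is
    // declared inside the previous one, so it only executes if the
    // enclosing step did not fail fatally before reaching it.
    func TestPipeline(t *testing.T) {
        t.Run("Import", func(t *testing.T) {
            // a t.Fatal here would skip everything nested below
            t.Run("Update", func(t *testing.T) {
                t.Run("Export", func(t *testing.T) {
                    t.Log("runs only after Import and Update succeed")
                })
            })
        })
    }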
@@ -1,10 +1,13 @@
// Test Drive filesystem interface
-package drive_test

+// +build go1.9
+
+package drive

import (
	"testing"

-	"github.com/ncw/rclone/backend/drive"
+	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)
@@ -12,6 +15,23 @@ import (
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestDrive:",
-		NilObject:  (*drive.Object)(nil),
+		NilObject:  (*Object)(nil),
+		ChunkedUpload: fstests.ChunkedUploadConfig{
+			MinChunkSize:  minChunkSize,
+			CeilChunkSize: fstests.NextPowerOfTwo,
+		},
	})
}

+func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+	return f.setUploadChunkSize(cs)
+}
+
+func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+	return f.setUploadCutoff(cs)
+}
+
+var (
+	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
+	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
+)
backend/drive/drive_unsupported.go (new file)
@@ -0,0 +1,6 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build !go1.9

package drive
backend/drive/test/about.json (new file)
@@ -0,0 +1,178 @@
{
    "importFormats": {
        "text/tab-separated-values": [
            "application/vnd.google-apps.spreadsheet"
        ],
        "application/x-vnd.oasis.opendocument.presentation": [
            "application/vnd.google-apps.presentation"
        ],
        "image/jpeg": [
            "application/vnd.google-apps.document"
        ],
        "image/bmp": [
            "application/vnd.google-apps.document"
        ],
        "image/gif": [
            "application/vnd.google-apps.document"
        ],
        "application/vnd.ms-excel.sheet.macroenabled.12": [
            "application/vnd.google-apps.spreadsheet"
        ],
        "application/vnd.openxmlformats-officedocument.wordprocessingml.template": [
            "application/vnd.google-apps.document"
        ],
        "application/vnd.ms-powerpoint.presentation.macroenabled.12": [
            "application/vnd.google-apps.presentation"
        ],
        "application/vnd.ms-word.template.macroenabled.12": [
            "application/vnd.google-apps.document"
        ],
        "application/vnd.openxmlformats-officedocument.wordprocessingml.document": [
            "application/vnd.google-apps.document"
        ],
        "image/pjpeg": [
            "application/vnd.google-apps.document"
        ],
        "application/vnd.google-apps.script+text/plain": [
            "application/vnd.google-apps.script"
        ],
        "application/vnd.ms-excel": [
            "application/vnd.google-apps.spreadsheet"
        ],
        "application/vnd.sun.xml.writer": [
            "application/vnd.google-apps.document"
        ],
        "application/vnd.ms-word.document.macroenabled.12": [
            "application/vnd.google-apps.document"
        ],
        "application/vnd.ms-powerpoint.slideshow.macroenabled.12": [
            "application/vnd.google-apps.presentation"
        ],
        "text/rtf": [
            "application/vnd.google-apps.document"
        ],
        "text/plain": [
            "application/vnd.google-apps.document"
        ],
        "application/vnd.oasis.opendocument.spreadsheet": [
            "application/vnd.google-apps.spreadsheet"
        ],
        "application/x-vnd.oasis.opendocument.spreadsheet": [
            "application/vnd.google-apps.spreadsheet"
        ],
        "image/png": [
            "application/vnd.google-apps.document"
        ],
        "application/x-vnd.oasis.opendocument.text": [
            "application/vnd.google-apps.document"
        ],
        "application/msword": [
            "application/vnd.google-apps.document"
        ],
        "application/pdf": [
            "application/vnd.google-apps.document"
        ],
        "application/json": [
            "application/vnd.google-apps.script"
        ],
        "application/x-msmetafile": [
            "application/vnd.google-apps.drawing"
        ],
        "application/vnd.openxmlformats-officedocument.spreadsheetml.template": [
            "application/vnd.google-apps.spreadsheet"
        ],
        "application/vnd.ms-powerpoint": [
            "application/vnd.google-apps.presentation"
        ],
        "application/vnd.ms-excel.template.macroenabled.12": [
            "application/vnd.google-apps.spreadsheet"
        ],
        "image/x-bmp": [
            "application/vnd.google-apps.document"
        ],
        "application/rtf": [
            "application/vnd.google-apps.document"
        ],
        "application/vnd.openxmlformats-officedocument.presentationml.template": [
            "application/vnd.google-apps.presentation"
        ],
        "image/x-png": [
            "application/vnd.google-apps.document"
        ],
        "text/html": [
            "application/vnd.google-apps.document"
        ],
        "application/vnd.oasis.opendocument.text": [
            "application/vnd.google-apps.document"
        ],
        "application/vnd.openxmlformats-officedocument.presentationml.presentation": [
            "application/vnd.google-apps.presentation"
        ],
        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": [
            "application/vnd.google-apps.spreadsheet"
        ],
        "application/vnd.google-apps.script+json": [
            "application/vnd.google-apps.script"
        ],
        "application/vnd.openxmlformats-officedocument.presentationml.slideshow": [
            "application/vnd.google-apps.presentation"
        ],
        "application/vnd.ms-powerpoint.template.macroenabled.12": [
            "application/vnd.google-apps.presentation"
        ],
        "text/csv": [
            "application/vnd.google-apps.spreadsheet"
        ],
        "application/vnd.oasis.opendocument.presentation": [
            "application/vnd.google-apps.presentation"
        ],
        "image/jpg": [
            "application/vnd.google-apps.document"
        ],
        "text/richtext": [
            "application/vnd.google-apps.document"
        ]
    },
    "exportFormats": {
        "application/vnd.google-apps.document": [
            "application/rtf",
            "application/vnd.oasis.opendocument.text",
            "text/html",
            "application/pdf",
            "application/epub+zip",
            "application/zip",
            "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
            "text/plain"
        ],
        "application/vnd.google-apps.spreadsheet": [
            "application/x-vnd.oasis.opendocument.spreadsheet",
            "text/tab-separated-values",
            "application/pdf",
            "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
            "text/csv",
            "application/zip",
            "application/vnd.oasis.opendocument.spreadsheet"
        ],
        "application/vnd.google-apps.jam": [
            "application/pdf"
        ],
        "application/vnd.google-apps.script": [
            "application/vnd.google-apps.script+json"
        ],
        "application/vnd.google-apps.presentation": [
            "application/vnd.oasis.opendocument.presentation",
            "application/pdf",
            "application/vnd.openxmlformats-officedocument.presentationml.presentation",
            "text/plain"
        ],
        "application/vnd.google-apps.form": [
            "application/zip"
        ],
        "application/vnd.google-apps.drawing": [
            "image/svg+xml",
            "image/png",
            "application/pdf",
            "image/jpeg"
        ]
    }
}
BIN  backend/drive/test/files/example1.ods  Normal file  (binary file not shown)
BIN  backend/drive/test/files/example2.doc  Normal file  (binary file not shown)
BIN  backend/drive/test/files/example3.odt  Normal file  (binary file not shown)
@@ -8,6 +8,8 @@
 //
 // This contains code adapted from google.golang.org/api (C) the GO AUTHORS

+// +build go1.9
+
 package drive

 import (
@@ -50,11 +52,12 @@ type resumableUpload struct {
 }

 // Upload the io.Reader in of size bytes with contentType and info
-func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string, info *drive.File, remote string) (*drive.File, error) {
-    params := make(url.Values)
-    params.Set("alt", "json")
-    params.Set("uploadType", "resumable")
-    params.Set("fields", partialFields)
+func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
+    params := url.Values{
+        "alt":        {"json"},
+        "uploadType": {"resumable"},
+        "fields":     {partialFields},
+    }
     if f.isTeamDrive {
         params.Set("supportsTeamDrives", "true")
     }
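As a stand-alone aside (not part of the commit), the refactored Upload builds its query with a url.Values literal instead of repeated Set calls; the two forms produce identical parameters:

package main

import (
    "fmt"
    "net/url"
)

func main() {
    // old style: build up the map with Set
    a := make(url.Values)
    a.Set("alt", "json")
    a.Set("uploadType", "resumable")

    // new style: one composite literal
    b := url.Values{
        "alt":        {"json"},
        "uploadType": {"resumable"},
    }
    fmt.Println(a.Encode() == b.Encode()) // true
}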
@@ -31,9 +31,11 @@ import (
     "time"

     "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
+    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
     "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
     "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
     "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
+    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
     "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
     "github.com/ncw/rclone/fs"
     "github.com/ncw/rclone/fs/config"
@@ -79,8 +81,8 @@ const (
     // Choose 48MB which is 91% of Maximum speed. rclone by
     // default does 4 transfers so this should use 4*48MB = 192MB
     // by default.
-    defaultChunkSize = 48 * 1024 * 1024
-    maxChunkSize     = 150 * 1024 * 1024
+    defaultChunkSize = 48 * fs.MebiByte
+    maxChunkSize     = 150 * fs.MebiByte
 )

 var (
@@ -120,17 +122,30 @@ func init() {
             Name: config.ConfigClientSecret,
             Help: "Dropbox App Client Secret\nLeave blank normally.",
         }, {
-            Name: "chunk_size",
-            Help: fmt.Sprintf("Upload chunk size. Max %v.", fs.SizeSuffix(maxChunkSize)),
+            Name: "chunk_size",
+            Help: fmt.Sprintf(`Upload chunk size. (< %v).

+Any files larger than this will be uploaded in chunks of this size.
+
+Note that chunks are buffered in memory (one at a time) so rclone can
+deal with retries. Setting this larger will increase the speed
+slightly (at most 10%% for 128MB in tests) at the cost of using more
+memory. It can be set smaller if you are tight on memory.`, fs.SizeSuffix(maxChunkSize)),
             Default:  fs.SizeSuffix(defaultChunkSize),
             Advanced: true,
+        }, {
+            Name:     "impersonate",
+            Help:     "Impersonate this user when using a business account.",
+            Default:  "",
+            Advanced: true,
         }},
     })
 }

 // Options defines the configuration for this backend
 type Options struct {
-    ChunkSize fs.SizeSuffix `config:"chunk_size"`
+    ChunkSize   fs.SizeSuffix `config:"chunk_size"`
+    Impersonate string        `config:"impersonate"`
 }

 // Fs represents a remote dropbox server
@@ -142,6 +157,7 @@ type Fs struct {
     srv            files.Client   // the connection to the dropbox server
     sharing        sharing.Client // as above, but for generating sharing links
     users          users.Client   // as above, but for accessing user information
+    team           team.Client    // for the Teams API
     slashRoot      string         // root with "/" prefix, lowercase
     slashRootSlash string         // root with "/" prefix and postfix, lowercase
     pacer          *pacer.Pacer   // To pace the API calls
@@ -188,13 +204,41 @@ func shouldRetry(err error) (bool, error) {
         return false, err
     }
     baseErrString := errors.Cause(err).Error()
-    // FIXME there is probably a better way of doing this!
+    // handle any official Retry-After header from Dropbox's SDK first
+    switch e := err.(type) {
+    case auth.RateLimitAPIError:
+        if e.RateLimitError.RetryAfter > 0 {
+            fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
+            time.Sleep(time.Duration(e.RateLimitError.RetryAfter) * time.Second)
+        }
+        return true, err
+    }
+    // Keep old behaviour for backward compatibility
     if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
         return true, err
     }
     return fserrors.ShouldRetry(err), err
 }

+func checkUploadChunkSize(cs fs.SizeSuffix) error {
+    const minChunkSize = fs.Byte
+    if cs < minChunkSize {
+        return errors.Errorf("%s is less than %s", cs, minChunkSize)
+    }
+    if cs > maxChunkSize {
+        return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
+    }
+    return nil
+}
+
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+    err = checkUploadChunkSize(cs)
+    if err == nil {
+        old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+    }
+    return
+}
+
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     // Parse config into Options struct
@@ -203,8 +247,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     if err != nil {
         return nil, err
     }
-    if opt.ChunkSize > maxChunkSize {
-        return nil, errors.Errorf("chunk size too big, must be < %v", maxChunkSize)
+    err = checkUploadChunkSize(opt.ChunkSize)
+    if err != nil {
+        return nil, errors.Wrap(err, "dropbox: chunk size")
     }

     // Convert the old token if it exists. The old token was just
@@ -235,6 +280,29 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
         Client:          oAuthClient, // maybe???
         HeaderGenerator: f.headerGenerator,
     }
+
+    // NOTE: needs to be created pre-impersonation so we can look up the impersonated user
+    f.team = team.New(config)
+
+    if opt.Impersonate != "" {
+
+        user := team.UserSelectorArg{
+            Email: opt.Impersonate,
+        }
+        user.Tag = "email"
+
+        members := []*team.UserSelectorArg{&user}
+        args := team.NewMembersGetInfoArgs(members)
+
+        memberIds, err := f.team.MembersGetInfo(args)
+
+        if err != nil {
+            return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
+        }
+
+        config.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
+    }
+
     f.srv = files.New(config)
     f.sharing = sharing.New(config)
     f.users = users.New(config)
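The new Dropbox retry path above sleeps for the server-supplied Retry-After interval before telling the pacer to retry. A minimal sketch of the same pattern, using a made-up stand-in type rather than the SDK's real auth.RateLimitAPIError:

package main

import (
    "errors"
    "fmt"
    "time"
)

// rateLimitError is a stand-in for the SDK's structured rate-limit error.
type rateLimitError struct {
    RetryAfter int // seconds the server asked us to wait
}

func (e rateLimitError) Error() string { return "too_many_requests" }

// shouldRetry sleeps for the server-supplied interval on a rate-limit
// error and reports that the call should be retried.
func shouldRetry(err error) (bool, error) {
    var rle rateLimitError
    if errors.As(err, &rle) {
        if rle.RetryAfter > 0 {
            fmt.Printf("rate limited, waiting %d seconds\n", rle.RetryAfter)
            time.Sleep(time.Duration(rle.RetryAfter) * time.Second)
        }
        return true, err
    }
    return false, err
}

func main() {
    retry, err := shouldRetry(rateLimitError{RetryAfter: 1})
    fmt.Println(retry, err) // true too_many_requests
}

The string-matching fallback in the real code is kept only for backward compatibility with older SDK error shapes.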
@@ -1,10 +1,10 @@
 // Test Dropbox filesystem interface
-package dropbox_test
+package dropbox

 import (
     "testing"

-    "github.com/ncw/rclone/backend/dropbox"
+    "github.com/ncw/rclone/fs"
     "github.com/ncw/rclone/fstest/fstests"
 )

@@ -12,6 +12,15 @@ import (
 func TestIntegration(t *testing.T) {
     fstests.Run(t, &fstests.Opt{
         RemoteName: "TestDropbox:",
-        NilObject:  (*dropbox.Object)(nil),
+        NilObject:  (*Object)(nil),
+        ChunkedUpload: fstests.ChunkedUploadConfig{
+            MaxChunkSize: maxChunkSize,
+        },
     })
 }
+
+func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+    return f.setUploadChunkSize(cs)
+}
+
+var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
@@ -704,6 +704,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
     path := path.Join(o.fs.root, o.remote)
     // remove the file if upload failed
     remove := func() {
+        // Give the FTP server a chance to get its internal state in order after the error.
+        // The error may have been local in which case we closed the connection. The server
+        // may still be dealing with it for a moment. A sleep isn't ideal but I haven't been
+        // able to think of a better method to find out if the server has finished - ncw
+        time.Sleep(1 * time.Second)
         removeErr := o.Remove()
         if removeErr != nil {
             fs.Debugf(o, "Failed to remove: %v", removeErr)
@@ -717,7 +722,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
     }
     err = c.Stor(path, in)
     if err != nil {
-        _ = c.Quit()
+        _ = c.Quit() // toss this connection to avoid sync errors
         remove()
        return errors.Wrap(err, "update stor")
     }
@@ -1,4 +1,7 @@
 // Package googlecloudstorage provides an interface to Google Cloud Storage
+
+// +build go1.9
+
 package googlecloudstorage

 /*
@@ -1,4 +1,7 @@
 // Test GoogleCloudStorage filesystem interface
+
+// +build go1.9
+
 package googlecloudstorage_test

 import (
@@ -0,0 +1,6 @@
+// Build for unsupported platforms to stop go complaining
+// about "no buildable Go source files "
+
+// +build !go1.9
+
+package googlecloudstorage
@@ -193,7 +193,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
     }
     err := o.stat()
     if err != nil {
-        return nil, errors.Wrap(err, "Stat failed")
+        return nil, err
     }
     return o, nil
 }
@@ -416,6 +416,9 @@ func (o *Object) url() string {
 func (o *Object) stat() error {
     url := o.url()
     res, err := o.fs.httpClient.Head(url)
+    if err == nil && res.StatusCode == http.StatusNotFound {
+        return fs.ErrorObjectNotFound
+    }
     err = statusError(res, err)
     if err != nil {
         return errors.Wrap(err, "failed to stat")
@@ -144,6 +144,11 @@ func TestNewObject(t *testing.T) {

     dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
     assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
+
+    // check object not found
+    o, err = f.NewObject("not found.txt")
+    assert.Nil(t, o)
+    assert.Equal(t, fs.ErrorObjectNotFound, err)
 }

 func TestOpen(t *testing.T) {
@@ -80,7 +80,7 @@ func init() {
         Advanced: true,
     }, {
         Name:     "unlink",
-        Help:     "Remove existing public link to file/folder with link command rather than creating.",
+        Help:     "Remove existing public link to file/folder with link command rather than creating.\nDefault is false, meaning link command will create or retrieve public link.",
         Default:  false,
         Advanced: true,
     }},
@@ -199,7 +199,7 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
 func (f *Fs) getAccountInfo() (info *api.AccountInfo, err error) {
     opts := rest.Opts{
         Method: "GET",
-        Path:   rest.URLPathEscape(f.user),
+        Path:   urlPathEscape(f.user),
     }

     var resp *http.Response
@@ -220,7 +220,7 @@ func (f *Fs) setEndpointURL(mountpoint string) (err error) {
     if err != nil {
         return errors.Wrap(err, "failed to get endpoint url")
     }
-    f.endpointURL = rest.URLPathEscape(path.Join(info.Username, defaultDevice, mountpoint))
+    f.endpointURL = urlPathEscape(path.Join(info.Username, defaultDevice, mountpoint))
     return nil
 }

@@ -241,6 +241,11 @@ func errorHandler(resp *http.Response) error {
     return errResponse
 }

+// Jottacloud wants '+' to be URL encoded even though the RFC states it's not reserved
+func urlPathEscape(in string) string {
+    return strings.Replace(rest.URLPathEscape(in), "+", "%2B", -1)
+}
+
 // filePathRaw returns an unescaped file path (f.root, file)
 func (f *Fs) filePathRaw(file string) string {
     return path.Join(f.endpointURL, replaceReservedChars(path.Join(f.root, file)))
@@ -248,7 +253,7 @@ func (f *Fs) filePathRaw(file string) string {

 // filePath returns an escaped file path (f.root, file)
 func (f *Fs) filePath(file string) string {
-    return rest.URLPathEscape(f.filePathRaw(file))
+    return urlPathEscape(f.filePathRaw(file))
 }

 // filePath returns an escaped file path (f.root, remote)
@@ -459,12 +464,12 @@ func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, f
     if folder.Deleted {
         return nil
     }
-    folderPath := path.Join(folder.Path, folder.Name)
-    remoteDirLength := len(folderPath) - pathPrefixLength
+    folderPath := restoreReservedChars(path.Join(folder.Path, folder.Name))
+    folderPathLength := len(folderPath)
     var remoteDir string
-    if remoteDirLength > 0 {
-        remoteDir = restoreReservedChars(folderPath[pathPrefixLength+1:])
-        if remoteDirLength > startPathLength {
+    if folderPathLength > pathPrefixLength {
+        remoteDir = folderPath[pathPrefixLength+1:]
+        if folderPathLength > startPathLength {
             d := fs.NewDir(remoteDir, time.Time(folder.ModifiedAt))
             err := fn(d)
             if err != nil {
@@ -1080,6 +1085,7 @@ func (o *Object) Remove() error {
         Method:     "POST",
         Path:       o.filePath(),
         Parameters: url.Values{},
+        NoResponse: true,
     }

     if o.fs.opt.HardDelete {
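A quick stand-alone illustration of why urlPathEscape exists: standard path escaping (which rest.URLPathEscape follows in this respect) leaves '+' literal, while Jottacloud decodes it as a space, so it has to be forced to %2B. The filename here is made up:

package main

import (
    "fmt"
    "net/url"
    "strings"
)

func main() {
    name := "report+final version.txt"

    // The standard escaper keeps '+' literal in paths ...
    escaped := url.PathEscape(name)
    fmt.Println(escaped) // report+final%20version.txt

    // ... so '+' must be replaced explicitly for Jottacloud.
    fmt.Println(strings.Replace(escaped, "+", "%2B", -1)) // report%2Bfinal%20version.txt
}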
@@ -4,50 +4,25 @@ import (
     "crypto/md5"
     "fmt"
     "io"
-    "io/ioutil"
     "testing"

+    "github.com/ncw/rclone/lib/readers"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
 )

-// A test reader to return a test pattern of size
-type testReader struct {
-    size int64
-    c    byte
-}
-
-// Reader is the interface that wraps the basic Read method.
-func (r *testReader) Read(p []byte) (n int, err error) {
-    for i := range p {
-        if r.size <= 0 {
-            return n, io.EOF
-        }
-        p[i] = r.c
-        r.c = (r.c + 1) % 253
-        r.size--
-        n++
-    }
-    return
-}
-
 func TestReadMD5(t *testing.T) {
-    // smoke test the reader
-    b, err := ioutil.ReadAll(&testReader{size: 10})
-    require.NoError(t, err)
-    assert.Equal(t, []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, b)
-
     // Check readMD5 for different size and threshold
     for _, size := range []int64{0, 1024, 10 * 1024, 100 * 1024} {
         t.Run(fmt.Sprintf("%d", size), func(t *testing.T) {
             hasher := md5.New()
-            n, err := io.Copy(hasher, &testReader{size: size})
+            n, err := io.Copy(hasher, readers.NewPatternReader(size))
             require.NoError(t, err)
             assert.Equal(t, n, size)
             wantMD5 := fmt.Sprintf("%x", hasher.Sum(nil))
             for _, threshold := range []int64{512, 1024, 10 * 1024, 20 * 1024} {
                 t.Run(fmt.Sprintf("%d", threshold), func(t *testing.T) {
-                    in := &testReader{size: size}
+                    in := readers.NewPatternReader(size)
                     gotMD5, out, cleanup, err := readMD5(in, size, threshold)
                     defer cleanup()
                     require.NoError(t, err)
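The removed local testReader (and, assuming it behaves the same, the shared readers.NewPatternReader that replaces it) emits a deterministic byte pattern. A self-contained copy of that generator for reference:

package main

import (
    "fmt"
    "io"
)

// patternReader emits bytes 0,1,2,...,252,0,1,... for size bytes,
// matching the removed local testReader.
type patternReader struct {
    size int64
    c    byte
}

func (r *patternReader) Read(p []byte) (n int, err error) {
    for i := range p {
        if r.size <= 0 {
            return n, io.EOF
        }
        p[i] = r.c
        r.c = (r.c + 1) % 253
        r.size--
        n++
    }
    return n, nil
}

func main() {
    b, _ := io.ReadAll(&patternReader{size: 10})
    fmt.Println(b) // [0 1 2 3 4 5 6 7 8 9]
}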
@@ -27,21 +27,14 @@ import (
 var (
     charMap = map[rune]rune{
         '\\': '＼', // FULLWIDTH REVERSE SOLIDUS
-        '+':  '＋', // FULLWIDTH PLUS SIGN
         '*':  '＊', // FULLWIDTH ASTERISK
         '<':  '＜', // FULLWIDTH LESS-THAN SIGN
         '>':  '＞', // FULLWIDTH GREATER-THAN SIGN
         '?':  '？', // FULLWIDTH QUESTION MARK
-        '!':  '！', // FULLWIDTH EXCLAMATION MARK
-        '&':  '＆', // FULLWIDTH AMPERSAND
         ':':  '：', // FULLWIDTH COLON
         ';':  '；', // FULLWIDTH SEMICOLON
         '|':  '｜', // FULLWIDTH VERTICAL LINE
-        '#':  '＃', // FULLWIDTH NUMBER SIGN
-        '%':  '％', // FULLWIDTH PERCENT SIGN
         '"':  '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
-        '\'': '＇', // FULLWIDTH APOSTROPHE
-        '~':  '～', // FULLWIDTH TILDE
         ' ':  '␠', // SYMBOL FOR SPACE
     }
     invCharMap map[rune]rune
@@ -9,8 +9,8 @@ func TestReplace(t *testing.T) {
     }{
         {"", ""},
         {"abc 123", "abc 123"},
-        {`\+*<>?!&:;|#%"'~`, `＼＋＊＜＞？！＆：；｜＃％＂＇～`},
-        {`\+*<>?!&:;|#%"'~\+*<>?!&:;|#%"'~`, `＼＋＊＜＞？！＆：；｜＃％＂＇～＼＋＊＜＞？！＆：；｜＃％＂＇～`},
+        {`\*<>?:;|"`, `＼＊＜＞？：；｜＂`},
+        {`\*<>?:;|"\*<>?:;|"`, `＼＊＜＞？：；｜＂＼＊＜＞？：；｜＂`},
         {" leading space", "␠leading space"},
         {"trailing space ", "trailing space␠"},
         {" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
@@ -49,19 +49,33 @@ func init() {
         ShortOpt: "L",
         Advanced: true,
     }, {
-        Name: "skip_links",
-        Help: "Don't warn about skipped symlinks.",
+        Name: "skip_links",
+        Help: `Don't warn about skipped symlinks.
+This flag disables warning messages on skipped symlinks or junction
+points, as you explicitly acknowledge that they should be skipped.`,
         Default:  false,
         NoPrefix: true,
         Advanced: true,
     }, {
-        Name: "no_unicode_normalization",
-        Help: "Don't apply unicode normalization to paths and filenames",
+        Name: "no_unicode_normalization",
+        Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
+
+This flag is deprecated now. Rclone no longer normalizes unicode file
+names, but it compares them with unicode normalization in the sync
+routine instead.`,
         Default:  false,
         Advanced: true,
     }, {
-        Name: "no_check_updated",
-        Help: "Don't check to see if the files change during upload",
+        Name: "no_check_updated",
+        Help: `Don't check to see if the files change during upload
+
+Normally rclone checks the size and modification time of files as they
+are being uploaded and aborts with a message which starts "can't copy
+- source file is being updated" if the file changes during upload.
+
+However on some file systems this modification time check may fail (eg
+[Glusterfs #2206](https://github.com/ncw/rclone/issues/2206)) so this
+check can be disabled with this flag.`,
         Default:  false,
         Advanced: true,
     }, {
@@ -817,6 +831,12 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
         return err
     }

+    // Pre-allocate the file for performance reasons
+    err = preAllocate(src.Size(), out)
+    if err != nil {
+        fs.Debugf(o, "Failed to pre-allocate: %v", err)
+    }
+
     // Calculate the hash of the object we are reading as we go along
     hash, err := hash.NewMultiHasherTypes(hashes)
     if err != nil {
10  backend/local/preallocate_other.go  Normal file
@@ -0,0 +1,10 @@
+//+build !windows,!linux
+
+package local
+
+import "os"
+
+// preAllocate the file for performance reasons
+func preAllocate(size int64, out *os.File) error {
+    return nil
+}
22  backend/local/preallocate_unix.go  Normal file
@@ -0,0 +1,22 @@
+//+build linux
+
+package local
+
+import (
+    "os"
+
+    "golang.org/x/sys/unix"
+)
+
+// preAllocate the file for performance reasons
+func preAllocate(size int64, out *os.File) error {
+    if size <= 0 {
+        return nil
+    }
+    err := unix.Fallocate(int(out.Fd()), unix.FALLOC_FL_KEEP_SIZE, 0, size)
+    // FIXME could be doing something here
+    // if err == unix.ENOSPC {
+    //     log.Printf("No space")
+    // }
+    return err
+}
79  backend/local/preallocate_windows.go  Normal file
@@ -0,0 +1,79 @@
+//+build windows
+
+package local
+
+import (
+    "os"
+    "syscall"
+    "unsafe"
+
+    "github.com/pkg/errors"
+    "golang.org/x/sys/windows"
+)
+
+var (
+    ntdll                        = windows.NewLazySystemDLL("ntdll.dll")
+    ntQueryVolumeInformationFile = ntdll.NewProc("NtQueryVolumeInformationFile")
+    ntSetInformationFile         = ntdll.NewProc("NtSetInformationFile")
+)
+
+type fileAllocationInformation struct {
+    AllocationSize uint64
+}
+
+type fileFsSizeInformation struct {
+    TotalAllocationUnits     uint64
+    AvailableAllocationUnits uint64
+    SectorsPerAllocationUnit uint32
+    BytesPerSector           uint32
+}
+
+type ioStatusBlock struct {
+    Status, Information uintptr
+}
+
+// preAllocate the file for performance reasons
+func preAllocate(size int64, out *os.File) error {
+    if size <= 0 {
+        return nil
+    }
+
+    var (
+        iosb       ioStatusBlock
+        fsSizeInfo fileFsSizeInformation
+        allocInfo  fileAllocationInformation
+    )
+
+    // Query info about the block sizes on the file system
+    _, _, e1 := ntQueryVolumeInformationFile.Call(
+        uintptr(out.Fd()),
+        uintptr(unsafe.Pointer(&iosb)),
+        uintptr(unsafe.Pointer(&fsSizeInfo)),
+        uintptr(unsafe.Sizeof(fsSizeInfo)),
+        uintptr(3), // FileFsSizeInformation
+    )
+    if e1 != nil && e1 != syscall.Errno(0) {
+        return errors.Wrap(e1, "preAllocate NtQueryVolumeInformationFile failed")
+    }
+
+    // Calculate the allocation size
+    clusterSize := uint64(fsSizeInfo.BytesPerSector) * uint64(fsSizeInfo.SectorsPerAllocationUnit)
+    if clusterSize <= 0 {
+        return errors.Errorf("preAllocate clusterSize %d <= 0", clusterSize)
+    }
+    allocInfo.AllocationSize = (1 + uint64(size-1)/clusterSize) * clusterSize
+
+    // Ask for the allocation
+    _, _, e1 = ntSetInformationFile.Call(
+        uintptr(out.Fd()),
+        uintptr(unsafe.Pointer(&iosb)),
+        uintptr(unsafe.Pointer(&allocInfo)),
+        uintptr(unsafe.Sizeof(allocInfo)),
+        uintptr(19), // FileAllocationInformation
+    )
+    if e1 != nil && e1 != syscall.Errno(0) {
+        return errors.Wrap(e1, "preAllocate NtSetInformationFile failed")
+    }
+
+    return nil
+}
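The allocation size computed above is just size rounded up to a whole number of clusters. A small check of that arithmetic, with an assumed 4 KiB cluster size:

package main

import "fmt"

// roundUpToCluster mirrors the allocation calculation above:
// (1 + (size-1)/clusterSize) * clusterSize rounds size up to the
// next multiple of clusterSize (size must be > 0, as guarded).
func roundUpToCluster(size, clusterSize uint64) uint64 {
    return (1 + (size-1)/clusterSize) * clusterSize
}

func main() {
    const cluster = 4096 // assumed cluster size for illustration
    for _, size := range []uint64{1, 4096, 4097, 10000} {
        fmt.Println(size, "->", roundUpToCluster(size, cluster))
    }
    // 1 -> 4096, 4096 -> 4096, 4097 -> 8192, 10000 -> 12288
}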
@@ -63,13 +63,20 @@ func init() {
         Required:   true,
         IsPassword: true,
     }, {
-        Name: "debug",
-        Help: "Output more debug from Mega.",
+        Name: "debug",
+        Help: `Output more debug from Mega.
+
+If this flag is set (along with -vv) it will print further debugging
+information from the mega backend.`,
         Default:  false,
         Advanced: true,
     }, {
-        Name: "hard_delete",
-        Help: "Delete files permanently rather than putting them into the trash.",
+        Name: "hard_delete",
+        Help: `Delete files permanently rather than putting them into the trash.
+
+Normally the mega backend will put all deletions into the trash rather
+than permanently deleting them. If you specify this then rclone will
+permanently delete objects instead.`,
         Default:  false,
         Advanced: true,
     }},
@@ -9,6 +9,9 @@ import (

 const (
     timeFormat = `"` + time.RFC3339 + `"`
+
+    // PackageTypeOneNote is the package type value for OneNote files
+    PackageTypeOneNote = "oneNote"
 )

 // Error is returned from one drive when things go wrong
@@ -107,6 +110,7 @@ type RemoteItemFacet struct {
     LastModifiedDateTime Timestamp            `json:"lastModifiedDateTime"` // Date and time the item was last modified. Read-only.
     Folder               *FolderFacet         `json:"folder"`               // Folder metadata, if the item is a folder. Read-only.
     File                 *FileFacet           `json:"file"`                 // File metadata, if the item is a file. Read-only.
+    Package              *PackageFacet        `json:"package"`              // If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others. Read-only.
     FileSystemInfo       *FileSystemInfoFacet `json:"fileSystemInfo"`       // File system information on client. Read-write.
     ParentReference      *ItemReference       `json:"parentReference"`      // Parent information, if the item has a parent. Read-write.
     Size                 int64                `json:"size"`                 // Size of the item in bytes. Read-only.
@@ -147,6 +151,13 @@ type FileSystemInfoFacet struct {
 type DeletedFacet struct {
 }

+// PackageFacet indicates that a DriveItem is the top level item
+// in a "package" or a collection of items that should be treated as a collection instead of individual items.
+// `oneNote` is the only currently defined value.
+type PackageFacet struct {
+    Type string `json:"type"`
+}
+
 // Item represents metadata for an item in OneDrive
 type Item struct {
     ID string `json:"id"` // The unique identifier of the item within the Drive. Read-only.
@@ -170,6 +181,7 @@ type Item struct {
     // Audio *AudioFacet `json:"audio"` // Audio metadata, if the item is an audio file. Read-only.
     // Video *VideoFacet `json:"video"` // Video metadata, if the item is a video. Read-only.
     // Location *LocationFacet `json:"location"` // Location metadata, if the item has location data. Read-only.
+    Package *PackageFacet `json:"package"` // If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others. Read-only.
     Deleted *DeletedFacet `json:"deleted"` // Information about the deleted state of the item. Read-only.
 }

@@ -238,6 +250,28 @@ type MoveItemRequest struct {
     FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo,omitempty"` // File system information on client. Read-write.
 }

+// CreateShareLinkRequest is the request to create a sharing link
+// Always Type:view and Scope:anonymous for public sharing
+type CreateShareLinkRequest struct {
+    Type  string `json:"type"`            // Link type in View, Edit or Embed
+    Scope string `json:"scope,omitempty"` // Optional. Scope in anonymous, organization
+}
+
+// CreateShareLinkResponse is the response from CreateShareLinkRequest
+type CreateShareLinkResponse struct {
+    ID    string   `json:"id"`
+    Roles []string `json:"roles"`
+    Link  struct {
+        Type        string `json:"type"`
+        Scope       string `json:"scope"`
+        WebURL      string `json:"webUrl"`
+        Application struct {
+            ID          string `json:"id"`
+            DisplayName string `json:"displayName"`
+        } `json:"application"`
+    } `json:"link"`
+}
+
 // AsyncOperationStatus provides information on the status of an asynchronous job progress.
 //
 // The following API calls return AsyncOperationStatus resources:
@@ -251,6 +285,7 @@ type AsyncOperationStatus struct {

 // GetID returns a normalized ID of the item
 // If DriveID is known it will be prefixed to the ID with # separator
+// Can be parsed using onedrive.parseNormalizedID(normalizedID)
 func (i *Item) GetID() string {
     if i.IsRemote() && i.RemoteItem.ID != "" {
         return i.RemoteItem.ParentReference.DriveID + "#" + i.RemoteItem.ID
@@ -281,6 +316,24 @@ func (i *Item) GetFolder() *FolderFacet {
     return i.Folder
 }

+// GetPackage returns a normalized Package of the item
+func (i *Item) GetPackage() *PackageFacet {
+    if i.IsRemote() && i.RemoteItem.Package != nil {
+        return i.RemoteItem.Package
+    }
+    return i.Package
+}
+
+// GetPackageType returns the package type of the item if available,
+// otherwise ""
+func (i *Item) GetPackageType() string {
+    pack := i.GetPackage()
+    if pack == nil {
+        return ""
+    }
+    return pack.Type
+}
+
 // GetFile returns a normalized File of the item
 func (i *Item) GetFile() *FileFacet {
     if i.IsRemote() && i.RemoteItem.File != nil {
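For reference, the new CreateShareLinkRequest marshals to a small JSON body; a stand-alone demonstration using the same struct shape as added above:

package main

import (
    "encoding/json"
    "fmt"
)

// CreateShareLinkRequest mirrors the struct added in the diff above.
type CreateShareLinkRequest struct {
    Type  string `json:"type"`
    Scope string `json:"scope,omitempty"`
}

func main() {
    body, _ := json.Marshal(CreateShareLinkRequest{Type: "view", Scope: "anonymous"})
    fmt.Println(string(body)) // {"type":"view","scope":"anonymous"}
}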
@@ -43,6 +43,8 @@ const (
     driveTypePersonal   = "personal"
     driveTypeBusiness   = "business"
     driveTypeSharepoint = "documentLibrary"
+    defaultChunkSize    = 10 * fs.MebiByte
+    chunkSizeMultiple   = 320 * fs.KibiByte
 )

 // Globals
@@ -212,9 +214,12 @@ func init() {
             Name: config.ConfigClientSecret,
             Help: "Microsoft App Client Secret\nLeave blank normally.",
         }, {
-            Name:    "chunk_size",
-            Help:    "Chunk size to upload files with - must be multiple of 320k.",
-            Default: fs.SizeSuffix(10 * 1024 * 1024),
+            Name: "chunk_size",
+            Help: `Chunk size to upload files with - must be multiple of 320k.

+Above this size files will be chunked - must be multiple of 320k. Note
+that the chunks will be buffered into memory.`,
+            Default:  defaultChunkSize,
             Advanced: true,
         }, {
             Name: "drive_id",
@@ -226,15 +231,27 @@ func init() {
             Help:     "The type of the drive ( personal | business | documentLibrary )",
             Default:  "",
             Advanced: true,
+        }, {
+            Name: "expose_onenote_files",
+            Help: `Set to make OneNote files show up in directory listings.
+
+By default rclone will hide OneNote files in directory listings because
+operations like "Open" and "Update" won't work on them. But this
+behaviour may also prevent you from deleting them. If you want to
+delete OneNote files or otherwise want them to show up in directory
+listing, set this option.`,
+            Default:  false,
+            Advanced: true,
         }},
     })
 }

 // Options defines the configuration for this backend
 type Options struct {
-    ChunkSize fs.SizeSuffix `config:"chunk_size"`
-    DriveID   string        `config:"drive_id"`
-    DriveType string        `config:"drive_type"`
+    ChunkSize          fs.SizeSuffix `config:"chunk_size"`
+    DriveID            string        `config:"drive_id"`
+    DriveType          string        `config:"drive_type"`
+    ExposeOneNoteFiles bool          `config:"expose_onenote_files"`
 }

 // Fs represents a remote one drive
@@ -255,15 +272,16 @@ type Fs struct {
 //
 // Will definitely have info but maybe not meta
 type Object struct {
-    fs           *Fs       // what this object is part of
-    remote       string    // The remote path
-    hasMetaData  bool      // whether info below has been set
-    size         int64     // size of the object
-    modTime      time.Time // modification time of the object
-    id           string    // ID of the object
-    sha1         string    // SHA-1 of the object content
-    quickxorhash string    // QuickXorHash of the object content
-    mimeType     string    // Content-Type of object from server (may not be as uploaded)
+    fs            *Fs       // what this object is part of
+    remote        string    // The remote path
+    hasMetaData   bool      // whether info below has been set
+    isOneNoteFile bool      // Whether the object is a OneNote file
+    size          int64     // size of the object
+    modTime       time.Time // modification time of the object
+    id            string    // ID of the object
+    sha1          string    // SHA-1 of the object content
+    quickxorhash  string    // QuickXorHash of the object content
+    mimeType      string    // Content-Type of object from server (may not be as uploaded)
 }

 // ------------------------------------------------------------
@@ -316,20 +334,10 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
     return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }

-// readMetaDataForPath reads the metadata from the path
-func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
-    var opts rest.Opts
-    if len(path) == 0 {
-        opts = rest.Opts{
-            Method: "GET",
-            Path:   "/root",
-        }
-    } else {
-        opts = rest.Opts{
-            Method: "GET",
-            Path:   "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
-        }
-    }
+// readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID.
+// if `relPath` == "", it reads the metadata for the item with that ID.
+func (f *Fs) readMetaDataForPathRelativeToID(normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
+    opts := newOptsCall(normalizedID, "GET", ":/"+rest.URLPathEscape(replaceReservedChars(relPath)))
     err = f.pacer.Call(func() (bool, error) {
         resp, err = f.srv.CallJSON(&opts, nil, &info)
         return shouldRetry(resp, err)
@@ -338,6 +346,72 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Respon
     return info, resp, err
 }

+// readMetaDataForPath reads the metadata from the path (relative to the absolute root)
+func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
+    firstSlashIndex := strings.IndexRune(path, '/')
+
+    if f.driveType != driveTypePersonal || firstSlashIndex == -1 {
+        var opts rest.Opts
+        if len(path) == 0 {
+            opts = rest.Opts{
+                Method: "GET",
+                Path:   "/root",
+            }
+        } else {
+            opts = rest.Opts{
+                Method: "GET",
+                Path:   "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
+            }
+        }
+        err = f.pacer.Call(func() (bool, error) {
+            resp, err = f.srv.CallJSON(&opts, nil, &info)
+            return shouldRetry(resp, err)
+        })
+        return info, resp, err
+    }
+
+    // The following branch handles the case when we're using OneDrive Personal and the path is in a folder.
+    // For OneDrive Personal, we need to consider the "shared with me" folders.
+    // An item in such a folder can only be addressed by its ID relative to the sharer's driveID or
+    // by its path relative to the folder's ID relative to the sharer's driveID.
+    // Note: A "shared with me" folder can only be placed in the sharee's absolute root.
+    // So we read metadata relative to a suitable folder's normalized ID.
+    var dirCacheFoundRoot bool
+    var rootNormalizedID string
+    if f.dirCache != nil {
+        var ok bool
+        if rootNormalizedID, ok = f.dirCache.Get(""); ok {
+            dirCacheFoundRoot = true
+        }
+    }
+
+    relPath, insideRoot := getRelativePathInsideBase(f.root, path)
+    var firstDir, baseNormalizedID string
+    if !insideRoot || !dirCacheFoundRoot {
+        // We do not have the normalized ID in dirCache for our query to base on. Query it manually.
+        firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
+        info, resp, err := f.readMetaDataForPath(firstDir)
+        if err != nil {
+            return info, resp, err
+        }
+        baseNormalizedID = info.GetID()
+    } else {
+        if f.root != "" {
+            // Read metadata based on root
+            baseNormalizedID = rootNormalizedID
+        } else {
+            // Read metadata based on firstDir
+            firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
+            baseNormalizedID, err = f.dirCache.FindDir(firstDir, false)
+            if err != nil {
+                return nil, nil, err
+            }
+        }
+    }
+
+    return f.readMetaDataForPathRelativeToID(baseNormalizedID, relPath)
+}
+
 // errorHandler parses a non 2xx error response into an error
 func errorHandler(resp *http.Response) error {
     // Decode error response
@@ -352,6 +426,25 @@ func errorHandler(resp *http.Response) error {
     return errResponse
 }

+func checkUploadChunkSize(cs fs.SizeSuffix) error {
+    const minChunkSize = fs.Byte
+    if cs%chunkSizeMultiple != 0 {
+        return errors.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
+    }
+    if cs < minChunkSize {
+        return errors.Errorf("%s is less than %s", cs, minChunkSize)
+    }
+    return nil
+}
+
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+    err = checkUploadChunkSize(cs)
+    if err == nil {
+        old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+    }
+    return
+}
+
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     // Parse config into Options struct
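A stand-alone sketch of the 320 KiB-multiple rule the new checkUploadChunkSize enforces, using plain byte counts instead of fs.SizeSuffix:

package main

import "fmt"

const chunkSizeMultiple = 320 * 1024 // 320 KiB, required by the upload API

func checkUploadChunkSize(cs int64) error {
    if cs%chunkSizeMultiple != 0 {
        return fmt.Errorf("%d is not a multiple of %d", cs, chunkSizeMultiple)
    }
    if cs < 1 {
        return fmt.Errorf("%d is less than 1 byte", cs)
    }
    return nil
}

func main() {
    fmt.Println(checkUploadChunkSize(10 * 1024 * 1024)) // <nil>: 10 MiB = 32 x 320 KiB
    fmt.Println(checkUploadChunkSize(5000000))          // error: not a multiple of 327680
}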
@@ -360,18 +453,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     if err != nil {
         return nil, err
     }
-    if opt.ChunkSize%(320*1024) != 0 {
-        return nil, errors.Errorf("chunk size %d is not a multiple of 320k", opt.ChunkSize)
+
+    err = checkUploadChunkSize(opt.ChunkSize)
+    if err != nil {
+        return nil, errors.Wrap(err, "onedrive: chunk size")
     }

     if opt.DriveID == "" || opt.DriveType == "" {
-        log.Fatalf("Unable to get drive_id and drive_type. If you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend.")
+        return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
     }

     root = parsePath(root)
     oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
     if err != nil {
-        log.Fatalf("Failed to configure OneDrive: %v", err)
+        return nil, errors.Wrap(err, "failed to configure OneDrive")
     }

     f := &Fs{
@@ -409,16 +504,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     if err != nil {
         // Assume it is a file
         newRoot, remote := dircache.SplitPath(root)
-        newF := *f
-        newF.dirCache = dircache.New(newRoot, rootInfo.ID, &newF)
-        newF.root = newRoot
+        tempF := *f
+        tempF.dirCache = dircache.New(newRoot, rootInfo.ID, &tempF)
+        tempF.root = newRoot
         // Make new Fs which is the parent
-        err = newF.dirCache.FindRoot(false)
+        err = tempF.dirCache.FindRoot(false)
         if err != nil {
             // No root so return old f
             return f, nil
         }
-        _, err := newF.newObjectWithInfo(remote, nil)
+        _, err := tempF.newObjectWithInfo(remote, nil)
         if err != nil {
             if err == fs.ErrorObjectNotFound {
                 // File doesn't exist so return old f
@@ -426,8 +521,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
             }
             return nil, err
         }
+        // XXX: update the old f here instead of returning tempF, since
+        // `features` were already filled with functions having *f as a receiver.
+        // See https://github.com/ncw/rclone/issues/2182
+        f.dirCache = tempF.dirCache
+        f.root = tempF.root
         // return an error with an fs which points to the parent
-        return &newF, fs.ErrorIsFile
+        return f, fs.ErrorIsFile
     }
     return f, nil
 }
@@ -470,24 +570,20 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
 // FindLeaf finds a directory of name leaf in the folder with ID pathID
 func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
     // fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
-    parent, ok := f.dirCache.GetInv(pathID)
+    _, ok := f.dirCache.GetInv(pathID)
     if !ok {
         return "", false, errors.New("couldn't find parent ID")
     }
-    path := leaf
-    if parent != "" {
-        path = parent + "/" + path
-    }
-    if f.dirCache.FoundRoot() {
-        path = f.rootSlash() + path
-    }
-    info, resp, err := f.readMetaDataForPath(path)
+    info, resp, err := f.readMetaDataForPathRelativeToID(pathID, leaf)
     if err != nil {
         if resp != nil && resp.StatusCode == http.StatusNotFound {
             return "", false, nil
         }
         return "", false, err
     }
+    if info.GetPackageType() == api.PackageTypeOneNote {
+        return "", false, errors.New("found OneNote file when looking for folder")
+    }
     if info.GetFolder() == nil {
         return "", false, errors.New("found file when looking for folder")
     }
@@ -596,6 +692,11 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
     }
     var iErr error
     _, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
+        if !f.opt.ExposeOneNoteFiles && info.GetPackageType() == api.PackageTypeOneNote {
+            fs.Debugf(info.Name, "OneNote file not shown in directory listing")
+            return false
+        }
+
         remote := path.Join(dir, info.GetName())
         folder := info.GetFolder()
         if folder != nil {
@@ -815,13 +916,13 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
     opts.ExtraHeaders = map[string]string{"Prefer": "respond-async"}
     opts.NoResponse = true

-    id, _, _ := parseDirID(directoryID)
+    id, dstDriveID, _ := parseNormalizedID(directoryID)

     replacedLeaf := replaceReservedChars(leaf)
     copyReq := api.CopyItemRequest{
         Name: &replacedLeaf,
         ParentReference: api.ItemReference{
-            DriveID: f.driveID,
+            DriveID: dstDriveID,
             ID:      id,
         },
     }
@@ -888,15 +989,23 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
         return nil, err
     }

+    id, dstDriveID, _ := parseNormalizedID(directoryID)
+    _, srcObjDriveID, _ := parseNormalizedID(srcObj.id)
+
+    if dstDriveID != srcObjDriveID {
+        // https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
+        // "Items cannot be moved between Drives using this request."
+        return nil, fs.ErrorCantMove
+    }
+
     // Move the object
     opts := newOptsCall(srcObj.id, "PATCH", "")

-    id, _, _ := parseDirID(directoryID)
-
     move := api.MoveItemRequest{
         Name: replaceReservedChars(leaf),
         ParentReference: &api.ItemReference{
-            ID: id,
+            DriveID: dstDriveID,
+            ID:      id,
         },
         // We set the mod time too as it gets reset otherwise
         FileSystemInfo: &api.FileSystemInfoFacet{
@@ -972,7 +1081,20 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
     if err != nil {
         return err
     }
-    parsedDstDirID, _, _ := parseDirID(dstDirectoryID)
+    parsedDstDirID, dstDriveID, _ := parseNormalizedID(dstDirectoryID)
+
+    // Find ID of src
+    srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
+    if err != nil {
+        return err
+    }
+    _, srcDriveID, _ := parseNormalizedID(srcID)
+
+    if dstDriveID != srcDriveID {
+        // https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
+        // "Items cannot be moved between Drives using this request."
+        return fs.ErrorCantDirMove
+    }

     // Check destination does not exist
     if dstRemote != "" {
@@ -986,14 +1108,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
         }
     }

-    // Find ID of src
-    srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
-    if err != nil {
-        return err
-    }
-
     // Get timestamps of src so they can be preserved
-    srcInfo, _, err := srcFs.readMetaDataForPath(srcPath)
+    srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(srcID, "")
     if err != nil {
         return err
     }
@@ -1003,7 +1119,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
     move := api.MoveItemRequest{
         Name: replaceReservedChars(leaf),
         ParentReference: &api.ItemReference{
-            ID: parsedDstDirID,
+            DriveID: dstDriveID,
+            ID:      parsedDstDirID,
         },
         // We set the mod time too as it gets reset otherwise
         FileSystemInfo: &api.FileSystemInfoFacet{
@@ -1064,6 +1181,32 @@ func (f *Fs) Hashes() hash.Set {
     return hash.Set(hash.QuickXorHash)
 }

+// PublicLink returns a link for downloading without account.
+func (f *Fs) PublicLink(remote string) (link string, err error) {
+    info, _, err := f.readMetaDataForPath(f.srvPath(remote))
+    if err != nil {
+        return "", err
+    }
+    opts := newOptsCall(info.GetID(), "POST", "/createLink")
+
+    share := api.CreateShareLinkRequest{
+        Type:  "view",
+        Scope: "anonymous",
+    }
+
+    var resp *http.Response
+    var result api.CreateShareLinkResponse
+    err = f.pacer.Call(func() (bool, error) {
+        resp, err = f.srv.CallJSON(&opts, &share, &result)
+        return shouldRetry(resp, err)
+    })
+    if err != nil {
+        fmt.Println(err)
+        return "", err
+    }
+    return result.Link.WebURL, nil
+}
+
 // ------------------------------------------------------------

 // Fs returns the parent Fs
@@ -1084,9 +1227,14 @@ func (o *Object) Remote() string {
     return o.remote
 }

+// srvPath returns a path for use in server given a remote
+func (f *Fs) srvPath(remote string) string {
+    return replaceReservedChars(f.rootSlash() + remote)
+}
+
 // srvPath returns a path for use in server
 func (o *Object) srvPath() string {
-    return replaceReservedChars(o.fs.rootSlash() + o.remote)
+    return o.fs.srvPath(o.remote)
 }

 // Hash returns the SHA-1 of an object returning a lowercase hex string
@@ -1121,6 +1269,8 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
     o.hasMetaData = true
     o.size = info.GetSize()

+    o.isOneNoteFile = info.GetPackageType() == api.PackageTypeOneNote
+
     // Docs: https://docs.microsoft.com/en-us/onedrive/developer/rest-api/resources/hashes
     //
     // We use SHA1 for onedrive personal and QuickXorHash for onedrive for business
@@ -1185,13 +1335,13 @@ func (o *Object) ModTime() time.Time {
 // setModTime sets the modification time of the local fs object
 func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
     var opts rest.Opts
-    _, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
-    _, drive, rootURL := parseDirID(directoryID)
+    leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
+    trueDirID, drive, rootURL := parseNormalizedID(directoryID)
     if drive != "" {
         opts = rest.Opts{
             Method:  "PATCH",
             RootURL: rootURL,
-            Path:    "/" + drive + "/root:/" + rest.URLPathEscape(o.srvPath()),
+            Path:    "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf),
         }
     } else {
         opts = rest.Opts{
@@ -1232,6 +1382,10 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
     if o.id == "" {
         return nil, errors.New("can't download - no id")
     }
+    if o.isOneNoteFile {
+        return nil, errors.New("can't open a OneNote file")
+    }
+
     fs.FixRangeOption(options, o.size)
     var resp *http.Response
     opts := newOptsCall(o.id, "GET", "/content")
@@ -1255,7 +1409,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 // createUploadSession creates an upload session for the object
 func (o *Object) createUploadSession(modTime time.Time) (response *api.CreateUploadResponse, err error) {
     leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
-    id, drive, rootURL := parseDirID(directoryID)
+    id, drive, rootURL := parseNormalizedID(directoryID)
     var opts rest.Opts
     if drive != "" {
         opts = rest.Opts{
@@ -1275,6 +1429,12 @@ func (o *Object) createUploadSession(modTime time.Time) (response *api.CreateUpl
     var resp *http.Response
     err = o.fs.pacer.Call(func() (bool, error) {
         resp, err = o.fs.srv.CallJSON(&opts, &createRequest, &response)
+        if apiErr, ok := err.(*api.Error); ok {
+            if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
+                // Make the error more user-friendly
+                err = errors.New(err.Error() + " (is it a OneNote file?)")
+            }
+        }
         return shouldRetry(resp, err)
     })
     return response, err
@@ -1382,13 +1542,13 @@ func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (
     fs.Debugf(o, "Starting singlepart upload")
     var resp *http.Response
     var opts rest.Opts
-    _, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
-    _, drive, rootURL := parseDirID(directoryID)
+    leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
+    trueDirID, drive, rootURL := parseNormalizedID(directoryID)
     if drive != "" {
         opts = rest.Opts{
             Method:        "PUT",
             RootURL:       rootURL,
-            Path:          "/" + drive + "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
+            Path:          "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf) + ":/content",
             ContentLength: &size,
             Body:          in,
         }
@@ -1407,6 +1567,12 @@ func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (

     err = o.fs.pacer.Call(func() (bool, error) {
         resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
+        if apiErr, ok := err.(*api.Error); ok {
+            if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
+                // Make the error more user-friendly
+                err = errors.New(err.Error() + " (is it a OneNote file?)")
+            }
+        }
         return shouldRetry(resp, err)
     })
     if err != nil {
@@ -1425,6 +1591,10 @@ func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+    if o.hasMetaData && o.isOneNoteFile {
+        return errors.New("can't upload content to a OneNote file")
+    }
+
     o.fs.tokenRenewer.Start()
     defer o.fs.tokenRenewer.Stop()
@@ -1461,8 +1631,8 @@ func (o *Object) ID() string {
     return o.id
 }

-func newOptsCall(id string, method string, route string) (opts rest.Opts) {
-    id, drive, rootURL := parseDirID(id)
+func newOptsCall(normalizedID string, method string, route string) (opts rest.Opts) {
+    id, drive, rootURL := parseNormalizedID(normalizedID)

     if drive != "" {
         return rest.Opts{
@@ -1477,7 +1647,10 @@ func newOptsCall(id string, method string, route string) (opts rest.Opts) {
     }
 }

-func parseDirID(ID string) (string, string, string) {
+// parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`)
+// and returns itemID, driveID, rootURL.
+// Such a normalized ID can come from (*Item).GetID()
+func parseNormalizedID(ID string) (string, string, string) {
     if strings.Index(ID, "#") >= 0 {
         s := strings.Split(ID, "#")
         return s[1], s[0], graphURL + "/drives"
@@ -1485,6 +1658,21 @@ func parseDirID(ID string) (string, string, string) {
     return ID, "", ""
 }

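A normalized ID is either a bare item ID or driveID#itemID. A minimal stand-alone sketch of the split parseNormalizedID performs (the IDs below are made up, and the rootURL result is omitted for brevity):

package main

import (
    "fmt"
    "strings"
)

// parse splits a normalized ID of the form "driveID#itemID" into
// (itemID, driveID); a bare ID comes back with an empty driveID.
func parse(id string) (itemID, driveID string) {
    if i := strings.Index(id, "#"); i >= 0 {
        return id[i+1:], id[:i]
    }
    return id, ""
}

func main() {
    fmt.Println(parse("b!xyz#01ABC")) // 01ABC b!xyz
    fmt.Println(parse("01ABC"))       // 01ABC
}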
+// getRelativePathInsideBase checks if `target` is inside `base`. If so, it
+// returns a relative path for `target` based on `base` and a boolean `true`.
+// Otherwise returns "", false.
+func getRelativePathInsideBase(base, target string) (string, bool) {
+    if base == "" {
+        return target, true
+    }
+
+    baseSlash := base + "/"
+    if strings.HasPrefix(target+"/", baseSlash) {
+        return target[len(baseSlash):], true
+    }
+    return "", false
+}
+
 // Check the interfaces are satisfied
 var (
     _ fs.Fs = (*Fs)(nil)
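For illustration, a stand-alone copy of getRelativePathInsideBase with the cases it is designed to get right; note the trailing-slash trick that stops a partial name like "a/bc" matching base "a/b":

package main

import (
    "fmt"
    "strings"
)

// relativeTo mirrors getRelativePathInsideBase: it reports whether
// target lies inside base and, if so, the path relative to base.
func relativeTo(base, target string) (string, bool) {
    if base == "" {
        return target, true
    }
    baseSlash := base + "/"
    if strings.HasPrefix(target+"/", baseSlash) {
        return target[len(baseSlash):], true
    }
    return "", false
}

func main() {
    fmt.Println(relativeTo("a/b", "a/b/c/d")) // c/d true
    fmt.Println(relativeTo("a/b", "a/b"))     // "" true  (target == base)
    fmt.Println(relativeTo("a/b", "a/bc"))    // "" false (no partial-name match)
}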
@@ -1494,6 +1682,7 @@ var (
     _ fs.DirMover        = (*Fs)(nil)
     _ fs.DirCacheFlusher = (*Fs)(nil)
     _ fs.Abouter         = (*Fs)(nil)
+    _ fs.PublicLinker    = (*Fs)(nil)
     _ fs.Object          = (*Object)(nil)
     _ fs.MimeTyper       = &Object{}
     _ fs.IDer            = &Object{}
@@ -1,10 +1,10 @@
 // Test OneDrive filesystem interface
-package onedrive_test
+package onedrive

 import (
     "testing"

-    "github.com/ncw/rclone/backend/onedrive"
+    "github.com/ncw/rclone/fs"
     "github.com/ncw/rclone/fstest/fstests"
 )

@@ -12,6 +12,15 @@ import (
 func TestIntegration(t *testing.T) {
     fstests.Run(t, &fstests.Opt{
         RemoteName: "TestOneDrive:",
-        NilObject:  (*onedrive.Object)(nil),
+        NilObject:  (*Object)(nil),
+        ChunkedUpload: fstests.ChunkedUploadConfig{
+            CeilChunkSize: fstests.NextMultipleOf(chunkSizeMultiple),
+        },
     })
 }
+
+func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+    return f.setUploadChunkSize(cs)
+}
+
+var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
@@ -6,6 +6,7 @@ import (
     "io"
     "mime/multipart"
     "net/http"
+    "net/url"
     "path"
     "strconv"
     "strings"
@@ -20,6 +21,7 @@ import (
     "github.com/ncw/rclone/fs/hash"
     "github.com/ncw/rclone/lib/dircache"
     "github.com/ncw/rclone/lib/pacer"
+    "github.com/ncw/rclone/lib/readers"
     "github.com/ncw/rclone/lib/rest"
     "github.com/pkg/errors"
 )
@@ -177,17 +179,17 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     if err != nil {
         // Assume it is a file
         newRoot, remote := dircache.SplitPath(root)
-        newF := *f
-        newF.dirCache = dircache.New(newRoot, "0", &newF)
-        newF.root = newRoot
+        tempF := *f
+        tempF.dirCache = dircache.New(newRoot, "0", &tempF)
+        tempF.root = newRoot

         // Make new Fs which is the parent
-        err = newF.dirCache.FindRoot(false)
+        err = tempF.dirCache.FindRoot(false)
         if err != nil {
             // No root so return old f
             return f, nil
         }
-        _, err := newF.newObjectWithInfo(remote, nil)
+        _, err := tempF.newObjectWithInfo(remote, nil)
         if err != nil {
             if err == fs.ErrorObjectNotFound {
                 // File doesn't exist so return old f
@@ -195,8 +197,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
             }
             return nil, err
         }
+        // XXX: update the old f here instead of returning tempF, since
+        // `features` were already filled with functions having *f as a receiver.
+        // See https://github.com/ncw/rclone/issues/2182
+        f.dirCache = tempF.dirCache
+        f.root = tempF.root
         // return an error with an fs which points to the parent
-        return &newF, fs.ErrorIsFile
+        return f, fs.ErrorIsFile
     }
     return f, nil
 }
@@ -925,8 +932,9 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
     // resp.Body.Close()
     // fs.Debugf(nil, "PostOpen: %#v", openResponse)

-    // 1 MB chunks size
+    // 10 MB chunks size
     chunkSize := int64(1024 * 1024 * 10)
+    buf := make([]byte, int(chunkSize))
     chunkOffset := int64(0)
     remainingBytes := size
     chunkCounter := 0
@@ -939,14 +947,19 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
         remainingBytes -= currentChunkSize
         fs.Debugf(o, "Uploading chunk %d, size=%d, remain=%d", chunkCounter, currentChunkSize, remainingBytes)

+        chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, currentChunkSize)
         err = o.fs.pacer.Call(func() (bool, error) {
+            // seek to the start in case this is a retry
+            if _, err = chunk.Seek(0, io.SeekStart); err != nil {
+                return false, err
+            }
             var formBody bytes.Buffer
             w := multipart.NewWriter(&formBody)
             fw, err := w.CreateFormFile("file_data", o.remote)
             if err != nil {
                 return false, err
             }
-            if _, err = io.CopyN(fw, in, currentChunkSize); err != nil {
+            if _, err = io.Copy(fw, chunk); err != nil {
                 return false, err
             }
             // Add session_id
@@ -1077,7 +1090,7 @@ func (o *Object) readMetaData() (err error) {
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + rest.URLPathEscape(replaceReservedChars(leaf)),
|
||||
Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + url.QueryEscape(replaceReservedChars(leaf)),
|
||||
}
|
||||
resp, err = o.fs.srv.CallJSON(&opts, nil, &folderList)
|
||||
return o.fs.shouldRetry(resp, err)
|
||||
|
||||
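The switch from io.CopyN on the raw input to a seekable chunk is what makes the pacer retry safe: a retried HTTP call must resend identical bytes. Below is a standalone sketch of a repeatable reader in the same spirit as lib/readers — a hypothetical simplification, not the rclone type.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// repeatableReader reads from in, remembering what it has read so that
// Seek(0, io.SeekStart) can replay the same chunk on a retry.
type repeatableReader struct {
	in  io.Reader // remaining, unread source
	buf bytes.Buffer
	pos int
}

func (r *repeatableReader) Read(p []byte) (int, error) {
	if r.pos < r.buf.Len() { // replaying after a Seek
		n := copy(p, r.buf.Bytes()[r.pos:])
		r.pos += n
		return n, nil
	}
	n, err := r.in.Read(p)
	r.buf.Write(p[:n]) // remember for a possible retry
	r.pos += n
	return n, err
}

func (r *repeatableReader) Seek(offset int64, whence int) (int64, error) {
	if offset != 0 || whence != io.SeekStart {
		return 0, fmt.Errorf("only Seek(0, io.SeekStart) supported")
	}
	r.pos = 0
	return 0, nil
}

func main() {
	src := io.LimitReader(strings.NewReader("chunk-payload"), 13)
	r := &repeatableReader{in: src}
	first, _ := io.ReadAll(r)
	r.Seek(0, io.SeekStart) // simulate a retry
	second, _ := io.ReadAll(r)
	fmt.Println(string(first) == string(second)) // true
}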
@@ -246,7 +246,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	root = parsePath(root)
 	oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
 	if err != nil {
-		log.Fatalf("Failed to configure Pcloud: %v", err)
+		return nil, errors.Wrap(err, "failed to configure Pcloud")
 	}

 	f := &Fs{
@@ -276,16 +276,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if err != nil {
 		// Assume it is a file
 		newRoot, remote := dircache.SplitPath(root)
-		newF := *f
-		newF.dirCache = dircache.New(newRoot, rootID, &newF)
-		newF.root = newRoot
+		tempF := *f
+		tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
+		tempF.root = newRoot
 		// Make new Fs which is the parent
-		err = newF.dirCache.FindRoot(false)
+		err = tempF.dirCache.FindRoot(false)
 		if err != nil {
 			// No root so return old f
 			return f, nil
 		}
-		_, err := newF.newObjectWithInfo(remote, nil)
+		_, err := tempF.newObjectWithInfo(remote, nil)
 		if err != nil {
 			if err == fs.ErrorObjectNotFound {
 				// File doesn't exist so return old f
@@ -293,8 +293,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 			}
 			return nil, err
 		}
+		// XXX: update the old f here instead of returning tempF, since
+		// `features` were already filled with functions having *f as a receiver.
+		// See https://github.com/ncw/rclone/issues/2182
+		f.dirCache = tempF.dirCache
+		f.root = tempF.root
 		// return an error with an fs which points to the parent
-		return &newF, fs.ErrorIsFile
+		return f, fs.ErrorIsFile
 	}
 	return f, nil
 }

@@ -69,7 +69,7 @@ func init() {
 		}},
 	}, {
 		Name:     "connection_retries",
-		Help:     "Number of connnection retries.",
+		Help:     "Number of connection retries.",
 		Default:  3,
 		Advanced: true,
 	}},
backend/s3/s3.go
@@ -291,7 +291,11 @@ func init() {
 			Provider: "DigitalOcean",
 		}, {
 			Value:    "s3.wasabisys.com",
-			Help:     "Wasabi Object Storage",
+			Help:     "Wasabi US East endpoint",
+			Provider: "Wasabi",
+		}, {
+			Value:    "s3.us-west-1.wasabisys.com",
+			Help:     "Wasabi US West endpoint",
 			Provider: "Wasabi",
 		}},
 	}, {
@@ -448,7 +452,12 @@ func init() {
 		Provider: "!AWS,IBMCOS",
 	}, {
 		Name: "acl",
-		Help: "Canned ACL used when creating buckets and/or storing objects in S3.\nFor more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl",
+		Help: `Canned ACL used when creating buckets and storing or copying objects.
+
+For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
+
+Note that this ACL is applied when server side copying objects as S3
+doesn't copy the ACL from the source but rather writes a fresh one.`,
 		Examples: []fs.OptionExample{{
 			Value: "private",
 			Help:  "Owner gets FULL_CONTROL. No one else has access rights (default).",
@@ -517,7 +526,7 @@ func init() {
 		}},
 	}, {
 		Name:     "storage_class",
-		Help:     "The storage class to use when storing objects in S3.",
+		Help:     "The storage class to use when storing new objects in S3.",
 		Provider: "AWS",
 		Examples: []fs.OptionExample{{
 			Value: "",
@@ -534,11 +543,31 @@ func init() {
 		}, {
 			Value: "ONEZONE_IA",
 			Help:  "One Zone Infrequent Access storage class",
+		}, {
+			Value: "GLACIER",
+			Help:  "Glacier storage class",
 		}},
 	}, {
-		Name:     "chunk_size",
-		Help:     "Chunk size to use for uploading",
-		Default:  fs.SizeSuffix(s3manager.MinUploadPartSize),
+		Name: "upload_cutoff",
+		Help: `Cutoff for switching to chunked upload

Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5GB.`,
+		Default:  defaultUploadCutoff,
+		Advanced: true,
+	}, {
+		Name: "chunk_size",
+		Help: `Chunk size to use for uploading.

When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.

Note that "--s3-upload-concurrency" chunks of this size are buffered
in memory per transfer.

If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.`,
+		Default:  minChunkSize,
 		Advanced: true,
 	}, {
 		Name: "disable_checksum",
@@ -548,31 +577,57 @@ func init() {
 	}, {
 		Name:     "session_token",
 		Help:     "An AWS session token",
 		Hide:     fs.OptionHideBoth,
 		Advanced: true,
 	}, {
-		Name:     "upload_concurrency",
-		Help:     "Concurrency for multipart uploads.",
-		Default:  2,
+		Name: "upload_concurrency",
+		Help: `Concurrency for multipart uploads.

This is the number of chunks of the same file that are uploaded
concurrently.

If you are uploading small numbers of large file over high speed link
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
+		Default:  4,
 		Advanced: true,
 	}, {
-		Name: "force_path_style",
-		Help: "If true use path style access if false use virtual hosted style.\nSome providers (eg Aliyun OSS or Netease COS) require this.",
+		Name: "force_path_style",
+		Help: `If true use path style access if false use virtual hosted style.

If this is true (the default) then rclone will use path style access,
if false then rclone will use virtual path style. See [the AWS S3
docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
for more info.

Some providers (eg Aliyun OSS or Netease COS) require this set to false.`,
 		Default:  true,
 		Advanced: true,
+	}, {
+		Name: "v2_auth",
+		Help: `If true use v2 authentication.

If this is false (the default) then rclone will use v4 authentication.
If it is set then rclone will use v2 authentication.

Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
+		Default:  false,
+		Advanced: true,
 	}},
 	})
 }

 // Constants
 const (
-	metaMtime      = "Mtime"                           // the meta key to store mtime in - eg X-Amz-Meta-Mtime
-	metaMD5Hash    = "Md5chksum"                       // the meta key to store md5hash in
-	listChunkSize  = 1000                              // number of items to read at once
-	maxRetries     = 10                                // number of retries to make of operations
-	maxSizeForCopy = 5 * 1024 * 1024 * 1024            // The maximum size of object we can COPY
-	maxFileSize    = 5 * 1024 * 1024 * 1024 * 1024     // largest possible upload file size
-	minSleep       = 10 * time.Millisecond             // In case of error, start at 10ms sleep.
+	metaMtime           = "Mtime"                       // the meta key to store mtime in - eg X-Amz-Meta-Mtime
+	metaMD5Hash         = "Md5chksum"                   // the meta key to store md5hash in
+	listChunkSize       = 1000                          // number of items to read at once
+	maxRetries          = 10                            // number of retries to make of operations
+	maxSizeForCopy      = 5 * 1024 * 1024 * 1024        // The maximum size of object we can COPY
+	maxFileSize         = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
+	minChunkSize        = fs.SizeSuffix(s3manager.MinUploadPartSize)
+	defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
+	maxUploadCutoff     = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
+	minSleep            = 10 * time.Millisecond // In case of error, start at 10ms sleep.
 )
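The buffering warning in the chunk_size help can be made concrete. A back-of-the-envelope sketch using the defaults introduced in this diff (5MB minimum chunk via s3manager.MinUploadPartSize, upload_concurrency 4); the count of 4 parallel transfers is rclone's usual --transfers default and is an assumption here:

package main

import "fmt"

func main() {
	const chunkSize = 5 * 1024 * 1024 // minChunkSize: s3manager.MinUploadPartSize
	const concurrency = 4             // new upload_concurrency default
	const transfers = 4               // assumed --transfers default
	// Each transfer may hold up to `concurrency` chunks in memory at once.
	fmt.Printf("%d MiB buffered in the worst case\n",
		chunkSize*concurrency*transfers/(1024*1024)) // 80 MiB
}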
 // Options defines the configuration for this backend
@@ -588,11 +643,13 @@ type Options struct {
 	ServerSideEncryption string        `config:"server_side_encryption"`
 	SSEKMSKeyID          string        `config:"sse_kms_key_id"`
 	StorageClass         string        `config:"storage_class"`
+	UploadCutoff         fs.SizeSuffix `config:"upload_cutoff"`
 	ChunkSize            fs.SizeSuffix `config:"chunk_size"`
 	DisableChecksum      bool          `config:"disable_checksum"`
 	SessionToken         string        `config:"session_token"`
 	UploadConcurrency    int           `config:"upload_concurrency"`
 	ForcePathStyle       bool          `config:"force_path_style"`
+	V2Auth               bool          `config:"v2_auth"`
 }

 // Fs represents a remote s3 server
@@ -608,6 +665,7 @@ type Fs struct {
 	bucketOK      bool         // true if we have created the bucket
 	bucketDeleted bool         // true if we have deleted the bucket
 	pacer         *pacer.Pacer // To pace the API calls
+	srv           *http.Client // a plain http client
 }

 // Object describes a s3 object
@@ -765,9 +823,22 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
 		WithHTTPClient(fshttp.NewClient(fs.Config)).
 		WithS3ForcePathStyle(opt.ForcePathStyle)
 	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
-	ses := session.New()
-	c := s3.New(ses, awsConfig)
-	if opt.Region == "other-v2-signature" {
+	awsSessionOpts := session.Options{
+		Config: *awsConfig,
+	}
+	if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
+		// Enable loading config options from ~/.aws/config (selected by AWS_PROFILE env)
+		awsSessionOpts.SharedConfigState = session.SharedConfigEnable
+		// The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source
+		// (from the shared config file) if the passed-in Options.Config.Credentials is nil.
+		awsSessionOpts.Config.Credentials = nil
+	}
+	ses, err := session.NewSessionWithOptions(awsSessionOpts)
+	if err != nil {
+		return nil, nil, err
+	}
+	c := s3.New(ses)
+	if opt.V2Auth || opt.Region == "other-v2-signature" {
 		fs.Debugf(nil, "Using v2 auth")
 		signer := func(req *request.Request) {
 			// Ignore AnonymousCredentials object
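A minimal standalone sketch of the shared-config behaviour this enables (AWS SDK for Go v1): with SharedConfigEnable, AWS_PROFILE and ~/.aws/config settings such as region, role_arn and source_profile are honoured, and credentials are left unset so the profile's own credential source wins.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	ses, err := session.NewSessionWithOptions(session.Options{
		Config: aws.Config{},
		// Opt in to ~/.aws/config; Credentials must stay nil so the
		// profile's own credential source wins (see mergeConfigSrcs).
		SharedConfigState: session.SharedConfigEnable,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("region from profile:", aws.StringValue(ses.Config.Region))
}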
@@ -783,6 +854,36 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
 	return c, ses, nil
 }

+func checkUploadChunkSize(cs fs.SizeSuffix) error {
+	if cs < minChunkSize {
+		return errors.Errorf("%s is less than %s", cs, minChunkSize)
+	}
+	return nil
+}
+
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+	err = checkUploadChunkSize(cs)
+	if err == nil {
+		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+	}
+	return
+}
+
+func checkUploadCutoff(cs fs.SizeSuffix) error {
+	if cs > maxUploadCutoff {
+		return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
+	}
+	return nil
+}
+
+func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+	err = checkUploadCutoff(cs)
+	if err == nil {
+		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
+	}
+	return
+}
+
 // NewFs constructs an Fs from the path, bucket:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
@@ -791,8 +892,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if err != nil {
 		return nil, err
 	}
-	if opt.ChunkSize < fs.SizeSuffix(s3manager.MinUploadPartSize) {
-		return nil, errors.Errorf("s3 chunk size (%v) must be >= %v", opt.ChunkSize, fs.SizeSuffix(s3manager.MinUploadPartSize))
+	err = checkUploadChunkSize(opt.ChunkSize)
+	if err != nil {
+		return nil, errors.Wrap(err, "s3: chunk size")
+	}
+	err = checkUploadCutoff(opt.UploadCutoff)
+	if err != nil {
+		return nil, errors.Wrap(err, "s3: upload cutoff")
 	}
 	bucket, directory, err := s3ParsePath(root)
 	if err != nil {
@@ -810,6 +916,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		bucket: bucket,
 		ses:    ses,
 		pacer:  pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
+		srv:    fshttp.NewClient(fs.Config),
 	}
 	f.features = (&fs.Features{
 		ReadMimeType: true,
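The setUploadChunkSize/setUploadCutoff pair exists so tests can swap in a value and restore it afterwards. A simplified standalone sketch of the intended swap-and-restore usage (stand-in types, not the fstests harness):

package main

import "fmt"

// SizeSuffix and Fs stand in for the rclone types (hypothetical
// simplification); the swap-and-restore shape is what matters.
type SizeSuffix int64

type Fs struct{ chunkSize SizeSuffix }

func (f *Fs) SetUploadChunkSize(cs SizeSuffix) (SizeSuffix, error) {
	old := f.chunkSize
	f.chunkSize = cs
	return old, nil
}

func main() {
	f := &Fs{chunkSize: 5 << 20}
	old, _ := f.SetUploadChunkSize(1 << 20) // shrink for the test
	defer f.SetUploadChunkSize(old)         // restore on the way out
	fmt.Println(f.chunkSize, old)           // 1048576 5242880
}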
@@ -1236,10 +1343,20 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 	source := pathEscape(srcFs.bucket + "/" + srcFs.root + srcObj.remote)
 	req := s3.CopyObjectInput{
 		Bucket:            &f.bucket,
+		ACL:               &f.opt.ACL,
 		Key:               &key,
 		CopySource:        &source,
 		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
 	}
+	if f.opt.ServerSideEncryption != "" {
+		req.ServerSideEncryption = &f.opt.ServerSideEncryption
+	}
+	if f.opt.SSEKMSKeyID != "" {
+		req.SSEKMSKeyId = &f.opt.SSEKMSKeyID
+	}
+	if f.opt.StorageClass != "" {
+		req.StorageClass = &f.opt.StorageClass
+	}
 	err = f.pacer.Call(func() (bool, error) {
 		_, err = f.c.CopyObject(&req)
 		return shouldRetry(err)
@@ -1409,6 +1526,15 @@ func (o *Object) SetModTime(modTime time.Time) error {
 		Metadata:          o.meta,
 		MetadataDirective: &directive,
 	}
+	if o.fs.opt.ServerSideEncryption != "" {
+		req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
+	}
+	if o.fs.opt.SSEKMSKeyID != "" {
+		req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
+	}
+	if o.fs.opt.StorageClass != "" {
+		req.StorageClass = &o.fs.opt.StorageClass
+	}
 	err = o.fs.pacer.Call(func() (bool, error) {
 		_, err := o.fs.c.CopyObject(&req)
 		return shouldRetry(err)
@@ -1465,38 +1591,46 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	modTime := src.ModTime()
 	size := src.Size()

-	uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
-		u.Concurrency = o.fs.opt.UploadConcurrency
-		u.LeavePartsOnError = false
-		u.S3 = o.fs.c
-		u.PartSize = int64(o.fs.opt.ChunkSize)
-
-		if size == -1 {
-			// Make parts as small as possible while still being able to upload to the
-			// S3 file size limit. Rounded up to nearest MB.
-			u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
-			return
-		}
-		// Adjust PartSize until the number of parts is small enough.
-		if size/u.PartSize >= s3manager.MaxUploadParts {
-			// Calculate partition size rounded up to the nearest MB
-			u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
-		}
-	})
+	multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
+	var uploader *s3manager.Uploader
+	if multipart {
+		uploader = s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
+			u.Concurrency = o.fs.opt.UploadConcurrency
+			u.LeavePartsOnError = false
+			u.S3 = o.fs.c
+			u.PartSize = int64(o.fs.opt.ChunkSize)
+
+			if size == -1 {
+				// Make parts as small as possible while still being able to upload to the
+				// S3 file size limit. Rounded up to nearest MB.
+				u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
+				return
+			}
+			// Adjust PartSize until the number of parts is small enough.
+			if size/u.PartSize >= s3manager.MaxUploadParts {
+				// Calculate partition size rounded up to the nearest MB
+				u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
+			}
+		})
+	}

 	// Set the mtime in the meta data
 	metadata := map[string]*string{
 		metaMtime: aws.String(swift.TimeToFloatString(modTime)),
 	}

-	if !o.fs.opt.DisableChecksum && size > uploader.PartSize {
 		// read the md5sum if available for non multpart and if
 		// disable checksum isn't present.
+	var md5sum string
+	if !multipart || !o.fs.opt.DisableChecksum {
 		hash, err := src.Hash(hash.MD5)

 		if err == nil && matchMd5.MatchString(hash) {
 			hashBytes, err := hex.DecodeString(hash)

 			if err == nil {
-				metadata[metaMD5Hash] = aws.String(base64.StdEncoding.EncodeToString(hashBytes))
+				md5sum = base64.StdEncoding.EncodeToString(hashBytes)
+				if multipart {
+					metadata[metaMD5Hash] = &md5sum
+				}
 			}
 		}
 	}
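The shift arithmetic in the uploader callback rounds part sizes up to whole MiB while keeping the part count under s3manager.MaxUploadParts (10,000). A worked standalone example:

package main

import "fmt"

func main() {
	const maxUploadParts = 10000
	const maxFileSize = int64(5) * 1024 * 1024 * 1024 * 1024 // 5 TiB

	// Unknown-size stream: assume the worst case (a 5 TiB object).
	// ">> 20" then "+ 1" then "<< 20" rounds up to a whole MiB.
	partSize := (((maxFileSize / maxUploadParts) >> 20) + 1) << 20
	fmt.Println(partSize>>20, "MiB") // 525 MiB

	// A known 100 GiB file with 5 MiB chunks would need >10000 parts,
	// so the same rounding bumps the part size instead.
	size := int64(100) * 1024 * 1024 * 1024
	fmt.Println((((size/maxUploadParts)>>20)+1)<<20>>20, "MiB") // 11 MiB
}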
@@ -1505,30 +1639,98 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	mimeType := fs.MimeType(src)

 	key := o.fs.root + o.remote
-	req := s3manager.UploadInput{
-		Bucket:      &o.fs.bucket,
-		ACL:         &o.fs.opt.ACL,
-		Key:         &key,
-		Body:        in,
-		ContentType: &mimeType,
-		Metadata:    metadata,
-		//ContentLength: &size,
-	}
-	if o.fs.opt.ServerSideEncryption != "" {
-		req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
-	}
-	if o.fs.opt.SSEKMSKeyID != "" {
-		req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
-	}
-	if o.fs.opt.StorageClass != "" {
-		req.StorageClass = &o.fs.opt.StorageClass
-	}
-	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-		_, err = uploader.Upload(&req)
-		return shouldRetry(err)
-	})
-	if err != nil {
-		return err
+	if multipart {
+		req := s3manager.UploadInput{
+			Bucket:      &o.fs.bucket,
+			ACL:         &o.fs.opt.ACL,
+			Key:         &key,
+			Body:        in,
+			ContentType: &mimeType,
+			Metadata:    metadata,
+			//ContentLength: &size,
+		}
+		if o.fs.opt.ServerSideEncryption != "" {
+			req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
+		}
+		if o.fs.opt.SSEKMSKeyID != "" {
+			req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
+		}
+		if o.fs.opt.StorageClass != "" {
+			req.StorageClass = &o.fs.opt.StorageClass
+		}
+		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
+			_, err = uploader.Upload(&req)
+			return shouldRetry(err)
+		})
+		if err != nil {
+			return err
+		}
+	} else {
+		req := s3.PutObjectInput{
+			Bucket:      &o.fs.bucket,
+			ACL:         &o.fs.opt.ACL,
+			Key:         &key,
+			ContentType: &mimeType,
+			Metadata:    metadata,
+		}
+		if md5sum != "" {
+			req.ContentMD5 = &md5sum
+		}
+		if o.fs.opt.ServerSideEncryption != "" {
+			req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
+		}
+		if o.fs.opt.SSEKMSKeyID != "" {
+			req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
+		}
+		if o.fs.opt.StorageClass != "" {
+			req.StorageClass = &o.fs.opt.StorageClass
+		}
+
+		// Create the request
+		putObj, _ := o.fs.c.PutObjectRequest(&req)
+
+		// Sign it so we can upload using a presigned request.
+		//
+		// Note the SDK doesn't currently support streaming to
+		// PutObject so we'll use this work-around.
+		url, headers, err := putObj.PresignRequest(15 * time.Minute)
+		if err != nil {
+			return errors.Wrap(err, "s3 upload: sign request")
+		}
+
+		// Set request to nil if empty so as not to make chunked encoding
+		if size == 0 {
+			in = nil
+		}
+
+		// create the vanilla http request
+		httpReq, err := http.NewRequest("PUT", url, in)
+		if err != nil {
+			return errors.Wrap(err, "s3 upload: new request")
+		}
+
+		// set the headers we signed and the length
+		httpReq.Header = headers
+		httpReq.ContentLength = size
+
+		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
+			resp, err := o.fs.srv.Do(httpReq)
+			if err != nil {
+				return shouldRetry(err)
+			}
+			body, err := rest.ReadBody(resp)
+			if err != nil {
+				return shouldRetry(err)
+			}
+			if resp.StatusCode >= 200 && resp.StatusCode < 299 {
+				return false, nil
+			}
+			err = errors.Errorf("s3 upload: %s: %s", resp.Status, body)
+			return fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
+		})
+		if err != nil {
+			return err
+		}
 	}

 	// Read the metadata from the newly created object
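A condensed standalone sketch of the PutObject work-around above (assumed AWS SDK for Go v1 usage; bucket, key and file names are placeholders): PutObject wants a seekable body, so the request is presigned and the unseekable stream is sent with a plain HTTP PUT instead.

package main

import (
	"io"
	"log"
	"net/http"
	"os"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func put(in io.Reader, size int64, bucket, key string) error {
	ses := session.Must(session.NewSession())
	svc := s3.New(ses)
	putObj, _ := svc.PutObjectRequest(&s3.PutObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	// Presign rather than Send: the signature covers the headers only.
	url, headers, err := putObj.PresignRequest(15 * time.Minute)
	if err != nil {
		return err
	}
	httpReq, err := http.NewRequest("PUT", url, in)
	if err != nil {
		return err
	}
	httpReq.Header = headers
	httpReq.ContentLength = size // must be set by hand for a streamed body
	resp, err := http.DefaultClient.Do(httpReq)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
	return nil
}

func main() {
	f, _ := os.Open("local.bin") // placeholder file
	defer f.Close()
	fi, _ := f.Stat()
	if err := put(f, fi.Size(), "my-bucket", "remote.bin"); err != nil {
		log.Fatal(err)
	}
}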
@@ -1,10 +1,10 @@
 // Test S3 filesystem interface
-package s3_test
+package s3

 import (
 	"testing"

-	"github.com/ncw/rclone/backend/s3"
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fstest/fstests"
 )

@@ -12,6 +12,19 @@ import (
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestS3:",
-		NilObject:  (*s3.Object)(nil),
+		NilObject:  (*Object)(nil),
+		ChunkedUpload: fstests.ChunkedUploadConfig{
+			MinChunkSize: minChunkSize,
+		},
 	})
 }
+
+func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+	return f.setUploadChunkSize(cs)
+}
+
+func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+	return f.setUploadCutoff(cs)
+}
+
+var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
@@ -44,16 +44,7 @@ func sign(AccessKey, SecretKey string, req *http.Request) {
 	req.Header.Set("Date", date)

 	// Sort out URI
-	uri := req.URL.Opaque
-	if uri != "" {
-		if strings.HasPrefix(uri, "//") {
-			// Strip off //host/uri
-			uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
-			req.URL.Opaque = uri // reset to plain URI otherwise Ceph gets confused
-		}
-	} else {
-		uri = req.URL.Path
-	}
+	uri := req.URL.EscapedPath()
 	if uri == "" {
 		uri = "/"
 	}
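A small illustration of why EscapedPath is the safer input for the v2 signature (host name below is a placeholder): it yields the canonical percent-encoded path, whereas URL.Path is decoded and URL.Opaque is only populated for opaque URLs.

package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, _ := url.Parse("https://s3.example.com/bucket/a%20file.txt")
	fmt.Println(u.Path)          // /bucket/a file.txt (decoded form)
	fmt.Println(u.EscapedPath()) // /bucket/a%20file.txt (what must be signed)
}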
@@ -28,7 +28,7 @@ import (
 	"github.com/ncw/rclone/lib/readers"
 	"github.com/pkg/errors"
 	"github.com/pkg/sftp"
-	"github.com/xanzy/ssh-agent"
+	sshagent "github.com/xanzy/ssh-agent"
 	"golang.org/x/crypto/ssh"
 	"golang.org/x/time/rate"
 )
@@ -90,9 +90,20 @@ func init() {
 			Help:     "Allow asking for SFTP password when needed.",
 			Advanced: true,
 		}, {
-			Name:    "path_override",
-			Default: "",
-			Help:    "Override path used by SSH connection.",
+			Name:    "path_override",
+			Default: "",
+			Help: `Override path used by SSH connection.

This allows checksum calculation when SFTP and SSH paths are
different. This issue affects among others Synology NAS boxes.

Shared folders can be found in directories representing volumes

    rclone sync /home/local/directory remote:/directory --ssh-path-override /volume2/directory

Home directory can be found in a shared folder called "home"

    rclone sync /home/local/directory remote:/home/directory --ssh-path-override /volume1/homes/USER/directory`,
 			Advanced: true,
 		}, {
 			Name: "set_modtime",
@@ -583,12 +594,22 @@ func (f *Fs) Mkdir(dir string) error {

 // Rmdir removes the root directory of the Fs object
 func (f *Fs) Rmdir(dir string) error {
+	// Check to see if directory is empty as some servers will
+	// delete recursively with RemoveDirectory
+	entries, err := f.List(dir)
+	if err != nil {
+		return errors.Wrap(err, "Rmdir")
+	}
+	if len(entries) != 0 {
+		return fs.ErrorDirectoryNotEmpty
+	}
+	// Remove the directory
 	root := path.Join(f.root, dir)
 	c, err := f.getSftpConnection()
 	if err != nil {
 		return errors.Wrap(err, "Rmdir")
 	}
-	err = c.sftpClient.Remove(root)
+	err = c.sftpClient.RemoveDirectory(root)
 	f.putSftpConnection(&c, err)
 	return err
 }
@@ -758,6 +779,10 @@ func (o *Object) Hash(r hash.Type) (string, error) {
 		return "", hash.ErrUnsupported
 	}

+	if o.fs.opt.DisableHashCheck {
+		return "", nil
+	}
+
 	c, err := o.fs.getSftpConnection()
 	if err != nil {
 		return "", errors.Wrap(err, "Hash get SFTP connection")
@@ -21,6 +21,7 @@ import (
 	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/fs/operations"
 	"github.com/ncw/rclone/fs/walk"
+	"github.com/ncw/rclone/lib/pacer"
 	"github.com/ncw/swift"
 	"github.com/pkg/errors"
 )
@@ -29,13 +30,32 @@ import (
 const (
 	directoryMarkerContentType = "application/directory" // content type of directory marker objects
 	listChunks                 = 1000                    // chunk size to read directory listings
+	defaultChunkSize           = 5 * fs.GibiByte
+	minSleep                   = 10 * time.Millisecond // In case of error, start at 10ms sleep.
 )

 // SharedOptions are shared between swift and hubic
 var SharedOptions = []fs.Option{{
-	Name:    "chunk_size",
-	Help:    "Above this size files will be chunked into a _segments container.",
-	Default: fs.SizeSuffix(5 * 1024 * 1024 * 1024),
+	Name: "chunk_size",
+	Help: `Above this size files will be chunked into a _segments container.

Above this size files will be chunked into a _segments container. The
default for this is 5GB which is its maximum value.`,
+	Default:  defaultChunkSize,
 	Advanced: true,
+}, {
+	Name: "no_chunk",
+	Help: `Don't chunk files during streaming upload.

When doing streaming uploads (eg using rcat or mount) setting this
flag will cause the swift backend to not upload chunked files.

This will limit the maximum upload size to 5GB. However non chunked
files are easier to deal with and have an MD5SUM.

Rclone will still chunk files bigger than chunk_size when doing normal
copy operations.`,
+	Default:  false,
+	Advanced: true,
 }}

@@ -129,8 +149,13 @@ func init() {
 			Value: "admin",
 		}},
 	}, {
-		Name: "storage_policy",
-		Help: "The storage policy to use when creating a new container",
+		Name: "storage_policy",
+		Help: `The storage policy to use when creating a new container

This applies the specified storage policy when creating a new
container. The policy cannot be changed afterwards. The allowed
configuration values and their meaning depend on your Swift storage
provider.`,
 		Default: "",
 		Examples: []fs.OptionExample{{
 			Help: "Default",
@@ -164,6 +189,7 @@ type Options struct {
 	StoragePolicy string        `config:"storage_policy"`
 	EndpointType  string        `config:"endpoint_type"`
 	ChunkSize     fs.SizeSuffix `config:"chunk_size"`
+	NoChunk       bool          `config:"no_chunk"`
 }

 // Fs represents a remote swift server
@@ -178,16 +204,20 @@ type Fs struct {
 	containerOK       bool         // true if we have created the container
 	segmentsContainer string       // container to store the segments (if any) in
 	noCheckContainer  bool         // don't check the container before creating it
+	pacer             *pacer.Pacer // To pace the API calls
 }

 // Object describes a swift object
 //
 // Will definitely have info but maybe not meta
 type Object struct {
-	fs      *Fs           // what this object is part of
-	remote  string        // The remote path
-	info    swift.Object  // Info from the swift object if known
-	headers swift.Headers // The object headers if known
+	fs           *Fs    // what this object is part of
+	remote       string // The remote path
+	size         int64
+	lastModified time.Time
+	contentType  string
+	md5          string
+	headers      swift.Headers // The object headers if known
 }

 // ------------------------------------------------------------
@@ -218,6 +248,32 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }

+// retryErrorCodes is a slice of error codes that we will retry
+var retryErrorCodes = []int{
+	401, // Unauthorized (eg "Token has expired")
+	408, // Request Timeout
+	409, // Conflict - various states that could be resolved on a retry
+	429, // Rate exceeded.
+	500, // Get occasional 500 Internal Server Error
+	503, // Service Unavailable/Slow Down - "Reduce your request rate"
+	504, // Gateway Time-out
+}
+
+// shouldRetry returns a boolean as to whether this err deserves to be
+// retried. It returns the err as a convenience
+func shouldRetry(err error) (bool, error) {
+	// If this is an swift.Error object extract the HTTP error code
+	if swiftError, ok := err.(*swift.Error); ok {
+		for _, e := range retryErrorCodes {
+			if swiftError.StatusCode == e {
+				return true, err
+			}
+		}
+	}
+	// Check for generic failure conditions
+	return fserrors.ShouldRetry(err), err
+}
+
 // Pattern to match a swift path
 var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
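shouldRetry plus pacer.Call is the pattern the rest of this diff threads through every swift call. A standalone sketch of the shape — no backoff and a simplified error type, so not the rclone pacer:

package main

import (
	"errors"
	"fmt"
)

var errTooManyRequests = errors.New("429 rate exceeded")

// shouldRetry reports whether an error is transient; it returns the error
// as a convenience, mirroring the signature used in the diff.
func shouldRetry(err error) (bool, error) {
	return errors.Is(err, errTooManyRequests), err
}

// call retries fn until it succeeds or shouldRetry reports false,
// standing in for pacer.Call (the real pacer also sleeps between tries).
func call(fn func() (bool, error)) error {
	for tries := 0; tries < 10; tries++ {
		retry, err := fn()
		if !retry {
			return err
		}
	}
	return errors.New("too many tries")
}

func main() {
	attempts := 0
	err := call(func() (bool, error) {
		attempts++
		if attempts < 3 {
			return shouldRetry(errTooManyRequests) // transient: retry
		}
		return false, nil // success
	})
	fmt.Println(attempts, err) // 3 <nil>
}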
@@ -294,6 +350,22 @@ func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
 	return c, nil
 }

+func checkUploadChunkSize(cs fs.SizeSuffix) error {
+	const minChunkSize = fs.Byte
+	if cs < minChunkSize {
+		return errors.Errorf("%s is less than %s", cs, minChunkSize)
+	}
+	return nil
+}
+
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+	err = checkUploadChunkSize(cs)
+	if err == nil {
+		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+	}
+	return
+}
+
 // NewFsWithConnection constructs an Fs from the path, container:path
 // and authenticated connection.
 //
@@ -312,6 +384,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
 		segmentsContainer: container + "_segments",
 		root:              directory,
 		noCheckContainer:  noCheckContainer,
+		pacer:             pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
 	}
 	f.features = (&fs.Features{
 		ReadMimeType: true,
@@ -321,7 +394,11 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
 	if f.root != "" {
 		f.root += "/"
 		// Check to see if the object exists - ignoring directory markers
-		info, _, err := f.c.Object(container, directory)
+		var info swift.Object
+		err = f.pacer.Call(func() (bool, error) {
+			info, _, err = f.c.Object(container, directory)
+			return shouldRetry(err)
+		})
 		if err == nil && info.ContentType != directoryMarkerContentType {
 			f.root = path.Dir(directory)
 			if f.root == "." {
@@ -344,6 +421,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if err != nil {
 		return nil, err
 	}
+	err = checkUploadChunkSize(opt.ChunkSize)
+	if err != nil {
+		return nil, errors.Wrap(err, "swift: chunk size")
+	}

 	c, err := swiftConnection(opt, name)
 	if err != nil {
@@ -369,7 +450,10 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er
 	}
 	if info != nil {
 		// Set info but not headers
-		o.info = *info
+		err := o.decodeMetaData(info)
+		if err != nil {
+			return nil, err
+		}
 	} else {
 		err := o.readMetaData() // reads info and headers, returning an error
 		if err != nil {
@@ -407,7 +491,12 @@ func (f *Fs) listContainerRoot(container, root string, dir string, recurse bool,
 	}
 	rootLength := len(root)
 	return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
-		objects, err := f.c.Objects(container, opts)
+		var objects []swift.Object
+		var err error
+		err = f.pacer.Call(func() (bool, error) {
+			objects, err = f.c.Objects(container, opts)
+			return shouldRetry(err)
+		})
 		if err == nil {
 			for i := range objects {
 				object := &objects[i]
@@ -496,7 +585,11 @@ func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
 	if dir != "" {
 		return nil, fs.ErrorListBucketRequired
 	}
-	containers, err := f.c.ContainersAll(nil)
+	var containers []swift.Container
+	err = f.pacer.Call(func() (bool, error) {
+		containers, err = f.c.ContainersAll(nil)
+		return shouldRetry(err)
+	})
 	if err != nil {
 		return nil, errors.Wrap(err, "container listing failed")
 	}
@@ -557,7 +650,12 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {

 // About gets quota information
 func (f *Fs) About() (*fs.Usage, error) {
-	containers, err := f.c.ContainersAll(nil)
+	var containers []swift.Container
+	var err error
+	err = f.pacer.Call(func() (bool, error) {
+		containers, err = f.c.ContainersAll(nil)
+		return shouldRetry(err)
+	})
 	if err != nil {
 		return nil, errors.Wrap(err, "container listing failed")
 	}
@@ -607,14 +705,20 @@ func (f *Fs) Mkdir(dir string) error {
 	// Check to see if container exists first
 	var err error = swift.ContainerNotFound
 	if !f.noCheckContainer {
-		_, _, err = f.c.Container(f.container)
+		err = f.pacer.Call(func() (bool, error) {
+			_, _, err = f.c.Container(f.container)
+			return shouldRetry(err)
+		})
 	}
 	if err == swift.ContainerNotFound {
 		headers := swift.Headers{}
 		if f.opt.StoragePolicy != "" {
 			headers["X-Storage-Policy"] = f.opt.StoragePolicy
 		}
-		err = f.c.ContainerCreate(f.container, headers)
+		err = f.pacer.Call(func() (bool, error) {
+			err = f.c.ContainerCreate(f.container, headers)
+			return shouldRetry(err)
+		})
 	}
 	if err == nil {
 		f.containerOK = true
@@ -631,7 +735,11 @@ func (f *Fs) Rmdir(dir string) error {
 	if f.root != "" || dir != "" {
 		return nil
 	}
-	err := f.c.ContainerDelete(f.container)
+	var err error
+	err = f.pacer.Call(func() (bool, error) {
+		err = f.c.ContainerDelete(f.container)
+		return shouldRetry(err)
+	})
 	if err == nil {
 		f.containerOK = false
 	}
@@ -690,7 +798,10 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 		return nil, fs.ErrorCantCopy
 	}
 	srcFs := srcObj.fs
-	_, err = f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
+	err = f.pacer.Call(func() (bool, error) {
+		_, err = f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
+		return shouldRetry(err)
+	})
 	if err != nil {
 		return nil, err
 	}
@@ -739,7 +850,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {
 		fs.Debugf(o, "Returning empty Md5sum for swift large object")
 		return "", nil
 	}
-	return strings.ToLower(o.info.Hash), nil
+	return strings.ToLower(o.md5), nil
 }

 // hasHeader checks for the header passed in returning false if the
@@ -768,7 +879,22 @@ func (o *Object) isStaticLargeObject() (bool, error) {

 // Size returns the size of an object in bytes
 func (o *Object) Size() int64 {
-	return o.info.Bytes
+	return o.size
 }

+// decodeMetaData sets the metadata in the object from a swift.Object
+//
+// Sets
+//  o.lastModified
+//  o.size
+//  o.md5
+//  o.contentType
+func (o *Object) decodeMetaData(info *swift.Object) (err error) {
+	o.lastModified = info.LastModified
+	o.size = info.Bytes
+	o.md5 = info.Hash
+	o.contentType = info.ContentType
+	return nil
+}
+
 // readMetaData gets the metadata if it hasn't already been fetched
@@ -780,15 +906,23 @@ func (o *Object) readMetaData() (err error) {
 	if o.headers != nil {
 		return nil
 	}
-	info, h, err := o.fs.c.Object(o.fs.container, o.fs.root+o.remote)
+	var info swift.Object
+	var h swift.Headers
+	err = o.fs.pacer.Call(func() (bool, error) {
+		info, h, err = o.fs.c.Object(o.fs.container, o.fs.root+o.remote)
+		return shouldRetry(err)
+	})
 	if err != nil {
 		if err == swift.ObjectNotFound {
 			return fs.ErrorObjectNotFound
 		}
 		return err
 	}
-	o.info = info
 	o.headers = h
+	err = o.decodeMetaData(&info)
+	if err != nil {
+		return err
+	}
 	return nil
 }
@@ -799,17 +933,17 @@ func (o *Object) readMetaData() (err error) {
 // LastModified returned in the http headers
 func (o *Object) ModTime() time.Time {
 	if fs.Config.UseServerModTime {
-		return o.info.LastModified
+		return o.lastModified
 	}
 	err := o.readMetaData()
 	if err != nil {
 		fs.Debugf(o, "Failed to read metadata: %s", err)
-		return o.info.LastModified
+		return o.lastModified
 	}
 	modTime, err := o.headers.ObjectMetadata().GetModTime()
 	if err != nil {
 		// fs.Logf(o, "Failed to read mtime from object: %v", err)
-		return o.info.LastModified
+		return o.lastModified
 	}
 	return modTime
 }
@@ -832,7 +966,10 @@ func (o *Object) SetModTime(modTime time.Time) error {
 			newHeaders[k] = v
 		}
 	}
-	return o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
+	return o.fs.pacer.Call(func() (bool, error) {
+		err = o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
+		return shouldRetry(err)
+	})
 }

 // Storable returns if this object is storable
@@ -840,14 +977,17 @@ func (o *Object) SetModTime(modTime time.Time) error {
 // It compares the Content-Type to directoryMarkerContentType - that
 // makes it a directory marker which is not storable.
 func (o *Object) Storable() bool {
-	return o.info.ContentType != directoryMarkerContentType
+	return o.contentType != directoryMarkerContentType
 }

 // Open an object for read
 func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	headers := fs.OpenOptionHeaders(options)
 	_, isRanging := headers["Range"]
-	in, _, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, !isRanging, headers)
+	err = o.fs.pacer.Call(func() (bool, error) {
+		in, _, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, !isRanging, headers)
+		return shouldRetry(err)
+	})
 	return
 }
@@ -874,13 +1014,20 @@ func (o *Object) removeSegments(except string) error {
 		}
 		segmentPath := segmentsRoot + remote
 		fs.Debugf(o, "Removing segment file %q in container %q", segmentPath, o.fs.segmentsContainer)
-		return o.fs.c.ObjectDelete(o.fs.segmentsContainer, segmentPath)
+		var err error
+		return o.fs.pacer.Call(func() (bool, error) {
+			err = o.fs.c.ObjectDelete(o.fs.segmentsContainer, segmentPath)
+			return shouldRetry(err)
+		})
 	})
 	if err != nil {
 		return err
 	}
 	// remove the segments container if empty, ignore errors
-	err = o.fs.c.ContainerDelete(o.fs.segmentsContainer)
+	err = o.fs.pacer.Call(func() (bool, error) {
+		err = o.fs.c.ContainerDelete(o.fs.segmentsContainer)
+		return shouldRetry(err)
+	})
 	if err == nil {
 		fs.Debugf(o, "Removed empty container %q", o.fs.segmentsContainer)
 	}
@@ -909,13 +1056,19 @@ func urlEncode(str string) string {
 func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
 	// Create the segmentsContainer if it doesn't exist
 	var err error
-	_, _, err = o.fs.c.Container(o.fs.segmentsContainer)
+	err = o.fs.pacer.Call(func() (bool, error) {
+		_, _, err = o.fs.c.Container(o.fs.segmentsContainer)
+		return shouldRetry(err)
+	})
 	if err == swift.ContainerNotFound {
 		headers := swift.Headers{}
 		if o.fs.opt.StoragePolicy != "" {
 			headers["X-Storage-Policy"] = o.fs.opt.StoragePolicy
 		}
-		err = o.fs.c.ContainerCreate(o.fs.segmentsContainer, headers)
+		err = o.fs.pacer.Call(func() (bool, error) {
+			err = o.fs.c.ContainerCreate(o.fs.segmentsContainer, headers)
+			return shouldRetry(err)
+		})
 	}
 	if err != nil {
 		return "", err
@@ -944,7 +1097,10 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
 		segmentReader := io.LimitReader(in, n)
 		segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
 		fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, o.fs.segmentsContainer)
-		_, err := o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
+		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
+			_, err = o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
+			return shouldRetry(err)
+		})
 		if err != nil {
 			return "", err
 		}
@@ -955,7 +1111,10 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
 	headers["Content-Length"] = "0" // set Content-Length as we know it
 	emptyReader := bytes.NewReader(nil)
 	manifestName := o.fs.root + o.remote
-	_, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers)
+	err = o.fs.pacer.Call(func() (bool, error) {
+		_, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers)
+		return shouldRetry(err)
+	})
 	return uniquePrefix + "/", err
 }

@@ -985,17 +1144,31 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	contentType := fs.MimeType(src)
 	headers := m.ObjectHeaders()
 	uniquePrefix := ""
-	if size > int64(o.fs.opt.ChunkSize) || size == -1 {
+	if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
 		uniquePrefix, err = o.updateChunks(in, headers, size, contentType)
 		if err != nil {
 			return err
 		}
+		o.headers = nil // wipe old metadata
 	} else {
-		headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length as we know it
-		_, err := o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
+		if size >= 0 {
+			headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length if we know it
+		}
+		var rxHeaders swift.Headers
+		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
+			rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
+			return shouldRetry(err)
+		})
 		if err != nil {
 			return err
 		}
+		// set Metadata since ObjectPut checked the hash and length so we know the
+		// object has been safely uploaded
+		o.lastModified = modTime
+		o.size = size
+		o.md5 = rxHeaders["ETag"]
+		o.contentType = contentType
+		o.headers = headers
 	}

 	// If file was a dynamic large object then remove old/all segments
@@ -1006,8 +1179,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 		}
 	}

-	// Read the metadata from the newly created object
-	o.headers = nil // wipe old metadata
+	// Read the metadata from the newly created object if necessary
 	return o.readMetaData()
 }

@@ -1018,7 +1190,10 @@ func (o *Object) Remove() error {
 		return err
 	}
 	// Remove file/manifest first
-	err = o.fs.c.ObjectDelete(o.fs.container, o.fs.root+o.remote)
+	err = o.fs.pacer.Call(func() (bool, error) {
+		err = o.fs.c.ObjectDelete(o.fs.container, o.fs.root+o.remote)
+		return shouldRetry(err)
+	})
 	if err != nil {
 		return err
 	}
@@ -1034,7 +1209,7 @@ func (o *Object) Remove() error {

 // MimeType of an Object if known, "" otherwise
 func (o *Object) MimeType() string {
-	return o.info.ContentType
+	return o.contentType
 }

 // Check the interfaces are satisfied
@@ -1,10 +1,10 @@
 // Test Swift filesystem interface
-package swift_test
+package swift

 import (
 	"testing"

-	"github.com/ncw/rclone/backend/swift"
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fstest/fstests"
 )

@@ -12,6 +12,12 @@ import (
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestSwift:",
-		NilObject:  (*swift.Object)(nil),
+		NilObject:  (*Object)(nil),
 	})
 }
+
+func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+	return f.setUploadChunkSize(cs)
+}
+
+var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
@@ -19,7 +19,7 @@ import (
 func init() {
 	fsi := &fs.RegInfo{
 		Name:        "union",
-		Description: "Builds a stackable unification remote, which can appear to merge the contents of several remotes",
+		Description: "A stackable unification remote, which can appear to merge the contents of several remotes",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name: "remotes",
@@ -35,13 +35,36 @@ type Options struct {
 	Remotes fs.SpaceSepList `config:"remotes"`
 }

-// Fs represents a remote acd server
+// Fs represents a union of remotes
 type Fs struct {
 	name     string       // name of this remote
 	features *fs.Features // optional features
 	opt      Options      // options for this Fs
 	root     string       // the path we are working on
 	remotes  []fs.Fs      // slice of remotes
+	wr       fs.Fs        // writable remote
+	hashSet  hash.Set     // intersection of hash types
 }

+// Object describes a union Object
+//
+// This is a wrapped object which returns the Union Fs as its parent
+type Object struct {
+	fs.Object
+	fs *Fs // what this object is part of
+}
+
+// Wrap an existing object in the union Object
+func (f *Fs) wrapObject(o fs.Object) *Object {
+	return &Object{
+		Object: o,
+		fs:     f,
+	}
+}
+
+// Fs returns the union Fs as the parent
+func (o *Object) Fs() fs.Info {
+	return o.fs
+}
+
 // Name of the remote (as passed into NewFs)
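The wrapper relies on Go interface embedding: every method of the wrapped fs.Object is promoted, and redefining Fs() alone re-parents the object. A standalone sketch with a toy interface:

package main

import "fmt"

type Object interface {
	Fs() string
	Remote() string
}

type baseObject struct{ remote string }

func (o baseObject) Fs() string     { return "underlying" }
func (o baseObject) Remote() string { return o.remote }

// unionObject embeds the wrapped Object; all methods are promoted,
// and only Fs() is overridden to report the union as the parent.
type unionObject struct {
	Object
}

func (o unionObject) Fs() string { return "union" }

func main() {
	var o Object = unionObject{baseObject{remote: "a/b.txt"}}
	fmt.Println(o.Fs(), o.Remote()) // union a/b.txt
}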
@@ -66,18 +89,146 @@ func (f *Fs) Features() *fs.Features {

 // Rmdir removes the root directory of the Fs object
 func (f *Fs) Rmdir(dir string) error {
-	return f.remotes[len(f.remotes)-1].Rmdir(dir)
+	return f.wr.Rmdir(dir)
 }

 // Hashes returns hash.HashNone to indicate remote hashing is unavailable
 func (f *Fs) Hashes() hash.Set {
-	// This could probably be set if all remotes share the same hashing algorithm
-	return hash.Set(hash.None)
+	return f.hashSet
 }

 // Mkdir makes the root directory of the Fs object
 func (f *Fs) Mkdir(dir string) error {
-	return f.remotes[len(f.remotes)-1].Mkdir(dir)
+	return f.wr.Mkdir(dir)
 }

+// Purge all files in the root and the root directory
+//
+// Implement this if you have a way of deleting all the files
+// quicker than just running Remove() on the result of List()
+//
+// Return an error if it doesn't exist
+func (f *Fs) Purge() error {
+	return f.wr.Features().Purge()
+}
+
+// Copy src to this remote using server side copy operations.
+//
+// This is stored with the remote path given
+//
+// It returns the destination Object and a possible error
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantCopy
+func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+	if src.Fs() != f.wr {
+		fs.Debugf(src, "Can't copy - not same remote type")
+		return nil, fs.ErrorCantCopy
+	}
+	o, err := f.wr.Features().Copy(src, remote)
+	if err != nil {
+		return nil, err
+	}
+	return f.wrapObject(o), nil
+}
+
+// Move src to this remote using server side move operations.
+//
+// This is stored with the remote path given
+//
+// It returns the destination Object and a possible error
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantMove
+func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+	if src.Fs() != f.wr {
+		fs.Debugf(src, "Can't move - not same remote type")
+		return nil, fs.ErrorCantMove
+	}
+	o, err := f.wr.Features().Move(src, remote)
+	if err != nil {
+		return nil, err
+	}
+	return f.wrapObject(o), err
+}
+
+// DirMove moves src, srcRemote to this remote at dstRemote
+// using server side move operations.
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantDirMove
+//
+// If destination exists then return fs.ErrorDirExists
+func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+	srcFs, ok := src.(*Fs)
+	if !ok {
+		fs.Debugf(srcFs, "Can't move directory - not same remote type")
+		return fs.ErrorCantDirMove
+	}
+	return f.wr.Features().DirMove(srcFs.wr, srcRemote, dstRemote)
+}
+
+// ChangeNotify calls the passed function with a path
+// that has had changes. If the implementation
+// uses polling, it should adhere to the given interval.
+// At least one value will be written to the channel,
+// specifying the initial value and updated values might
+// follow. A 0 Duration should pause the polling.
+// The ChangeNotify implemantion must empty the channel
+// regulary. When the channel gets closed, the implemantion
+// should stop polling and release resources.
+func (f *Fs) ChangeNotify(fn func(string, fs.EntryType), ch <-chan time.Duration) {
+	var remoteChans []chan time.Duration
+
+	for _, remote := range f.remotes {
+		if ChangeNotify := remote.Features().ChangeNotify; ChangeNotify != nil {
+			ch := make(chan time.Duration)
+			remoteChans = append(remoteChans, ch)
+			ChangeNotify(fn, ch)
+		}
+	}
+
+	go func() {
+		for i := range ch {
+			for _, c := range remoteChans {
+				c <- i
+			}
+		}
+		for _, c := range remoteChans {
+			close(c)
+		}
+	}()
+}
+
+// DirCacheFlush resets the directory cache - used in testing
+// as an optional interface
+func (f *Fs) DirCacheFlush() {
+	for _, remote := range f.remotes {
+		if DirCacheFlush := remote.Features().DirCacheFlush; DirCacheFlush != nil {
+			DirCacheFlush()
+		}
+	}
+}
+
+// PutStream uploads to the remote path with the modTime given of indeterminate size
+//
+// May create the object even if it returns an error - if so
+// will return the object and the error, otherwise will return
+// nil and the error
+func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	o, err := f.wr.Features().PutStream(in, src, options...)
+	if err != nil {
+		return nil, err
+	}
+	return f.wrapObject(o), err
+}
+
+// About gets quota information from the Fs
+func (f *Fs) About() (*fs.Usage, error) {
+	return f.wr.Features().About()
+}
+
 // Put in to the remote path with the modTime given of the given size
@@ -86,7 +237,11 @@ func (f *Fs) Mkdir(dir string) error {
 // will return the object and the error, otherwise will return
 // nil and the error
 func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.remotes[len(f.remotes)-1].Put(in, src, options...)
+	o, err := f.wr.Put(in, src, options...)
+	if err != nil {
+		return nil, err
+	}
+	return f.wrapObject(o), err
 }

 // List the objects and directories in dir into entries. The
@@ -117,8 +272,11 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 	if !found {
 		return nil, fs.ErrorDirNotFound
 	}
-	for key := range set {
-		entries = append(entries, set[key])
+	for _, entry := range set {
+		if o, ok := entry.(fs.Object); ok {
+			entry = f.wrapObject(o)
+		}
+		entries = append(entries, entry)
 	}
 	return entries, nil
 }
@@ -134,7 +292,7 @@ func (f *Fs) NewObject(path string) (fs.Object, error) {
 		if err != nil {
 			return nil, errors.Wrapf(err, "NewObject failed on %v", remote)
 		}
-		return obj, nil
+		return f.wrapObject(obj), nil
 	}
 	return nil, fs.ErrorObjectNotFound
 }
@@ -204,6 +362,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		root:    root,
 		opt:     *opt,
 		remotes: remotes,
+		wr:      remotes[len(remotes)-1],
 	}
 	var features = (&fs.Features{
 		CaseInsensitive: true,
@@ -212,16 +371,53 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
WriteMimeType: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
BucketBased: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
}).Fill(f)
|
||||
features = features.Mask(f.wr) // mask the features just on the writable fs
|
||||
|
||||
// FIXME maybe should be masking the bools here?
|
||||
|
||||
// Clear ChangeNotify and DirCacheFlush if all are nil
|
||||
clearChangeNotify := true
|
||||
clearDirCacheFlush := true
|
||||
for _, remote := range f.remotes {
|
||||
features = features.Mask(remote)
|
||||
remoteFeatures := remote.Features()
|
||||
if remoteFeatures.ChangeNotify != nil {
|
||||
clearChangeNotify = false
|
||||
}
|
||||
if remoteFeatures.DirCacheFlush != nil {
|
||||
clearDirCacheFlush = false
|
||||
}
|
||||
}
|
||||
if clearChangeNotify {
|
||||
features.ChangeNotify = nil
|
||||
}
|
||||
if clearDirCacheFlush {
|
||||
features.DirCacheFlush = nil
|
||||
}
|
||||
|
||||
f.features = features
|
||||
|
||||
// Get common intersection of hashes
|
||||
hashSet := f.remotes[0].Hashes()
|
||||
for _, remote := range f.remotes[1:] {
|
||||
hashSet = hashSet.Overlap(remote.Hashes())
|
||||
}
|
||||
f.hashSet = hashSet
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
)
|
||||
|
||||
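The NewFs hunk above only advertises hash types that every wrapped remote supports: it starts from the first remote's hash set and intersects pairwise with hashSet.Overlap. Below is a minimal standalone sketch of the same intersection pattern, using plain string sets instead of rclone's hash.Set; the overlap helper and the sample data are illustrative, not part of the diff.

    package main

    import "fmt"

    // overlap returns the keys present in both sets.
    func overlap(a, b map[string]bool) map[string]bool {
        out := make(map[string]bool)
        for k := range a {
            if b[k] {
                out[k] = true
            }
        }
        return out
    }

    func main() {
        // Hash types offered by three hypothetical remotes.
        remotes := []map[string]bool{
            {"md5": true, "sha1": true},
            {"md5": true, "quickxor": true},
            {"md5": true, "sha1": true},
        }
        // Start from the first remote and intersect with the rest, mirroring
        // hashSet = hashSet.Overlap(remote.Hashes()) in the hunk above.
        common := remotes[0]
        for _, r := range remotes[1:] {
            common = overlap(common, r)
        }
        fmt.Println(common) // map[md5:true]
    }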
@@ -6,7 +6,10 @@ import (
    "regexp"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/ncw/rclone/fs"
)

const (
@@ -145,8 +148,11 @@ var timeFormats = []string{
    time.RFC1123Z, // Fri, 05 Jan 2018 14:14:38 +0000 (as used by mydrive.ch)
    time.UnixDate, // Wed May 17 15:31:58 UTC 2017 (as used in an internal server)
    noZerosRFC1123, // Fri, 7 Sep 2018 08:49:58 GMT (as used by server in #2574)
    time.RFC3339, // Wed, 31 Oct 2018 13:57:11 CET (as used by komfortcloud.de)
}

var oneTimeError sync.Once

// UnmarshalXML turns XML into a Time
func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
    var v string
@@ -155,6 +161,12 @@ func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
        return err
    }

    // If time is missing then return the epoch
    if v == "" {
        *t = Time(time.Unix(0, 0))
        return nil
    }

    // Parse the time format in multiple possible ways
    var newT time.Time
    for _, timeFormat := range timeFormats {
@@ -164,5 +176,14 @@ func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
            break
        }
    }
    if err != nil {
        oneTimeError.Do(func() {
            fs.Errorf(nil, "Failed to parse time %q - using the epoch", v)
        })
        // Return the epoch instead
        *t = Time(time.Unix(0, 0))
        // ignore error
        err = nil
    }
    return err
}
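The time-parsing hunk above makes WebDAV timestamps tolerant of non-conforming servers: each layout in timeFormats is tried in turn, and if none matches, the value falls back to the Unix epoch with the failure logged only once via sync.Once. A self-contained sketch of that fallback pattern follows; the layouts and the log message here are illustrative stand-ins.

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    var layouts = []string{time.RFC1123, time.RFC1123Z, time.UnixDate, time.RFC3339}

    var logOnce sync.Once

    // parseTime tries each layout in turn and falls back to the epoch,
    // logging the first unparseable value only once.
    func parseTime(v string) time.Time {
        for _, layout := range layouts {
            if t, err := time.Parse(layout, v); err == nil {
                return t
            }
        }
        logOnce.Do(func() {
            fmt.Printf("Failed to parse time %q - using the epoch\n", v)
        })
        return time.Unix(0, 0)
    }

    func main() {
        fmt.Println(parseTime("Fri, 05 Jan 2018 14:14:38 +0000"))
        fmt.Println(parseTime("not a time")) // logs once, returns 1970-01-01
    }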
@@ -31,7 +31,6 @@ import (
    "github.com/ncw/rclone/backend/webdav/api"
    "github.com/ncw/rclone/backend/webdav/odrvcookie"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config"
    "github.com/ncw/rclone/fs/config/configmap"
    "github.com/ncw/rclone/fs/config/configstruct"
    "github.com/ncw/rclone/fs/config/obscure"
@@ -96,10 +95,11 @@ func init() {

// Options defines the configuration for this backend
type Options struct {
    URL string `config:"url"`
    Vendor string `config:"vendor"`
    User string `config:"user"`
    Pass string `config:"pass"`
    URL string `config:"url"`
    Vendor string `config:"vendor"`
    User string `config:"user"`
    Pass string `config:"pass"`
    BearerToken string `config:"bearer_token"`
}

// Fs represents a remote webdav
@@ -283,9 +283,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    rootIsDir := strings.HasSuffix(root, "/")
    root = strings.Trim(root, "/")

    user := config.FileGet(name, "user")
    pass := config.FileGet(name, "pass")
    bearerToken := config.FileGet(name, "bearer_token")
    if !strings.HasSuffix(opt.URL, "/") {
        opt.URL += "/"
    }
@@ -320,10 +317,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    f.features = (&fs.Features{
        CanHaveEmptyDirectories: true,
    }).Fill(f)
    if user != "" || pass != "" {
    if opt.User != "" || opt.Pass != "" {
        f.srv.SetUserPass(opt.User, opt.Pass)
    } else if bearerToken != "" {
        f.srv.SetHeader("Authorization", "BEARER "+bearerToken)
    } else if opt.BearerToken != "" {
        f.srv.SetHeader("Authorization", "BEARER "+opt.BearerToken)
    }
    f.srv.SetErrorHandler(errorHandler)
    err = f.setQuirks(opt.Vendor)
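The hunk above switches NewFs from raw config.FileGet lookups to the parsed Options struct and adds a bearer_token alternative to user/pass authentication. A rough sketch of the same precedence on a plain *http.Request follows; SetBasicAuth here stands in for the rest library's SetUserPass, and only the "BEARER " header casing is taken from the diff.

    package main

    import (
        "fmt"
        "net/http"
    )

    // applyAuth mirrors the precedence in NewFs: user/pass wins,
    // otherwise a bearer token sets the Authorization header directly.
    func applyAuth(req *http.Request, user, pass, bearerToken string) {
        if user != "" || pass != "" {
            req.SetBasicAuth(user, pass)
        } else if bearerToken != "" {
            req.Header.Set("Authorization", "BEARER "+bearerToken)
        }
    }

    func main() {
        req, _ := http.NewRequest("GET", "https://example.com/dav/", nil)
        applyAuth(req, "", "", "token123")
        fmt.Println(req.Header.Get("Authorization")) // BEARER token123
    }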
@@ -604,10 +601,9 @@ func (f *Fs) mkParentDir(dirPath string) error {
    return f.mkdir(parent)
}

// mkdir makes the directory and parents using native paths
func (f *Fs) mkdir(dirPath string) error {
    // defer log.Trace(dirPath, "")("")
    // We assume the root is already ceated
// low level mkdir, only makes the directory, doesn't attempt to create parents
func (f *Fs) _mkdir(dirPath string) error {
    // We assume the root is already created
    if dirPath == "" {
        return nil
    }
@@ -620,20 +616,26 @@ func (f *Fs) mkdir(dirPath string) error {
        Path: dirPath,
        NoResponse: true,
    }
    err := f.pacer.Call(func() (bool, error) {
    return f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.Call(&opts)
        return shouldRetry(resp, err)
    })
}

// mkdir makes the directory and parents using native paths
func (f *Fs) mkdir(dirPath string) error {
    // defer log.Trace(dirPath, "")("")
    err := f._mkdir(dirPath)
    if apiErr, ok := err.(*api.Error); ok {
        // already exists
        if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable {
            return nil
        }
        // parent does not exists
        // parent does not exist
        if apiErr.StatusCode == http.StatusConflict {
            err = f.mkParentDir(dirPath)
            if err == nil {
                err = f.mkdir(dirPath)
                err = f._mkdir(dirPath)
            }
        }
    }
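The mkdir refactor above splits directory creation into a low-level _mkdir, which only issues the create call for the directory itself, and a wrapper mkdir that treats 405/406 as "already exists" and, on 409 Conflict, creates the parent first and then retries _mkdir once, so recursion happens through mkParentDir rather than looping. Here is a toy sketch of that recurse-on-conflict shape against an in-memory "server"; every name in it is illustrative, not rclone API.

    package main

    import (
        "fmt"
        "path"
    )

    // dirs is a stand-in for the remote server's directory state.
    var dirs = map[string]bool{"": true} // the root already exists

    // errConflict mimics the server's 409 response when the parent is missing.
    var errConflict = fmt.Errorf("409 conflict: parent does not exist")

    func parentOf(dirPath string) string {
        if p := path.Dir(dirPath); p != "." && p != "/" {
            return p
        }
        return ""
    }

    // lowMkdir only creates dirPath itself, like _mkdir in the diff.
    func lowMkdir(dirPath string) error {
        if !dirs[parentOf(dirPath)] {
            return errConflict
        }
        dirs[dirPath] = true
        return nil
    }

    // mkdir creates the parent on conflict, then retries lowMkdir once,
    // mirroring the mkParentDir/_mkdir interplay above.
    func mkdir(dirPath string) error {
        err := lowMkdir(dirPath)
        if err == errConflict {
            if err = mkdir(parentOf(dirPath)); err == nil {
                err = lowMkdir(dirPath)
            }
        }
        return err
    }

    func main() {
        fmt.Println(mkdir("a/b/c")) // <nil>
        fmt.Println(dirs)           // map[:true a:true a/b:true a/b/c:true]
    }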
@@ -968,6 +970,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
        Body: in,
        NoResponse: true,
        ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
        ContentType: fs.MimeType(src),
    }
    if o.fs.useOCMtime {
        opts.ExtraHeaders = map[string]string{
@@ -979,6 +982,12 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
        return shouldRetry(resp, err)
    })
    if err != nil {
        // Give the WebDAV server a chance to get its internal state in order after the
        // error. The error may have been local in which case we closed the connection.
        // The server may still be dealing with it for a moment. A sleep isn't ideal but I
        // haven't been able to think of a better method to find out if the server has
        // finished - ncw
        time.Sleep(1 * time.Second)
        // Remove failed upload
        _ = o.Remove()
        return err
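The Update hunk above also sends the Content-Type and, on upload failure, pauses briefly and then deletes the partial object so a corrupted file is not left on the server. A stripped-down sketch of that error path; upload and remove are hypothetical stand-ins for the PUT call and o.Remove().

    package main

    import (
        "fmt"
        "time"
    )

    // upload and remove are stand-ins for the PUT call and o.Remove() in the diff.
    func upload(data string) error { return fmt.Errorf("connection reset") }
    func remove() error            { fmt.Println("removed partial upload"); return nil }

    // putWithCleanup mirrors the shape of the Update error path: on failure,
    // give the server a moment to settle, then best-effort delete the partial
    // object and report the original error.
    func putWithCleanup(data string) error {
        err := upload(data)
        if err != nil {
            time.Sleep(1 * time.Second) // let the server finish handling the aborted request
            _ = remove()                // ignore cleanup errors; the upload error matters more
            return err
        }
        return nil
    }

    func main() {
        fmt.Println(putWithCleanup("hello")) // connection reset
    }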
@@ -1,34 +0,0 @@
package src

//from yadisk

import (
    "io"
    "net/http"
)

//RootAddr is the base URL for Yandex Disk API.
const RootAddr = "https://cloud-api.yandex.com" //also https://cloud-api.yandex.net and https://cloud-api.yandex.ru

func (c *Client) setRequestScope(req *http.Request) {
    req.Header.Add("Accept", "application/json")
    req.Header.Add("Content-Type", "application/json")
    req.Header.Add("Authorization", "OAuth "+c.token)
}

func (c *Client) scopedRequest(method, urlPath string, body io.Reader) (*http.Request, error) {
    fullURL := RootAddr
    if urlPath[:1] != "/" {
        fullURL += "/" + urlPath
    } else {
        fullURL += urlPath
    }

    req, err := http.NewRequest(method, fullURL, body)
    if err != nil {
        return req, err
    }

    c.setRequestScope(req)
    return req, nil
}
@@ -1,135 +0,0 @@
package src

import (
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "net/url"
    "strings"

    "github.com/pkg/errors"
)

//Client struct
type Client struct {
    token string
    basePath string
    HTTPClient *http.Client
}

//NewClient creates new client
func NewClient(token string, client ...*http.Client) *Client {
    return newClientInternal(
        token,
        "https://cloud-api.yandex.com/v1/disk", //also "https://cloud-api.yandex.net/v1/disk" "https://cloud-api.yandex.ru/v1/disk"
        client...)
}

func newClientInternal(token string, basePath string, client ...*http.Client) *Client {
    c := &Client{
        token: token,
        basePath: basePath,
    }
    if len(client) != 0 {
        c.HTTPClient = client[0]
    } else {
        c.HTTPClient = http.DefaultClient
    }
    return c
}

//ErrorHandler type
type ErrorHandler func(*http.Response) error

var defaultErrorHandler ErrorHandler = func(resp *http.Response) error {
    if resp.StatusCode/100 == 5 {
        return errors.New("server error")
    }

    if resp.StatusCode/100 == 4 {
        var response DiskClientError
        contents, _ := ioutil.ReadAll(resp.Body)
        err := json.Unmarshal(contents, &response)
        if err != nil {
            return err
        }
        return response
    }

    if resp.StatusCode/100 == 3 {
        return errors.New("redirect error")
    }
    return nil
}

func (HTTPRequest *HTTPRequest) run(client *Client) ([]byte, error) {
    var err error
    values := make(url.Values)
    if HTTPRequest.Parameters != nil {
        for k, v := range HTTPRequest.Parameters {
            values.Set(k, fmt.Sprintf("%v", v))
        }
    }

    var req *http.Request
    if HTTPRequest.Method == "POST" {
        // TODO json serialize
        req, err = http.NewRequest(
            "POST",
            client.basePath+HTTPRequest.Path,
            strings.NewReader(values.Encode()))
        if err != nil {
            return nil, err
        }
        // TODO
        // req.Header.Set("Content-Type", "application/json")
    } else {
        req, err = http.NewRequest(
            HTTPRequest.Method,
            client.basePath+HTTPRequest.Path+"?"+values.Encode(),
            nil)
        if err != nil {
            return nil, err
        }
    }

    for headerName := range HTTPRequest.Headers {
        var headerValues = HTTPRequest.Headers[headerName]
        for _, headerValue := range headerValues {
            req.Header.Set(headerName, headerValue)
        }
    }
    return runRequest(client, req)
}

func runRequest(client *Client, req *http.Request) ([]byte, error) {
    return runRequestWithErrorHandler(client, req, defaultErrorHandler)
}

func runRequestWithErrorHandler(client *Client, req *http.Request, errorHandler ErrorHandler) (out []byte, err error) {
    resp, err := client.HTTPClient.Do(req)
    if err != nil {
        return nil, err
    }
    defer CheckClose(resp.Body, &err)

    return checkResponseForErrorsWithErrorHandler(resp, errorHandler)
}

func checkResponseForErrorsWithErrorHandler(resp *http.Response, errorHandler ErrorHandler) ([]byte, error) {
    if resp.StatusCode/100 > 2 {
        return nil, errorHandler(resp)
    }
    return ioutil.ReadAll(resp.Body)
}

// CheckClose is a utility function used to check the return from
// Close in a defer statement.
func CheckClose(c io.Closer, err *error) {
    cerr := c.Close()
    if *err == nil {
        *err = cerr
    }
}
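The deleted client above relied on a CheckClose helper so that a deferred Close error is surfaced only when no earlier error occurred, instead of silently masking it. The same pattern in a standalone, runnable form:

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    // checkClose propagates a Close error only if no earlier error occurred.
    func checkClose(c io.Closer, err *error) {
        if cerr := c.Close(); *err == nil {
            *err = cerr
        }
    }

    // readAll closes r via the deferred helper; a Close failure is reported
    // through the named return value unless the read already failed.
    func readAll(r io.ReadCloser) (s string, err error) {
        defer checkClose(r, &err)
        b, err := io.ReadAll(r)
        return string(b), err
    }

    func main() {
        s, err := readAll(io.NopCloser(strings.NewReader("data")))
        fmt.Println(s, err) // data <nil>
    }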
@@ -1,51 +0,0 @@
package src

import (
    "bytes"
    "encoding/json"
    "io"
    "net/url"
)

//CustomPropertyResponse struct we send and is returned by the API for CustomProperty request.
type CustomPropertyResponse struct {
    CustomProperties map[string]interface{} `json:"custom_properties"`
}

//SetCustomProperty will set specified data from Yandex Disk
func (c *Client) SetCustomProperty(remotePath string, property string, value string) error {
    rcm := map[string]interface{}{
        property: value,
    }
    cpr := CustomPropertyResponse{rcm}
    data, _ := json.Marshal(cpr)
    body := bytes.NewReader(data)
    err := c.SetCustomPropertyRequest(remotePath, body)
    if err != nil {
        return err
    }
    return err
}

//SetCustomPropertyRequest will make an CustomProperty request and return a URL to CustomProperty data to.
func (c *Client) SetCustomPropertyRequest(remotePath string, body io.Reader) (err error) {
    values := url.Values{}
    values.Add("path", remotePath)
    req, err := c.scopedRequest("PATCH", "/v1/disk/resources?"+values.Encode(), body)
    if err != nil {
        return err
    }

    resp, err := c.HTTPClient.Do(req)
    if err != nil {
        return err
    }
    if err := CheckAPIError(resp); err != nil {
        return err
    }
    defer CheckClose(resp.Body, &err)

    //If needed we can read response and check if custom_property is set.

    return nil
}
@@ -1,23 +0,0 @@
package src

import (
    "net/url"
    "strconv"
)

// Delete will remove specified file/folder from Yandex Disk
func (c *Client) Delete(remotePath string, permanently bool) error {

    values := url.Values{}
    values.Add("permanently", strconv.FormatBool(permanently))
    values.Add("path", remotePath)
    urlPath := "/v1/disk/resources?" + values.Encode()
    fullURL := RootAddr
    if urlPath[:1] != "/" {
        fullURL += "/" + urlPath
    } else {
        fullURL += urlPath
    }

    return c.PerformDelete(fullURL)
}
@@ -1,48 +0,0 @@
package src

import "encoding/json"

//DiskInfoRequest type
type DiskInfoRequest struct {
    client *Client
    HTTPRequest *HTTPRequest
}

func (req *DiskInfoRequest) request() *HTTPRequest {
    return req.HTTPRequest
}

//DiskInfoResponse struct is returned by the API for DiskInfo request.
type DiskInfoResponse struct {
    TrashSize uint64 `json:"TrashSize"`
    TotalSpace uint64 `json:"TotalSpace"`
    UsedSpace uint64 `json:"UsedSpace"`
    SystemFolders map[string]string `json:"SystemFolders"`
}

//NewDiskInfoRequest create new DiskInfo Request
func (c *Client) NewDiskInfoRequest() *DiskInfoRequest {
    return &DiskInfoRequest{
        client: c,
        HTTPRequest: createGetRequest(c, "/", nil),
    }
}

//Exec run DiskInfo Request
func (req *DiskInfoRequest) Exec() (*DiskInfoResponse, error) {
    data, err := req.request().run(req.client)
    if err != nil {
        return nil, err
    }

    var info DiskInfoResponse
    err = json.Unmarshal(data, &info)
    if err != nil {
        return nil, err
    }
    if info.SystemFolders == nil {
        info.SystemFolders = make(map[string]string)
    }

    return &info, nil
}
@@ -1,66 +0,0 @@
package src

import (
    "encoding/json"
    "io"
    "net/url"
)

// DownloadResponse struct is returned by the API for Download request.
type DownloadResponse struct {
    HRef string `json:"href"`
    Method string `json:"method"`
    Templated bool `json:"templated"`
}

// Download will get specified data from Yandex.Disk supplying the extra headers
func (c *Client) Download(remotePath string, headers map[string]string) (io.ReadCloser, error) { //io.Writer
    ur, err := c.DownloadRequest(remotePath)
    if err != nil {
        return nil, err
    }
    return c.PerformDownload(ur.HRef, headers)
}

// DownloadRequest will make an download request and return a URL to download data to.
func (c *Client) DownloadRequest(remotePath string) (ur *DownloadResponse, err error) {
    values := url.Values{}
    values.Add("path", remotePath)

    req, err := c.scopedRequest("GET", "/v1/disk/resources/download?"+values.Encode(), nil)
    if err != nil {
        return nil, err
    }

    resp, err := c.HTTPClient.Do(req)
    if err != nil {
        return nil, err
    }
    if err := CheckAPIError(resp); err != nil {
        return nil, err
    }
    defer CheckClose(resp.Body, &err)

    ur, err = ParseDownloadResponse(resp.Body)
    if err != nil {
        return nil, err
    }

    return ur, nil
}

// ParseDownloadResponse tries to read and parse DownloadResponse struct.
func ParseDownloadResponse(data io.Reader) (*DownloadResponse, error) {
    dec := json.NewDecoder(data)
    var ur DownloadResponse

    if err := dec.Decode(&ur); err == io.EOF {
        // ok
    } else if err != nil {
        return nil, err
    }

    // TODO: check if there is any trash data after JSON and crash if there is.

    return &ur, nil
}
@@ -1,9 +0,0 @@
package src

// EmptyTrash will permanently delete all trashed files/folders from Yandex Disk
func (c *Client) EmptyTrash() error {
    fullURL := RootAddr
    fullURL += "/v1/disk/trash/resources"

    return c.PerformDelete(fullURL)
}
@@ -1,84 +0,0 @@
package src

//from yadisk

import (
    "encoding/json"
    "fmt"
    "io"
    "net/http"
)

// ErrorResponse represents erroneous API response.
// Implements go's built in `error`.
type ErrorResponse struct {
    ErrorName string `json:"error"`
    Description string `json:"description"`
    Message string `json:"message"`

    StatusCode int `json:""`
}

func (e *ErrorResponse) Error() string {
    return fmt.Sprintf("[%d - %s] %s (%s)", e.StatusCode, e.ErrorName, e.Description, e.Message)
}

// ProccessErrorResponse tries to represent data passed as
// an ErrorResponse object.
func ProccessErrorResponse(data io.Reader) (*ErrorResponse, error) {
    dec := json.NewDecoder(data)
    var errorResponse ErrorResponse

    if err := dec.Decode(&errorResponse); err == io.EOF {
        // ok
    } else if err != nil {
        return nil, err
    }

    // TODO: check if there is any trash data after JSON and crash if there is.

    return &errorResponse, nil
}

// CheckAPIError is a convenient function to turn erroneous
// API response into go error. It closes the Body on error.
func CheckAPIError(resp *http.Response) (err error) {
    if resp.StatusCode >= 200 && resp.StatusCode < 400 {
        return nil
    }

    defer CheckClose(resp.Body, &err)

    errorResponse, err := ProccessErrorResponse(resp.Body)
    if err != nil {
        return err
    }
    errorResponse.StatusCode = resp.StatusCode

    return errorResponse
}

// ProccessErrorString tries to represent data passed as
// an ErrorResponse object.
func ProccessErrorString(data string) (*ErrorResponse, error) {
    var errorResponse ErrorResponse
    if err := json.Unmarshal([]byte(data), &errorResponse); err == nil {
        // ok
    } else if err != nil {
        return nil, err
    }

    // TODO: check if there is any trash data after JSON and crash if there is.

    return &errorResponse, nil
}

// ParseAPIError Parse json error response from API
func (c *Client) ParseAPIError(jsonErr string) (string, error) { //ErrorName
    errorResponse, err := ProccessErrorString(jsonErr)
    if err != nil {
        return err.Error(), err
    }

    return errorResponse.ErrorName, nil
}
@@ -1,14 +0,0 @@
package src

import "encoding/json"

//DiskClientError struct
type DiskClientError struct {
    Description string `json:"Description"`
    Code string `json:"Error"`
}

func (e DiskClientError) Error() string {
    b, _ := json.Marshal(e)
    return string(b)
}
@@ -1,8 +0,0 @@
package src

// FilesResourceListResponse struct is returned by the API for requests.
type FilesResourceListResponse struct {
    Items []ResourceInfoResponse `json:"items"`
    Limit *uint64 `json:"limit"`
    Offset *uint64 `json:"offset"`
}
@@ -1,78 +0,0 @@
package src

import (
    "encoding/json"
    "strings"
)

// FlatFileListRequest struct client for FlatFileList Request
type FlatFileListRequest struct {
    client *Client
    HTTPRequest *HTTPRequest
}

// FlatFileListRequestOptions struct - options for request
type FlatFileListRequestOptions struct {
    MediaType []MediaType
    Limit *uint32
    Offset *uint32
    Fields []string
    PreviewSize *PreviewSize
    PreviewCrop *bool
}

// Request get request
func (req *FlatFileListRequest) Request() *HTTPRequest {
    return req.HTTPRequest
}

// NewFlatFileListRequest create new FlatFileList Request
func (c *Client) NewFlatFileListRequest(options ...FlatFileListRequestOptions) *FlatFileListRequest {
    var parameters = make(map[string]interface{})
    if len(options) > 0 {
        opt := options[0]
        if opt.Limit != nil {
            parameters["limit"] = *opt.Limit
        }
        if opt.Offset != nil {
            parameters["offset"] = *opt.Offset
        }
        if opt.Fields != nil {
            parameters["fields"] = strings.Join(opt.Fields, ",")
        }
        if opt.PreviewSize != nil {
            parameters["preview_size"] = opt.PreviewSize.String()
        }
        if opt.PreviewCrop != nil {
            parameters["preview_crop"] = *opt.PreviewCrop
        }
        if opt.MediaType != nil {
            var strMediaTypes = make([]string, len(opt.MediaType))
            for i, t := range opt.MediaType {
                strMediaTypes[i] = t.String()
            }
            parameters["media_type"] = strings.Join(strMediaTypes, ",")
        }
    }
    return &FlatFileListRequest{
        client: c,
        HTTPRequest: createGetRequest(c, "/resources/files", parameters),
    }
}

// Exec run FlatFileList Request
func (req *FlatFileListRequest) Exec() (*FilesResourceListResponse, error) {
    data, err := req.Request().run(req.client)
    if err != nil {
        return nil, err
    }
    var info FilesResourceListResponse
    err = json.Unmarshal(data, &info)
    if err != nil {
        return nil, err
    }
    if cap(info.Items) == 0 {
        info.Items = []ResourceInfoResponse{}
    }
    return &info, nil
}
@@ -1,24 +0,0 @@
package src

// HTTPRequest struct
type HTTPRequest struct {
    Method string
    Path string
    Parameters map[string]interface{}
    Headers map[string][]string
}

func createGetRequest(client *Client, path string, params map[string]interface{}) *HTTPRequest {
    return createRequest(client, "GET", path, params)
}

func createRequest(client *Client, method string, path string, parameters map[string]interface{}) *HTTPRequest {
    var headers = make(map[string][]string)
    headers["Authorization"] = []string{"OAuth " + client.token}
    return &HTTPRequest{
        Method: method,
        Path: path,
        Parameters: parameters,
        Headers: headers,
    }
}
@@ -1,7 +0,0 @@
package src

// LastUploadedResourceListResponse struct
type LastUploadedResourceListResponse struct {
    Items []ResourceInfoResponse `json:"items"`
    Limit *uint64 `json:"limit"`
}
@@ -1,74 +0,0 @@
package src

import (
    "encoding/json"
    "strings"
)

// LastUploadedResourceListRequest struct
type LastUploadedResourceListRequest struct {
    client *Client
    HTTPRequest *HTTPRequest
}

// LastUploadedResourceListRequestOptions struct
type LastUploadedResourceListRequestOptions struct {
    MediaType []MediaType
    Limit *uint32
    Fields []string
    PreviewSize *PreviewSize
    PreviewCrop *bool
}

// Request return request
func (req *LastUploadedResourceListRequest) Request() *HTTPRequest {
    return req.HTTPRequest
}

// NewLastUploadedResourceListRequest create new LastUploadedResourceList Request
func (c *Client) NewLastUploadedResourceListRequest(options ...LastUploadedResourceListRequestOptions) *LastUploadedResourceListRequest {
    var parameters = make(map[string]interface{})
    if len(options) > 0 {
        opt := options[0]
        if opt.Limit != nil {
            parameters["limit"] = opt.Limit
        }
        if opt.Fields != nil {
            parameters["fields"] = strings.Join(opt.Fields, ",")
        }
        if opt.PreviewSize != nil {
            parameters["preview_size"] = opt.PreviewSize.String()
        }
        if opt.PreviewCrop != nil {
            parameters["preview_crop"] = opt.PreviewCrop
        }
        if opt.MediaType != nil {
            var strMediaTypes = make([]string, len(opt.MediaType))
            for i, t := range opt.MediaType {
                strMediaTypes[i] = t.String()
            }
            parameters["media_type"] = strings.Join(strMediaTypes, ",")
        }
    }
    return &LastUploadedResourceListRequest{
        client: c,
        HTTPRequest: createGetRequest(c, "/resources/last-uploaded", parameters),
    }
}

// Exec run LastUploadedResourceList Request
func (req *LastUploadedResourceListRequest) Exec() (*LastUploadedResourceListResponse, error) {
    data, err := req.Request().run(req.client)
    if err != nil {
        return nil, err
    }
    var info LastUploadedResourceListResponse
    err = json.Unmarshal(data, &info)
    if err != nil {
        return nil, err
    }
    if cap(info.Items) == 0 {
        info.Items = []ResourceInfoResponse{}
    }
    return &info, nil
}
@@ -1,144 +0,0 @@
package src

// MediaType struct - media types
type MediaType struct {
    mediaType string
}

// Audio - media type
func (m *MediaType) Audio() *MediaType {
    return &MediaType{
        mediaType: "audio",
    }
}

// Backup - media type
func (m *MediaType) Backup() *MediaType {
    return &MediaType{
        mediaType: "backup",
    }
}

// Book - media type
func (m *MediaType) Book() *MediaType {
    return &MediaType{
        mediaType: "book",
    }
}

// Compressed - media type
func (m *MediaType) Compressed() *MediaType {
    return &MediaType{
        mediaType: "compressed",
    }
}

// Data - media type
func (m *MediaType) Data() *MediaType {
    return &MediaType{
        mediaType: "data",
    }
}

// Development - media type
func (m *MediaType) Development() *MediaType {
    return &MediaType{
        mediaType: "development",
    }
}

// Diskimage - media type
func (m *MediaType) Diskimage() *MediaType {
    return &MediaType{
        mediaType: "diskimage",
    }
}

// Document - media type
func (m *MediaType) Document() *MediaType {
    return &MediaType{
        mediaType: "document",
    }
}

// Encoded - media type
func (m *MediaType) Encoded() *MediaType {
    return &MediaType{
        mediaType: "encoded",
    }
}

// Executable - media type
func (m *MediaType) Executable() *MediaType {
    return &MediaType{
        mediaType: "executable",
    }
}

// Flash - media type
func (m *MediaType) Flash() *MediaType {
    return &MediaType{
        mediaType: "flash",
    }
}

// Font - media type
func (m *MediaType) Font() *MediaType {
    return &MediaType{
        mediaType: "font",
    }
}

// Image - media type
func (m *MediaType) Image() *MediaType {
    return &MediaType{
        mediaType: "image",
    }
}

// Settings - media type
func (m *MediaType) Settings() *MediaType {
    return &MediaType{
        mediaType: "settings",
    }
}

// Spreadsheet - media type
func (m *MediaType) Spreadsheet() *MediaType {
    return &MediaType{
        mediaType: "spreadsheet",
    }
}

// Text - media type
func (m *MediaType) Text() *MediaType {
    return &MediaType{
        mediaType: "text",
    }
}

// Unknown - media type
func (m *MediaType) Unknown() *MediaType {
    return &MediaType{
        mediaType: "unknown",
    }
}

// Video - media type
func (m *MediaType) Video() *MediaType {
    return &MediaType{
        mediaType: "video",
    }
}

// Web - media type
func (m *MediaType) Web() *MediaType {
    return &MediaType{
        mediaType: "web",
    }
}

// String - media type
func (m *MediaType) String() string {
    return m.mediaType
}
@@ -1,21 +0,0 @@
package src

import (
    "net/url"
)

// Mkdir will make specified folder on Yandex Disk
func (c *Client) Mkdir(remotePath string) (int, string, error) {

    values := url.Values{}
    values.Add("path", remotePath) // only one current folder will be created. Not all the folders in the path.
    urlPath := "/v1/disk/resources?" + values.Encode()
    fullURL := RootAddr
    if urlPath[:1] != "/" {
        fullURL += "/" + urlPath
    } else {
        fullURL += urlPath
    }

    return c.PerformMkdir(fullURL)
}
@@ -1,35 +0,0 @@
package src

import (
    "io/ioutil"
    "net/http"

    "github.com/pkg/errors"
)

// PerformDelete does the actual delete via DELETE request.
func (c *Client) PerformDelete(url string) error {
    req, err := http.NewRequest("DELETE", url, nil)
    if err != nil {
        return err
    }

    //set access token and headers
    c.setRequestScope(req)

    resp, err := c.HTTPClient.Do(req)
    if err != nil {
        return err
    }

    //204 - resource deleted.
    //202 - folder not empty, content will be deleted soon (async delete).
    if resp.StatusCode != 204 && resp.StatusCode != 202 {
        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            return err
        }
        return errors.Errorf("delete error [%d]: %s", resp.StatusCode, string(body))
    }
    return nil
}
@@ -1,40 +0,0 @@
package src

import (
    "io"
    "io/ioutil"
    "net/http"

    "github.com/pkg/errors"
)

// PerformDownload does the actual download via unscoped GET request.
func (c *Client) PerformDownload(url string, headers map[string]string) (out io.ReadCloser, err error) {
    req, err := http.NewRequest("GET", url, nil)
    if err != nil {
        return nil, err
    }

    // Set any extra headers
    for k, v := range headers {
        req.Header.Set(k, v)
    }

    //c.setRequestScope(req)

    resp, err := c.HTTPClient.Do(req)
    if err != nil {
        return nil, err
    }

    _, isRanging := req.Header["Range"]
    if !(resp.StatusCode == http.StatusOK || (isRanging && resp.StatusCode == http.StatusPartialContent)) {
        defer CheckClose(resp.Body, &err)
        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            return nil, err
        }
        return nil, errors.Errorf("download error [%d]: %s", resp.StatusCode, string(body))
    }
    return resp.Body, err
}
@@ -1,34 +0,0 @@
package src

import (
    "io/ioutil"
    "net/http"

    "github.com/pkg/errors"
)

// PerformMkdir does the actual mkdir via PUT request.
func (c *Client) PerformMkdir(url string) (int, string, error) {
    req, err := http.NewRequest("PUT", url, nil)
    if err != nil {
        return 0, "", err
    }

    //set access token and headers
    c.setRequestScope(req)

    resp, err := c.HTTPClient.Do(req)
    if err != nil {
        return 0, "", err
    }

    if resp.StatusCode != 201 {
        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            return 0, "", err
        }
        //third parameter is the json error response body
        return resp.StatusCode, string(body), errors.Errorf("create folder error [%d]: %s", resp.StatusCode, string(body))
    }
    return resp.StatusCode, "", nil
}
@@ -1,38 +0,0 @@
package src

//from yadisk

import (
    "io"
    "io/ioutil"
    "net/http"

    "github.com/pkg/errors"
)

// PerformUpload does the actual upload via unscoped PUT request.
func (c *Client) PerformUpload(url string, data io.Reader, contentType string) (err error) {
    req, err := http.NewRequest("PUT", url, data)
    if err != nil {
        return err
    }
    req.Header.Set("Content-Type", contentType)

    //c.setRequestScope(req)

    resp, err := c.HTTPClient.Do(req)
    if err != nil {
        return err
    }
    defer CheckClose(resp.Body, &err)

    if resp.StatusCode != 201 {
        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            return err
        }

        return errors.Errorf("upload error [%d]: %s", resp.StatusCode, string(body))
    }
    return nil
}
@@ -1,75 +0,0 @@
package src

import "fmt"

// PreviewSize struct
type PreviewSize struct {
    size string
}

// PredefinedSizeS - set preview size
func (s *PreviewSize) PredefinedSizeS() *PreviewSize {
    return &PreviewSize{
        size: "S",
    }
}

// PredefinedSizeM - set preview size
func (s *PreviewSize) PredefinedSizeM() *PreviewSize {
    return &PreviewSize{
        size: "M",
    }
}

// PredefinedSizeL - set preview size
func (s *PreviewSize) PredefinedSizeL() *PreviewSize {
    return &PreviewSize{
        size: "L",
    }
}

// PredefinedSizeXL - set preview size
func (s *PreviewSize) PredefinedSizeXL() *PreviewSize {
    return &PreviewSize{
        size: "XL",
    }
}

// PredefinedSizeXXL - set preview size
func (s *PreviewSize) PredefinedSizeXXL() *PreviewSize {
    return &PreviewSize{
        size: "XXL",
    }
}

// PredefinedSizeXXXL - set preview size
func (s *PreviewSize) PredefinedSizeXXXL() *PreviewSize {
    return &PreviewSize{
        size: "XXXL",
    }
}

// ExactWidth - set preview size
func (s *PreviewSize) ExactWidth(width uint32) *PreviewSize {
    return &PreviewSize{
        size: fmt.Sprintf("%dx", width),
    }
}

// ExactHeight - set preview size
func (s *PreviewSize) ExactHeight(height uint32) *PreviewSize {
    return &PreviewSize{
        size: fmt.Sprintf("x%d", height),
    }
}

// ExactSize - set preview size
func (s *PreviewSize) ExactSize(width uint32, height uint32) *PreviewSize {
    return &PreviewSize{
        size: fmt.Sprintf("%dx%d", width, height),
    }
}

func (s *PreviewSize) String() string {
    return s.size
}
@@ -1,19 +0,0 @@
package src

//ResourceInfoResponse struct is returned by the API for metedata requests.
type ResourceInfoResponse struct {
    PublicKey string `json:"public_key"`
    Name string `json:"name"`
    Created string `json:"created"`
    CustomProperties map[string]interface{} `json:"custom_properties"`
    Preview string `json:"preview"`
    PublicURL string `json:"public_url"`
    OriginPath string `json:"origin_path"`
    Modified string `json:"modified"`
    Path string `json:"path"`
    Md5 string `json:"md5"`
    ResourceType string `json:"type"`
    MimeType string `json:"mime_type"`
    Size uint64 `json:"size"`
    Embedded *ResourceListResponse `json:"_embedded"`
}
@@ -1,45 +0,0 @@
package src

import "encoding/json"

// ResourceInfoRequest struct
type ResourceInfoRequest struct {
    client *Client
    HTTPRequest *HTTPRequest
}

// Request of ResourceInfoRequest
func (req *ResourceInfoRequest) Request() *HTTPRequest {
    return req.HTTPRequest
}

// NewResourceInfoRequest create new ResourceInfo Request
func (c *Client) NewResourceInfoRequest(path string, options ...ResourceInfoRequestOptions) *ResourceInfoRequest {
    return &ResourceInfoRequest{
        client: c,
        HTTPRequest: createResourceInfoRequest(c, "/resources", path, options...),
    }
}

// Exec run ResourceInfo Request
func (req *ResourceInfoRequest) Exec() (*ResourceInfoResponse, error) {
    data, err := req.Request().run(req.client)
    if err != nil {
        return nil, err
    }

    var info ResourceInfoResponse
    err = json.Unmarshal(data, &info)
    if err != nil {
        return nil, err
    }
    if info.CustomProperties == nil {
        info.CustomProperties = make(map[string]interface{})
    }
    if info.Embedded != nil {
        if cap(info.Embedded.Items) == 0 {
            info.Embedded.Items = []ResourceInfoResponse{}
        }
    }
    return &info, nil
}
@@ -1,33 +0,0 @@
package src

import "strings"

func createResourceInfoRequest(c *Client,
    apiPath string,
    path string,
    options ...ResourceInfoRequestOptions) *HTTPRequest {
    var parameters = make(map[string]interface{})
    parameters["path"] = path
    if len(options) > 0 {
        opt := options[0]
        if opt.SortMode != nil {
            parameters["sort"] = opt.SortMode.String()
        }
        if opt.Limit != nil {
            parameters["limit"] = *opt.Limit
        }
        if opt.Offset != nil {
            parameters["offset"] = *opt.Offset
        }
        if opt.Fields != nil {
            parameters["fields"] = strings.Join(opt.Fields, ",")
        }
        if opt.PreviewSize != nil {
            parameters["preview_size"] = opt.PreviewSize.String()
        }
        if opt.PreviewCrop != nil {
            parameters["preview_crop"] = *opt.PreviewCrop
        }
    }
    return createGetRequest(c, apiPath, parameters)
}