Mirror of https://github.com/rclone/rclone.git (synced 2026-01-24 13:23:14 +00:00)

Compare commits: hensur-bug...adb-remote (427 commits)
.circleci/config.yml

@@ -1,3 +1,4 @@
+---
 version: 2
 
 jobs:
@@ -13,10 +14,10 @@ jobs:
       - run:
          name: Cross-compile rclone
          command: |
-           docker pull billziss/xgo-cgofuse
+           docker pull rclone/xgo-cgofuse
            go get -v github.com/karalabe/xgo
            xgo \
-               --image=billziss/xgo-cgofuse \
+               --image=rclone/xgo-cgofuse \
                --targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
                -tags cmount \
                .
@@ -29,6 +30,21 @@
          command: |
-           mkdir -p /tmp/rclone.dist
-           cp -R rclone-* /tmp/rclone.dist
+           mkdir build
+           cp -R rclone-* build/
+
+      - run:
+         name: Build rclone
+         command: |
+           go version
+           go build
+
+      - run:
+         name: Upload artifacts
+         command: |
+           if [[ $CIRCLE_PULL_REQUEST != "" ]]; then
+             make circleci_upload
+           fi
 
       - store_artifacts:
          path: /tmp/rclone.dist
 
.github/ISSUE_TEMPLATE.md (new file, 31 lines)

@@ -0,0 +1,31 @@
<!--

Welcome :-) We understand you are having a problem with rclone; we want to help you with that!

If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:

https://forum.rclone.org/

instead of filing an issue for a quick response.

If you are reporting a bug or asking for a new feature then please use one of the templates here:

https://github.com/ncw/rclone/issues/new

otherwise fill in the form below.

Thank you

The Rclone Developers

-->


#### Output of `rclone version`



#### Describe the issue

Bug report issue template

@@ -1,14 +1,17 @@
+---
+name: Bug report
+about: Report a problem with rclone
+---
+
 <!--
 
-Hi!
-
-We understand you are having a problem with rclone or have an idea for an improvement - we want to help you with that!
+Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
 
-If you've just got a question or aren't sure if you've found a bug then please use the rclone forum
+If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
 
 https://forum.rclone.org/
 
-instead of filing an issue. We'll reply quickly and it won't increase our massive issue backlog.
+instead of filing an issue for a quick response.
 
 If you think you might have found a bug, please can you try to replicate it with the latest beta?
 
@@ -16,9 +19,7 @@ If you think you might have found a bug, please can you try to replicate it with
 
 If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
 
-If you have an idea for an improvement, then please search the old issues first and if you don't find your idea, make a new issue.
-
-Thanks
+Thank you
 
 The Rclone Developers
 
@@ -27,17 +28,23 @@ The Rclone Developers
 #### What is the problem you are having with rclone?
 
 
-#### What is your rclone version (eg output from `rclone -V`)
+#### What is your rclone version (output from `rclone version`)
 
 
 
 #### Which OS you are using and how many bits (eg Windows 7, 64 bit)
 
 
 
 #### Which cloud storage system are you using? (eg Google Drive)
 
 
 
 #### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
 
 
 
 #### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
 
 
.github/ISSUE_TEMPLATE/Feature.md (new file, 36 lines)

@@ -0,0 +1,36 @@
---
name: Feature request
about: Suggest a new feature or enhancement for rclone
---

<!--

Welcome :-)

So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.

Here is a checklist of things to do:

1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum first: https://forum.rclone.org/
3. Make a feature request issue (this is the right place!).
4. Be prepared to get involved making the feature :-)

Looking forward to your great idea!

The Rclone Developers

-->


#### What is your current rclone version (output from `rclone version`)?



#### What problem are you are trying to solve?



#### How do you think rclone should be changed to solve that?

.github/PULL_REQUEST_TEMPLATE.md (new file, 29 lines)

@@ -0,0 +1,29 @@
<!--
Thank you very much for contributing code or documentation to rclone! Please
fill out the following questions to make it easier for us to review your
changes.

You do not need to check all the boxes below all at once, feel free to take
your time and add more commits. If you're done and ready for review, please
check the last box.
-->

#### What is the purpose of this change?

<!--
Describe the changes here
-->

#### Was the change discussed in an issue or in the forum before?

<!--
Link issues and relevant forum posts here.
-->

#### Checklist

- [ ] I have read the [contribution guidelines](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
- [ ] I have added tests for all changes in this PR if appropriate.
- [ ] I have added documentation for the changes if appropriate.
- [ ] All commit messages are in [house style](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#commit-messages).
- [ ] I'm done, this Pull Request is ready for review :-)
.golangci.yml (new file, 30 lines)

@@ -0,0 +1,30 @@
# golangci-lint configuration options

run:
  build-tags:
    - cmount

linters:
  enable:
    - deadcode
    - errcheck
    - goimports
    - golint
    - ineffassign
    - structcheck
    - varcheck
    - govet
    - unconvert
    #- prealloc
    #- maligned
  disable-all: true

issues:
  # Enable some lints excluded by default
  exclude-use-default: false

  # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
  max-per-linter: 0

  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
  max-same-issues: 0
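The configuration above replaces the gometalinter setup deleted just below. As a hedged illustration (not code from rclone), this is the kind of problem two of the enabled linters are there to catch, and a version that keeps them quiet:

```go
// Illustration only: issues reported by errcheck and golint as enabled in
// .golangci.yml above, together with a version that passes both.
package lintdemo

import "os"

// The function below has no doc comment (golint: exported function should
// have a comment or be unexported) and ignores the error returned by
// os.Remove (errcheck: error return value is not checked).

func Cleanup(name string) {
	os.Remove(name)
}

// CleanupChecked removes name and returns any error to the caller, which
// satisfies both linters.
func CleanupChecked(name string) error {
	return os.Remove(name)
}
```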
.gometalinter.json (deleted)

@@ -1,14 +0,0 @@
-{
-    "Enable": [
-        "deadcode",
-        "errcheck",
-        "goimports",
-        "golint",
-        "ineffassign",
-        "structcheck",
-        "varcheck",
-        "vet"
-    ],
-    "EnableGC": true,
-    "Vendor": true
-}
.travis.yml

@@ -4,11 +4,11 @@ dist: trusty
 os:
 - linux
 go:
-- 1.7.x
 - 1.8.x
 - 1.9.x
 - 1.10.x
 - 1.11.x
+- 1.12rc1
 - tip
 go_import_path: github.com/ncw/rclone
 before_install:
@@ -25,6 +25,7 @@ script:
 env:
   global:
     - GOTAGS=cmount
+    - GO111MODULE=off
     - secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
     - secure: AMjrMAksDy3QwqGqnvtUg8FL/GNVgNqTqhntLF9HSU0njHhX6YurGGnfKdD9vNHlajPQOewvmBjwNLcDWGn2WObdvmh9Ohep0EmOjZ63kliaRaSSQueSd8y0idfqMQAxep0SObOYbEDVmQh0RCAE9wOVKRaPgw98XvgqWGDq5Tw=
     - secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
@@ -43,7 +44,7 @@ matrix:
   - go: tip
   include:
     - os: osx
-      go: 1.11.x
+      go: 1.12rc1
       env: GOTAGS=""
 cache:
   directories:
@@ -55,5 +56,5 @@ deploy:
   on:
     repo: ncw/rclone
     all_branches: true
-    go: 1.11.x
+    go: 1.12rc1
     condition: $TRAVIS_PULL_REQUEST == false
 
CONTRIBUTING.md

@@ -21,14 +21,14 @@ with the [latest beta of rclone](https://beta.rclone.org/):
 ## Submitting a pull request ##
 
 If you find a bug that you'd like to fix, or a new feature that you'd
-like to implement then please submit a pull request via Github.
+like to implement then please submit a pull request via GitHub.
 
 If it is a big feature then make an issue first so it can be discussed.
 
 You'll need a Go environment set up with GOPATH set. See [the Go
 getting started docs](https://golang.org/doc/install) for more info.
 
-First in your web browser press the fork button on [rclone's Github
+First in your web browser press the fork button on [rclone's GitHub
 page](https://github.com/ncw/rclone).
 
 Now in your terminal
@@ -64,22 +64,23 @@ packages which you can install with
 
 Make sure you
 
-  * Add documentation for a new feature (see below for where)
-  * Add unit tests for a new feature
+  * Add [documentation](#writing-documentation) for a new feature.
+  * Follow the [commit message guidelines](#commit-messages).
+  * Add [unit tests](#testing) for a new feature
   * squash commits down to one per feature
-  * rebase to master `git rebase master`
+  * rebase to master with `git rebase master`
 
 When you are done with that
 
     git push origin my-new-feature
 
-Go to the Github website and click [Create pull
+Go to the GitHub website and click [Create pull
 request](https://help.github.com/articles/creating-a-pull-request/).
 
 You patch will get reviewed and you might get asked to fix some stuff.
 
 If so, then make the changes in the same branch, squash the commits,
-rebase it to master then push it to Github with `--force`.
+rebase it to master then push it to GitHub with `--force`.
 
 ## Enabling CI for your fork ##
 
@@ -122,6 +123,13 @@ but they can be run against any of the remotes.
     cd fs/operations
     go test -v -remote TestDrive:
 
+If you want to use the integration test framework to run these tests
+all together with an HTML report and test retries then from the
+project root:
+
+    go install github.com/ncw/rclone/fstest/test_all
+    test_all -backend drive
+
 If you want to run all the integration tests against all the remotes,
 then change into the project root and run
 
@@ -181,10 +189,14 @@ with modules beneath.
 
 If you are adding a new feature then please update the documentation.
 
-If you add a new flag, then if it is a general flag, document it in
+If you add a new general flag (not for a backend), then document it in
 `docs/content/docs.md` - the flags there are supposed to be in
-alphabetical order. If it is a remote specific flag, then document it
-in `docs/content/remote.md`.
+alphabetical order.
+
+If you add a new backend option/flag, then it should be documented in
+the source file in the `Help:` field. The first line of this is used
+for the flag help, the remainder is shown to the user in `rclone
+config` and is added to the docs with `make backenddocs`.
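The adb backend added on this branch (backend/adb/adb.go below) registers its options in exactly this way. A minimal sketch of the pattern, with a backend and option invented purely for illustration:

```go
package example

import "github.com/ncw/rclone/fs"

// init registers a backend option whose Help text is split as described
// above: the first line becomes the --example-copy-links flag help, the
// rest is only shown by `rclone config` and pulled into the docs by
// `make backenddocs`.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "example",
		Description: "Example backend (illustration only)",
		NewFs:       NewFs, // assumed to be defined elsewhere in the package
		Options: []fs.Option{{
			Name: "copy_links",
			Help: `Follow symlinks and copy the pointed to item.

This longer explanation does not appear in the flag help; it is shown
when configuring the backend interactively and in the generated docs.`,
			Default:  false,
			Advanced: true,
		}},
	})
}
```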
 The only documentation you need to edit are the `docs/content/*.md`
 files. The MANUAL.*, rclone.1, web site etc are all auto generated
@@ -203,14 +215,20 @@ file.
 ## Commit messages ##
 
 Please make the first line of your commit message a summary of the
-change, and prefix it with the directory of the change followed by a
-colon. The changelog gets made by looking at just these first lines
-so make it good!
+change that a user (not a developer) of rclone would like to read, and
+prefix it with the directory of the change followed by a colon. The
+changelog gets made by looking at just these first lines so make it
+good!
 
 If you have more to say about the commit, then enter a blank line and
 carry on the description. Remember to say why the change was needed -
 the commit itself shows what was changed.
 
+Writing more is better than less. Comparing the behaviour before the
+change to that after the change is very useful. Imagine you are
+writing to yourself in 12 months time when you've forgotten everything
+about what you just did and you need to get up to speed quickly.
+
 If the change fixes an issue then write `Fixes #1234` in the commit
 message. This can be on the subject line if it will fit. If you
 don't want to close the associated issue just put `#1234` and the
@@ -258,9 +276,8 @@ To add a dependency `github.com/ncw/new_dependency` see the
 instructions below. These will fetch the dependency, add it to
 `go.mod` and `go.sum` and vendor it for older go versions.
 
-    export GO111MODULE=on
-    go get github.com/ncw/new_dependency
-    go mod vendor
+    GO111MODULE=on go get github.com/ncw/new_dependency
+    GO111MODULE=on go mod vendor
 
 You can add constraints on that package when doing `go get` (see the
 go docs linked above), but don't unless you really need to.
@@ -275,9 +292,8 @@ in `vendor`.
 
 If you need to update a dependency then run
 
-    export GO111MODULE=on
-    go get -u github.com/pkg/errors
-    go mod vendor
+    GO111MODULE=on go get -u github.com/pkg/errors
+    GO111MODULE=on go mod vendor
 
 Check in in a single commit as above.
 
@@ -334,7 +350,13 @@ Unit tests
 
 Integration tests
 
-  * Add your fs to `fstest/test_all/test_all.go`
+  * Add your backend to `fstest/test_all/config.yaml`
+  * Once you've done that then you can use the integration test framework from the project root:
+    * go install ./...
+    * test_all -backend remote
+
+Or if you want to run the integration tests manually:
+
   * Make sure integration tests pass with
     * `cd fs/operations`
     * `go test -v -remote TestRemote:`
@@ -349,11 +371,10 @@ See the [testing](#testing) section for more information on integration tests.
 
 Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.
 
-  * `README.md` - main Github page
-  * `docs/content/remote.md` - main docs page
+  * `README.md` - main GitHub page
+  * `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
   * `docs/content/overview.md` - overview docs
   * `docs/content/docs.md` - list of remotes in config section
   * `docs/content/about.md` - front page of rclone.org
   * `docs/layouts/chrome/navbar.html` - add it to the website navigation
   * `bin/make_manual.py` - add the page to the `docs` constant
   * `cmd/cmd.go` - the main help for rclone
 
MAINTAINERS.md

@@ -1,14 +1,17 @@
 # Maintainers guide for rclone #
 
-Current active maintainers of rclone are
+Current active maintainers of rclone are:
 
-  * Nick Craig-Wood @ncw
-  * Stefan Breunig @breunigs
-  * Ishuah Kariuki @ishuah
-  * Remus Bunduc @remusb - cache subsystem maintainer
-  * Fabian Möller @B4dM4n
-  * Alex Chen @Cnly
-  * Sandeep Ummadi @sandeepkru
+| Name              | GitHub ID   | Specific Responsibilities    |
+| :---------------- | :---------- | :--------------------------- |
+| Nick Craig-Wood   | @ncw        | overall project health       |
+| Stefan Breunig    | @breunigs   |                              |
+| Ishuah Kariuki    | @ishuah     |                              |
+| Remus Bunduc      | @remusb     | cache backend                |
+| Fabian Möller     | @B4dM4n     |                              |
+| Alex Chen         | @Cnly       | onedrive backend             |
+| Sandeep Ummadi    | @sandeepkru | azureblob backend            |
+| Sebastian Bünger  | @buengese   | jottacloud & yandex backends |
 
 **This is a work in progress Draft**
 
@@ -58,7 +61,7 @@ Close tickets as soon as you can - make sure they are tagged with a release. Po
 
 Try to process pull requests promptly!
 
-Merging pull requests on Github itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
+Merging pull requests on GitHub itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
 
 After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.
 
MANUAL.html (5346 lines): diff suppressed because it is too large.
MANUAL.txt (5879 lines): diff suppressed because it is too large.
Makefile (54 lines changed)

@@ -11,7 +11,7 @@ ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
 BRANCH_PATH :=
 endif
 TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
-NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
+NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
 ifneq ($(TAG),$(LAST_TAG))
 	TAG := $(TAG)-beta
 endif
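The only change to NEW_TAG is the output format: the numeric part of the last tag still gets 0.01 added, but the result now carries a trailing .0, so new tags read as three-part semver versions. A small Go sketch of the same arithmetic, purely for illustration:

```go
// Sketch only: mirrors the NEW_TAG computation in the Makefile hunk above.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func nextTag(lastTag string) (oldStyle, newStyle string) {
	v, _ := strconv.ParseFloat(strings.TrimPrefix(lastTag, "v"), 64)
	v += 0.01
	oldStyle = fmt.Sprintf("v%.2f", v)   // format before this change
	newStyle = fmt.Sprintf("v%.2f.0", v) // format after this change
	return
}

func main() {
	fmt.Println(nextTag("v1.45")) // v1.46 v1.46.0
}
```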
@@ -50,10 +50,9 @@ version:
 
 # Full suite of integration tests
 test: rclone
-	go install github.com/ncw/rclone/fstest/test_all
-	-go test -v -count 1 $(BUILDTAGS) $(GO_FILES) 2>&1 | tee test.log
-	-test_all github.com/ncw/rclone/fs/operations github.com/ncw/rclone/fs/sync 2>&1 | tee fs/test_all.log
-	@echo "Written logs in test.log and fs/test_all.log"
+	go install --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/ncw/rclone/fstest/test_all
+	-test_all 2>&1 | tee test_all.log
+	@echo "Written logs in test_all.log"
 
 # Quick test
 quicktest:
@@ -65,30 +64,20 @@ endif
 # Do source code quality checks
 check: rclone
 ifdef FULL_TESTS
-	go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
-	errcheck $(BUILDTAGS) ./...
-	find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
-	go list ./... | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
+	@# we still run go vet for -printfuncs which golangci-lint doesn't do yet
+	@# see: https://github.com/golangci/golangci-lint/issues/204
+	@echo "-- START CODE QUALITY REPORT -------------------------------"
+	@go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
+	@golangci-lint run ./...
+	@echo "-- END CODE QUALITY REPORT ---------------------------------"
 else
 	@echo Skipping source quality tests as version of go too old
 endif
 
-gometalinter_install:
-	go get -u github.com/alecthomas/gometalinter
-	gometalinter --install --update
-
-# We aren't using gometalinter as the default linter yet because
-# 1. it doesn't support build tags: https://github.com/alecthomas/gometalinter/issues/275
-# 2. can't get -printfuncs working with the vet linter
-gometalinter:
-	gometalinter ./...
-
 # Get the build dependencies
 build_dep:
 ifdef FULL_TESTS
-	go get -u github.com/kisielk/errcheck
-	go get -u golang.org/x/tools/cmd/goimports
-	go get -u github.com/golang/lint/golint
+	go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
 endif
 
 # Get the release dependencies
@@ -99,15 +88,15 @@ release_dep:
 # Update dependencies
 update:
 	GO111MODULE=on go get -u ./...
-	GO111MODULE=on go tidy
-	GO111MODULE=on go vendor
+	GO111MODULE=on go mod tidy
+	GO111MODULE=on go mod vendor
 
 doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
 
 rclone.1: MANUAL.md
 	pandoc -s --from markdown --to man MANUAL.md -o rclone.1
 
-MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs
+MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs
 	./bin/make_manual.py
 
 MANUAL.html: MANUAL.md
@@ -117,7 +106,10 @@ MANUAL.txt: MANUAL.md
 	pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
 
 commanddocs: rclone
-	rclone gendocs docs/content/commands/
+	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/commands/
+
+backenddocs: rclone bin/make_backend_docs.py
+	./bin/make_backend_docs.py
 
 rcdocs: rclone
 	bin/make_rc_docs.sh
@@ -183,6 +175,13 @@ ifndef BRANCH_PATH
 endif
 	@echo Beta release ready at $(BETA_URL)
 
+circleci_upload:
+	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
+ifndef BRANCH_PATH
+	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
+endif
+	@echo Beta release ready at $(BETA_URL)/testbuilds
+
 BUILD_FLAGS := -exclude "^(windows|darwin)/"
 ifeq ($(TRAVIS_OS_NAME),osx)
 	BUILD_FLAGS := -include "^darwin/" -cgo
@@ -190,7 +189,7 @@ endif
 
 travis_beta:
 ifeq ($(TRAVIS_OS_NAME),linux)
-	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
+	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
 endif
 	git log $(LAST_TAG).. > /tmp/git-log.txt
 	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)
@@ -229,4 +228,3 @@ startdev:
 
 winzip:
 	zip -9 rclone-$(TAG).zip rclone.exe
-
README.md (106 lines changed)

@@ -2,10 +2,11 @@
 
 [Website](https://rclone.org) |
 [Documentation](https://rclone.org/docs/) |
 [Download](https://rclone.org/downloads/) |
 [Contributing](CONTRIBUTING.md) |
 [Changelog](https://rclone.org/changelog/) |
 [Installation](https://rclone.org/install/) |
-[Forum](https://forum.rclone.org/)
+[Forum](https://forum.rclone.org/) |
+[G+](https://google.com/+RcloneOrg)
 
 [](https://travis-ci.org/ncw/rclone)
@@ -13,50 +14,83 @@
 [](https://circleci.com/gh/ncw/rclone/tree/master)
 [](https://godoc.org/github.com/ncw/rclone)
 
-Rclone is a command line program to sync files and directories to and from
-
-* Amazon Drive ([See note](https://rclone.org/amazonclouddrive/#status))
-* Amazon S3 / Dreamhost / Ceph / Minio / Wasabi
-* Backblaze B2
-* Box
-* Dropbox
-* FTP
-* Google Cloud Storage
-* Google Drive
-* HTTP
-* Hubic
-* Jottacloud
-* Mega
-* Microsoft Azure Blob Storage
-* Microsoft OneDrive
-* OpenDrive
-* Openstack Swift / Rackspace cloud files / Memset Memstore / OVH / Oracle Cloud Storage
-* pCloud
-* QingStor
-* SFTP
-* Webdav / Owncloud / Nextcloud
-* Yandex Disk
-* The local filesystem
-
-Features
-
-* MD5/SHA1 hashes checked at all times for file integrity
+# Rclone
+
+Rclone *("rsync for cloud storage")* is a command line program to sync files and directories to and from different cloud storage providers.
+
+## Storage providers
+
+* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
+* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
+* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
+* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
+* Box [:page_facing_up:](https://rclone.org/box/)
+* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
+* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
+* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
+* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
+* FTP [:page_facing_up:](https://rclone.org/ftp/)
+* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
+* Google Drive [:page_facing_up:](https://rclone.org/drive/)
+* HTTP [:page_facing_up:](https://rclone.org/http/)
+* Hubic [:page_facing_up:](https://rclone.org/hubic/)
+* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
+* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
+* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
+* Mega [:page_facing_up:](https://rclone.org/mega/)
+* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
+* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
+* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
+* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
+* OVH [:page_facing_up:](https://rclone.org/swift/)
+* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
+* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
+* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
+* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
+* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
+* put.io [:page_facing_up:](https://rclone.org/webdav/#put-io)
+* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
+* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
+* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
+* SFTP [:page_facing_up:](https://rclone.org/sftp/)
+* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
+* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
+* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
+* The local filesystem [:page_facing_up:](https://rclone.org/local/)
+
+Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
+
+## Features
+
+* MD5/SHA-1 hashes checked at all times for file integrity
 * Timestamps preserved on files
 * Partial syncs supported on a whole file basis
-* Copy mode to just copy new/changed files
-* Sync (one way) mode to make a directory identical
-* Check mode to check for file hash equality
-* Can sync to and from network, eg two different cloud accounts
-* Optional encryption (Crypt)
-* Optional FUSE mount
+* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
+* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
+* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
+* Can sync to and from network, e.g. two different cloud accounts
+* Optional encryption ([Crypt](https://rclone.org/crypt/))
+* Optional cache ([Cache](https://rclone.org/cache/))
+* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
 
-See the home page for installation, usage, documentation, changelog
-and configuration walkthroughs.
+## Installation & documentation
 
-* https://rclone.org/
+Please see the [rclone website](https://rclone.org/) for:
+
+* [Installation](https://rclone.org/install/)
+* [Documentation & configuration](https://rclone.org/docs/)
+* [Changelog](https://rclone.org/changelog/)
+* [FAQ](https://rclone.org/faq/)
+* [Storage providers](https://rclone.org/overview/)
+* [Forum](https://forum.rclone.org/)
+* ...and more
 
 ## Downloads
 
 * https://rclone.org/downloads/
 
 License
 -------
 
 This is free software under the terms of MIT the license (check the
-COPYING file included in this package).
+[COPYING file](/COPYING) included in this package).
 
RELEASE.md (15 lines changed)

@@ -32,6 +32,21 @@ Early in the next release cycle update the vendored dependencies
   * git add new files
   * git commit -a -v
 
+If `make update` fails with errors like this:
+
+```
+# github.com/cpuguy83/go-md2man/md2man
+../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
+../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
+```
+
+Can be fixed with
+
+  * GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
+  * GO111MODULE=on go mod tidy
+  * GO111MODULE=on go mod vendor
+
+
 Making a point release. If rclone needs a point release due to some
 horrendous bug, then
   * git branch v1.XX v1.XX-fixes
 
backend/adb/adb.go (new file, 821 lines)
@@ -0,0 +1,821 @@
|
||||
package adb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/pkg/errors"
|
||||
adb "github.com/thinkhy/go-adb"
|
||||
"github.com/thinkhy/go-adb/wire"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "adb",
|
||||
Description: "Android Debug Bridge",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "serial",
|
||||
Help: "The device serial to use. Leave empty for auto selection.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "host",
|
||||
Default: "localhost",
|
||||
Help: "The ADB server host.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "port",
|
||||
Default: 5037,
|
||||
Help: "The ADB server port.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "executable",
|
||||
Help: "The ADB executable path.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "copy_links",
|
||||
Help: "Follow symlinks and copy the pointed to item.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Serial string
|
||||
Host string
|
||||
Port uint16
|
||||
Executable string
|
||||
FollowSymlinks bool `config:"copy_links"`
|
||||
}
|
||||
|
||||
// Fs represents an adb device
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
client *adb.Adb
|
||||
device *execDevice
|
||||
statFunc statFunc
|
||||
statFuncMu sync.Mutex
|
||||
touchFunc touchFunc
|
||||
touchFuncMu sync.Mutex
|
||||
}
|
||||
|
||||
// Object describes an adb file
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
size int64
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("ADB root '%s'", f.root)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if root == "" {
|
||||
root = "/"
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
statFunc: (*Object).statTry,
|
||||
touchFunc: (*Object).touchTry,
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(f)
|
||||
|
||||
f.client, err = adb.NewWithConfig(adb.ServerConfig{
|
||||
Host: opt.Host,
|
||||
Port: int(opt.Port),
|
||||
PathToAdb: opt.Executable,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Could not configure ADB server")
|
||||
}
|
||||
err = f.client.StartServer()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Could not start ADB server")
|
||||
}
|
||||
|
||||
serverVersion, err := f.client.ServerVersion()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Could not get ADB server version")
|
||||
}
|
||||
fs.Debugf(f, "ADB server version: 0x%X", serverVersion)
|
||||
|
||||
serials, err := f.client.ListDeviceSerials()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Could not get ADB devices")
|
||||
}
|
||||
descriptor := adb.AnyDevice()
|
||||
if opt.Serial != "" {
|
||||
descriptor = adb.DeviceWithSerial(opt.Serial)
|
||||
}
|
||||
if len(serials) > 1 && opt.Serial == "" {
|
||||
return nil, errors.New("Multiple ADB devices found. Use the serial config to select a specific device")
|
||||
}
|
||||
f.device = &execDevice{f.client.Device(descriptor)}
|
||||
|
||||
// follow symlinks for root paths
|
||||
entry, err := f.newEntryFollowSymlinks("")
|
||||
switch err {
|
||||
case nil:
|
||||
case fs.ErrorObjectNotFound:
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
switch entry.(type) {
|
||||
case fs.Object:
|
||||
f.root = path.Dir(f.root)
|
||||
return f, fs.ErrorIsFile
|
||||
case nil:
|
||||
return f, nil
|
||||
case fs.Directory:
|
||||
return f, nil
|
||||
default:
|
||||
return nil, errors.Errorf("Invalid root entry type %t", entry)
|
||||
}
|
||||
}
|
||||
|
||||
// Precision of the object storage system
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return 1 * time.Second
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.None)
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
p := path.Join(f.root, dir)
|
||||
dirEntries, err := f.device.ListDirEntries(p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "ListDirEntries")
|
||||
}
|
||||
|
||||
defer fs.CheckClose(dirEntries, &err)
|
||||
|
||||
found := false
|
||||
for dirEntries.Next() {
|
||||
found = true
|
||||
dirEntry := dirEntries.Entry()
|
||||
switch dirEntry.Name {
|
||||
case ".", "..":
|
||||
continue
|
||||
}
|
||||
fsEntry, err := f.entryForDirEntry(path.Join(dir, dirEntry.Name), dirEntry, f.opt.FollowSymlinks)
|
||||
if err != nil {
|
||||
fs.Errorf(p, "Listing error: %q: %v", dirEntry.Name, err)
|
||||
return nil, err
|
||||
} else if fsEntry != nil {
|
||||
entries = append(entries, fsEntry)
|
||||
} else {
|
||||
fs.Debugf(f, "Skipping DirEntry %#v", dirEntry)
|
||||
}
|
||||
}
|
||||
err = dirEntries.Err()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "ListDirEntries")
|
||||
}
|
||||
if !found {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (f *Fs) entryForDirEntry(remote string, e *adb.DirEntry, followSymlinks bool) (fs.DirEntry, error) {
|
||||
o := f.newObjectWithInfo(remote, e)
|
||||
// Follow symlinks if required
|
||||
if followSymlinks && (e.Mode&os.ModeSymlink) != 0 {
|
||||
err := f.statFunc(&o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if o.mode.IsDir() {
|
||||
return fs.NewDir(remote, o.modTime), nil
|
||||
}
|
||||
return &o, nil
|
||||
}
|
||||
|
||||
func (f *Fs) newEntry(remote string) (fs.DirEntry, error) {
|
||||
return f.newEntryWithFollow(remote, f.opt.FollowSymlinks)
|
||||
}
|
||||
func (f *Fs) newEntryFollowSymlinks(remote string) (fs.DirEntry, error) {
|
||||
return f.newEntryWithFollow(remote, true)
|
||||
}
|
||||
func (f *Fs) newEntryWithFollow(remote string, followSymlinks bool) (fs.DirEntry, error) {
|
||||
entry, err := f.device.Stat(path.Join(f.root, remote))
|
||||
if err != nil {
|
||||
if adb.HasErrCode(err, adb.FileNoExistError) {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
return nil, errors.Wrapf(err, "Stat failed")
|
||||
}
|
||||
return f.entryForDirEntry(remote, entry, followSymlinks)
|
||||
}
|
||||
|
||||
func (f *Fs) newObjectWithInfo(remote string, e *adb.DirEntry) Object {
|
||||
return Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
size: int64(e.Size),
|
||||
mode: e.Mode,
|
||||
modTime: e.ModifiedAt,
|
||||
}
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
entry, err := f.newEntry(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
obj, ok := entry.(fs.Object)
|
||||
if !ok {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
|
||||
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
|
||||
// return an error or upload it properly (rather than e.g. calling panic).
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
remote := src.Remote()
|
||||
// Temporary Object under construction - info filled in by Update()
|
||||
o := f.newObject(remote)
|
||||
err := o.Update(in, src, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// newObject makes a half completed Object
|
||||
func (f *Fs) newObject(remote string) *Object {
|
||||
return &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
}
|
||||
|
||||
// Mkdir makes the directory (container, bucket)
|
||||
//
|
||||
// Shouldn't return an error if it already exists
|
||||
func (f *Fs) Mkdir(dir string) error {
|
||||
p := path.Join(f.root, dir)
|
||||
output, code, err := f.device.execCommandWithExitCode("mkdir -p", p)
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
return nil
|
||||
case adb.ShellExitError:
|
||||
entry, _ := f.newEntry(p)
|
||||
if _, ok := entry.(fs.Directory); ok {
|
||||
return nil
|
||||
}
|
||||
return errors.Errorf("mkdir %q failed with %d: %q", dir, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "mkdir")
|
||||
}
|
||||
}
|
||||
|
||||
// Rmdir removes the directory (container, bucket) if empty
|
||||
//
|
||||
// Return an error if it doesn't exist or isn't empty
|
||||
func (f *Fs) Rmdir(dir string) error {
|
||||
p := path.Join(f.root, dir)
|
||||
output, code, err := f.device.execCommandWithExitCode("rmdir", p)
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
return nil
|
||||
case adb.ShellExitError:
|
||||
return errors.Errorf("rmdir %q failed with %d: %q", dir, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "rmdir")
|
||||
}
|
||||
}
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// ModTime returns the modification date of the file
|
||||
// It should return a best guess if one isn't available
|
||||
func (o *Object) ModTime() time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// Size returns the size of the file
|
||||
func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// Hash returns the selected checksum of the file
|
||||
// If no checksum is available it returns ""
|
||||
func (o *Object) Hash(hash.Type) (string, error) {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// Storable says whether this object can be stored
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// SetModTime sets the metadata on the object to set the modification date
|
||||
func (o *Object) SetModTime(t time.Time) error {
|
||||
return o.fs.touchFunc(o, t)
|
||||
}
|
||||
|
||||
func (o *Object) stat() error {
|
||||
return o.statStatArg(statArgC, path.Join(o.fs.root, o.remote))
|
||||
}
|
||||
|
||||
func (o *Object) setMetadata(entry *adb.DirEntry) {
|
||||
// Don't overwrite the values if we don't need to
|
||||
// this avoids upsetting the race detector
|
||||
if o.size != int64(entry.Size) {
|
||||
o.size = int64(entry.Size)
|
||||
}
|
||||
if !o.modTime.Equal(entry.ModifiedAt) {
|
||||
o.modTime = entry.ModifiedAt
|
||||
}
|
||||
if o.mode != entry.Mode {
|
||||
o.mode = decodeEntryMode(uint32(entry.Mode))
|
||||
}
|
||||
}
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||
const blockSize = 1 << 12
|
||||
|
||||
var offset, count int64 = 0, -1
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.RangeOption:
|
||||
offset, count = x.Decode(o.size)
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
if offset > o.size {
|
||||
offset = o.size
|
||||
}
|
||||
if count < 0 {
|
||||
count = o.size - offset
|
||||
} else if count+offset > o.size {
|
||||
count = o.size - offset
|
||||
}
|
||||
fs.Debugf(o, "Open: remote: %q offset: %d count: %d", o.remote, offset, count)
|
||||
|
||||
if count == 0 {
|
||||
return ioutil.NopCloser(bytes.NewReader(nil)), nil
|
||||
}
|
||||
offsetBlocks, offsetRest := offset/blockSize, offset%blockSize
|
||||
countBlocks := (count-1)/blockSize + 1
|
||||
|
||||
conn, err := o.fs.device.execCommand(fmt.Sprintf("sh -c 'dd \"if=$0\" bs=%d skip=%d count=%d 2>/dev/null'", blockSize, offsetBlocks, countBlocks), path.Join(o.fs.root, o.remote))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &adbReader{
|
||||
ReadCloser: readers.NewLimitedReadCloser(conn, count+offsetRest),
|
||||
skip: offsetRest,
|
||||
expected: count,
|
||||
}, nil
|
||||
}
|
||||
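Open above streams an arbitrary byte range through dd, which can only skip and count whole blocks, so the requested offset is rounded down to a block boundary and the leftover prefix bytes are skipped from the returned stream. A minimal sketch of that arithmetic, purely illustrative (the helper name is invented, and the block count here is written to cover the discarded prefix as well as the requested bytes):

```go
// Sketch only: deriving dd skip/count arguments for a ranged read when the
// transport can only work in whole blocks (4 KiB, matching the blockSize
// constant in Open above). Not part of the backend.
package main

import "fmt"

const blockSize = 1 << 12 // 4096 bytes

// ddArgs returns the dd skip/count (in blocks) needed to cover the byte
// range [offset, offset+count), plus how many leading bytes of the dd
// output must be discarded to reach offset.
func ddArgs(offset, count int64) (skipBlocks, countBlocks, discard int64) {
	skipBlocks = offset / blockSize
	discard = offset % blockSize
	// The blocks read must cover the discarded prefix and the wanted bytes.
	countBlocks = (discard+count-1)/blockSize + 1
	return
}

func main() {
	skip, blocks, discard := ddArgs(5000, 10) // read 10 bytes at offset 5000
	fmt.Println(skip, blocks, discard)        // 1 1 904
}
```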
|
||||
// Update in to the object with the modTime given of the given size
|
||||
//
|
||||
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
|
||||
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
|
||||
// return an error or update the object properly (rather than e.g. calling panic).
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
for _, option := range options {
|
||||
if option.Mandatory() {
|
||||
fs.Logf(option, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
written, err := o.writeToFile(path.Join(o.fs.root, o.remote), in, 0666, src.ModTime())
|
||||
if err != nil {
|
||||
if removeErr := o.Remove(); removeErr != nil {
|
||||
fs.Errorf(o, "Failed to remove partially written file: %v", removeErr)
|
||||
}
|
||||
return err
|
||||
}
|
||||
expected := src.Size()
|
||||
if expected == -1 {
|
||||
expected = written
|
||||
}
|
||||
for _, t := range []int64{100, 250, 500, 1000, 2500, 5000, 10000} {
|
||||
err = o.stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if o.size == expected {
|
||||
return nil
|
||||
}
|
||||
fs.Debugf(o, "Invalid size after update, expected: %d got: %d", expected, o.size)
|
||||
time.Sleep(time.Duration(t) * time.Millisecond)
|
||||
}
|
||||
return o.stat()
|
||||
}
|
||||
|
||||
// Remove this object
|
||||
func (o *Object) Remove() error {
|
||||
p := path.Join(o.fs.root, o.remote)
|
||||
output, code, err := o.fs.device.execCommandWithExitCode("rm", p)
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
return nil
|
||||
case adb.ShellExitError:
|
||||
return errors.Errorf("rm %q failed with %d: %q", o.remote, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "rm")
|
||||
}
|
||||
}
|
||||
|
||||
func (o *Object) writeToFile(path string, rd io.Reader, perms os.FileMode, modeTime time.Time) (written int64, err error) {
|
||||
dst, err := o.fs.device.OpenWrite(path, perms, modeTime)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer fs.CheckClose(dst, &err)
|
||||
return io.Copy(dst, rd)
|
||||
}
|
||||
|
||||
type statFunc func(*Object) error
|
||||
|
||||
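// statTry (below) probes the stat fallbacks in order (stat -Lc, then
// realpath + stat, then readlink -f + stat) and caches the first variant
// that works on this device in f.statFunc, so later calls skip the probing.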
func (o *Object) statTry() error {
|
||||
o.fs.statFuncMu.Lock()
|
||||
defer o.fs.statFuncMu.Unlock()
|
||||
|
||||
for _, f := range []statFunc{
|
||||
(*Object).statStatL, (*Object).statRealPath, (*Object).statReadLink,
|
||||
} {
|
||||
err := f(o)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "%s", err)
|
||||
} else {
|
||||
o.fs.statFunc = f
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return errors.Errorf("unable to resolve link target")
|
||||
}
|
||||
|
||||
const (
|
||||
statArgLc = "-Lc"
|
||||
statArgC = "-c"
|
||||
)
|
||||
|
||||
func (o *Object) statStatL() error {
|
||||
return o.statStatArg(statArgLc, path.Join(o.fs.root, o.remote))
|
||||
}

func (o *Object) statStatArg(arg, path string) error {
    output, code, err := o.fs.device.execCommandWithExitCode(fmt.Sprintf("stat %s %s", arg, "%f,%s,%Y"), path)
    output = strings.TrimSpace(output)
    switch err := err.(type) {
    case nil:
    case adb.ShellExitError:
        return errors.Errorf("stat %q failed with %d: %q", o.remote, code, output)
    default:
        return errors.Wrap(err, "stat")
    }

    parts := strings.Split(output, ",")
    if len(parts) != 3 {
        return errors.Errorf("stat %q invalid output %q", o.remote, output)
    }

    mode, err := strconv.ParseUint(parts[0], 16, 32)
    if err != nil {
        return errors.Errorf("stat %q invalid output %q", o.remote, output)
    }
    size, err := strconv.ParseUint(parts[1], 10, 64)
    if err != nil {
        return errors.Errorf("stat %q invalid output %q", o.remote, output)
    }
    modTime, err := strconv.ParseInt(parts[2], 10, 64)
    if err != nil {
        return errors.Errorf("stat %q invalid output %q", o.remote, output)
    }

    o.size = int64(size)
    o.modTime = time.Unix(modTime, 0)
    o.mode = decodeEntryMode(uint32(mode))
    return nil
}
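For reference, the stat format string "%f,%s,%Y" yields the raw mode in hex, the size in bytes and the modification time in Unix seconds. A small sketch of parsing one such line, with invented sample values:

package main

import (
    "fmt"
    "strconv"
    "strings"
    "time"
)

func main() {
    output := "81a4,1337,1546300800" // raw mode (hex), size, mtime (unix seconds) - sample values
    parts := strings.Split(output, ",")
    mode, _ := strconv.ParseUint(parts[0], 16, 32)   // 0x81a4 = regular file, permissions 0644
    size, _ := strconv.ParseUint(parts[1], 10, 64)   // 1337 bytes
    modTime, _ := strconv.ParseInt(parts[2], 10, 64) // 2019-01-01 00:00:00 UTC
    fmt.Println(mode, size, time.Unix(modTime, 0).UTC())
}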

func (o *Object) statReadLink() error {
    p := path.Join(o.fs.root, o.remote)
    output, code, err := o.fs.device.execCommandWithExitCode("readlink -f", p)
    output = strings.TrimSuffix(output, "\n")
    switch err := err.(type) {
    case nil:
    case adb.ShellExitError:
        return errors.Errorf("readlink %q failed with %d: %q", o.remote, code, output)
    default:
        return errors.Wrap(err, "readlink")
    }
    return o.statStatArg(statArgC, output)
}

func (o *Object) statRealPath() error {
    p := path.Join(o.fs.root, o.remote)
    output, code, err := o.fs.device.execCommandWithExitCode("realpath", p)
    output = strings.TrimSuffix(output, "\n")
    switch err := err.(type) {
    case nil:
    case adb.ShellExitError:
        return errors.Errorf("realpath %q failed with %d: %q", o.remote, code, output)
    default:
        return errors.Wrap(err, "realpath")
    }
    return o.statStatArg(statArgC, output)
}

type touchFunc func(*Object, time.Time) error

func (o *Object) touchTry(t time.Time) error {
    o.fs.touchFuncMu.Lock()
    defer o.fs.touchFuncMu.Unlock()

    for _, f := range []touchFunc{
        (*Object).touchCmd, (*Object).touchCd,
    } {
        err := f(o, t)
        if err != nil {
            fs.Debugf(o, "%s", err)
        } else {
            o.fs.touchFunc = f
            return nil
        }
    }

    return errors.Errorf("unable to set modification time")
}

const (
    touchArgCmd = "-cmd"
    touchArgCd  = "-cd"
)

func (o *Object) touchCmd(t time.Time) error {
    return o.touchStatArg(touchArgCmd, path.Join(o.fs.root, o.remote), t)
}

func (o *Object) touchCd(t time.Time) error {
    return o.touchStatArg(touchArgCd, path.Join(o.fs.root, o.remote), t)
}

func (o *Object) touchStatArg(arg, path string, t time.Time) error {
    output, code, err := o.fs.device.execCommandWithExitCode(fmt.Sprintf("touch %s %s", arg, t.Format(time.RFC3339Nano)), path)
    output = strings.TrimSpace(output)
    switch err := err.(type) {
    case nil:
    case adb.ShellExitError:
        return errors.Errorf("touch %q failed with %d: %q", o.remote, code, output)
    default:
        return errors.Wrap(err, "touch")
    }

    err = o.stat()
    if err != nil {
        return err
    }
    if diff, ok := checkTimeEqualWithPrecision(t, o.modTime, o.fs.Precision()); !ok {
        return errors.Errorf("touch %q to %s was ineffective: %d", o.remote, t, diff)
    }

    return nil
}

func checkTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
    dt := t0.Sub(t1)
    if dt >= precision || dt <= -precision {
        return dt, false
    }
    return dt, true
}

func decodeEntryMode(entryMode uint32) os.FileMode {
    const (
        unixIFBLK  = 0x6000
        unixIFMT   = 0xf000
        unixIFCHR  = 0x2000
        unixIFDIR  = 0x4000
        unixIFIFO  = 0x1000
        unixIFLNK  = 0xa000
        unixIFREG  = 0x8000
        unixIFSOCK = 0xc000
        unixISGID  = 0x400
        unixISUID  = 0x800
        unixISVTX  = 0x200
    )

    mode := os.FileMode(entryMode & 0777)
    switch entryMode & unixIFMT {
    case unixIFBLK:
        mode |= os.ModeDevice
    case unixIFCHR:
        mode |= os.ModeDevice | os.ModeCharDevice
    case unixIFDIR:
        mode |= os.ModeDir
    case unixIFIFO:
        mode |= os.ModeNamedPipe
    case unixIFLNK:
        mode |= os.ModeSymlink
    case unixIFREG:
        // nothing to do
    case unixIFSOCK:
        mode |= os.ModeSocket
    }
    if entryMode&unixISGID != 0 {
        mode |= os.ModeSetgid
    }
    if entryMode&unixISUID != 0 {
        mode |= os.ModeSetuid
    }
    if entryMode&unixISVTX != 0 {
        mode |= os.ModeSticky
    }
    return mode
}
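A quick worked example of the decoding above, using an invented mode value: stat reports 0x41ed for a directory with permissions 0755, where 0x4000 is the S_IFDIR type bit and the low nine bits are the permission bits.

package main

import (
    "fmt"
    "os"
)

func main() {
    entryMode := uint32(0x41ed)           // sample value: directory, 0755
    mode := os.FileMode(entryMode & 0777) // permission bits: 0755
    if entryMode&0xf000 == 0x4000 {       // S_IFDIR type bits
        mode |= os.ModeDir
    }
    fmt.Println(mode) // drwxr-xr-x
}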

type execDevice struct {
    *adb.Device
}

func (d *execDevice) execCommandWithExitCode(cmd string, arg string) (string, int, error) {
    cmdLine := fmt.Sprintf("sh -c '%s \"$0\"; echo :$?' '%s'", cmd, strings.Replace(arg, "'", "'\\''", -1))
    fs.Debugf("adb", "exec: %s", cmdLine)
    conn, err := d.execCommand(cmdLine)
    if err != nil {
        return "", -1, err
    }

    resp, err := conn.ReadUntilEof()
    if err != nil {
        return "", -1, errors.Wrap(err, "ExecCommand")
    }

    outStr := string(resp)
    idx := strings.LastIndexByte(outStr, ':')
    if idx == -1 {
        return outStr, -1, fmt.Errorf("adb shell aborted, cannot parse exit code")
    }
    exitCode, _ := strconv.Atoi(strings.TrimSpace(outStr[idx+1:]))
    if exitCode != 0 {
        err = adb.ShellExitError{Command: cmdLine, ExitCode: exitCode}
    }
    return outStr[:idx], exitCode, err
}
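The "; echo :$?" suffix appends the shell's exit status to the command output on the same stream, and strings.LastIndexByte is used because the output itself may legitimately contain colons. A toy illustration with an invented output string:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

func main() {
    // Sample combined stream: command output followed by ":<exit code>".
    outStr := "rm: some/file: No such file or directory\n:1"
    idx := strings.LastIndexByte(outStr, ':') // last colon belongs to the status suffix
    exitCode, _ := strconv.Atoi(strings.TrimSpace(outStr[idx+1:]))
    fmt.Printf("output=%q exitCode=%d\n", outStr[:idx], exitCode)
}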

func (d *execDevice) execCommand(cmd string, args ...string) (*wire.Conn, error) {
    cmd = prepareCommandLineEscaped(cmd, args...)
    conn, err := d.Dial()
    if err != nil {
        return nil, errors.Wrap(err, "ExecCommand")
    }
    defer func() {
        if err != nil && conn != nil {
            _ = conn.Close()
        }
    }()

    req := fmt.Sprintf("exec:%s", cmd)

    if err = conn.SendMessage([]byte(req)); err != nil {
        return nil, errors.Wrap(err, "ExecCommand")
    }
    if _, err = conn.ReadStatus(req); err != nil {
        return nil, errors.Wrap(err, "ExecCommand")
    }
    return conn, nil
}

func prepareCommandLineEscaped(cmd string, args ...string) string {
    for i, arg := range args {
        args[i] = fmt.Sprintf("'%s'", strings.Replace(arg, "'", "'\\''", -1))
    }

    // Append the quoted args to the command.
    if len(args) > 0 {
        cmd = fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))
    }

    return cmd
}
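The quoting rule above closes the single-quoted string, emits an escaped quote and reopens it, the usual POSIX trick for embedding a quote inside single quotes. A tiny example (the sample argument is invented):

package main

import (
    "fmt"
    "strings"
)

func main() {
    arg := "it's"
    // "it's" becomes 'it'\''s' once quoted for the device shell.
    quoted := fmt.Sprintf("'%s'", strings.Replace(arg, "'", "'\\''", -1))
    fmt.Println(quoted)
}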

type adbReader struct {
    io.ReadCloser
    skip     int64
    read     int64
    expected int64
}

func (r *adbReader) Read(b []byte) (n int, err error) {
    n, err = r.ReadCloser.Read(b)
    if s := r.skip; n > 0 && s > 0 {
        _n := int64(n)
        if _n <= s {
            r.skip -= _n
            return r.Read(b)
        }
        r.skip = 0
        copy(b, b[s:n])
        n -= int(s)
    }
    r.read += int64(n)
    if err == io.EOF && r.read < r.expected {
        fs.Debugf("adb", "Read: read: %d expected: %d n: %d", r.read, r.expected, n)
        return n, io.ErrUnexpectedEOF
    }
    return n, err
}
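adbReader turns the block-aligned dd stream into an exact byte range: it discards the skip prefix, counts the bytes actually delivered against expected, and converts a premature EOF into io.ErrUnexpectedEOF. A minimal sketch of the same skip-then-limit idea on an in-memory stream (values invented):

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "strings"
)

func main() {
    blockAligned := strings.NewReader("XXXhello world") // 3 unwanted prefix bytes before the range we want
    skip, count := int64(3), int64(5)
    if _, err := io.CopyN(ioutil.Discard, blockAligned, skip); err != nil {
        panic(err)
    }
    got, _ := ioutil.ReadAll(io.LimitReader(blockAligned, count))
    fmt.Printf("%s\n", got) // hello
}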

20  backend/adb/adb_test.go  Normal file
@@ -0,0 +1,20 @@
// Test ADB filesystem interface
package adb_test

import (
    "testing"

    "github.com/ncw/rclone/backend/adb"
    "github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestAdb:/data/local/tmp",
        NilObject:  (*adb.Object)(nil),
        ExtraConfig: []fstests.ExtraConfigItem{
            {Name: "TestAdb", Key: "copy_links", Value: "true"},
        },
    })
}
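Assuming the usual rclone integration-test conventions, this would be run with a TestAdb remote defined in the rclone config and a device reachable over adb, for example with `go test -v -remote TestAdb:/data/local/tmp` from the backend/adb directory; the exact invocation is an assumption here, not something stated in this change.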
@@ -2,13 +2,12 @@ package alias
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fspath"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -31,7 +30,7 @@ type Options struct {
|
||||
Remote string `config:"remote"`
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path.
|
||||
// NewFs constructs an Fs from the path.
|
||||
//
|
||||
// The returned Fs is the actual Fs, referenced by remote in the config
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
@@ -47,13 +46,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if strings.HasPrefix(opt.Remote, name+":") {
|
||||
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
|
||||
}
|
||||
_, configName, fsPath, err := fs.ParseRemote(opt.Remote)
|
||||
fsInfo, configName, fsPath, config, err := fs.ConfigFs(opt.Remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
root = path.Join(fsPath, filepath.ToSlash(root))
|
||||
if configName == "local" {
|
||||
return fs.NewFs(root)
|
||||
}
|
||||
return fs.NewFs(configName + ":" + root)
|
||||
return fsInfo.NewFs(configName, fspath.JoinRootPath(fsPath, root), config)
|
||||
}
|
||||
|
||||
@@ -80,7 +80,7 @@ func TestNewFS(t *testing.T) {
|
||||
wantEntry := test.entries[i]
|
||||
|
||||
require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
|
||||
require.Equal(t, wantEntry.size, int64(gotEntry.Size()), what)
|
||||
require.Equal(t, wantEntry.size, gotEntry.Size(), what)
|
||||
_, isDir := gotEntry.(fs.Directory)
|
||||
require.Equal(t, wantEntry.isDir, isDir, what)
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package all
|
||||
|
||||
import (
|
||||
// Active file systems
|
||||
_ "github.com/ncw/rclone/backend/adb"
|
||||
_ "github.com/ncw/rclone/backend/alias"
|
||||
_ "github.com/ncw/rclone/backend/amazonclouddrive"
|
||||
_ "github.com/ncw/rclone/backend/azureblob"
|
||||
|
||||
@@ -21,7 +21,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/go-acd"
|
||||
acd "github.com/ncw/go-acd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
@@ -97,13 +97,42 @@ func init() {
|
||||
Hide: fs.OptionHideBoth,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_wait_per_gb",
|
||||
Help: "Additional time per GB to wait after a failed complete upload to see if it appears.",
|
||||
Name: "upload_wait_per_gb",
|
||||
Help: `Additional time per GB to wait after a failed complete upload to see if it appears.
|
||||
|
||||
Sometimes Amazon Drive gives an error when a file has been fully
|
||||
uploaded but the file appears anyway after a little while. This
|
||||
happens sometimes for files over 1GB in size and nearly every time for
|
||||
files bigger than 10GB. This parameter controls the time rclone waits
|
||||
for the file to appear.
|
||||
|
||||
The default value for this parameter is 3 minutes per GB, so by
|
||||
default it will wait 3 minutes for every GB uploaded to see if the
|
||||
file appears.
|
||||
|
||||
You can disable this feature by setting it to 0. This may cause
|
||||
conflict errors as rclone retries the failed upload but the file will
|
||||
most likely appear correctly eventually.
|
||||
|
||||
These values were determined empirically by observing lots of uploads
|
||||
of big files for a range of file sizes.
|
||||
|
||||
Upload with the "-v" flag to see more info about what rclone is doing
|
||||
in this situation.`,
|
||||
Default: fs.Duration(180 * time.Second),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "templink_threshold",
|
||||
Help: "Files >= this size will be downloaded via their tempLink.",
|
||||
Name: "templink_threshold",
|
||||
Help: `Files >= this size will be downloaded via their tempLink.
|
||||
|
||||
Files this size or more will be downloaded via their "tempLink". This
|
||||
is to work around a problem with Amazon Drive which blocks downloads
|
||||
of files bigger than about 10GB. The default for this is 9GB which
|
||||
shouldn't need to be changed.
|
||||
|
||||
To download files above this threshold, rclone requests a "tempLink"
|
||||
which downloads the file through a temporary URL directly from the
|
||||
underlying S3 storage.`,
|
||||
Default: defaultTempLinkThreshold,
|
||||
Advanced: true,
|
||||
}},
|
||||
@@ -235,7 +264,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure Amazon Drive: %v", err)
|
||||
return nil, errors.Wrap(err, "failed to configure Amazon Drive")
|
||||
}
|
||||
|
||||
c := acd.NewClient(oAuthClient)
|
||||
@@ -283,16 +312,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
// Assume it is a file
|
||||
newRoot, remote := dircache.SplitPath(root)
|
||||
newF := *f
|
||||
newF.dirCache = dircache.New(newRoot, f.trueRootID, &newF)
|
||||
newF.root = newRoot
|
||||
tempF := *f
|
||||
tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
|
||||
tempF.root = newRoot
|
||||
// Make new Fs which is the parent
|
||||
err = newF.dirCache.FindRoot(false)
|
||||
err = tempF.dirCache.FindRoot(false)
|
||||
if err != nil {
|
||||
// No root so return old f
|
||||
return f, nil
|
||||
}
|
||||
_, err := newF.newObjectWithInfo(remote, nil)
|
||||
_, err := tempF.newObjectWithInfo(remote, nil)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// File doesn't exist so return old f
|
||||
@@ -300,8 +329,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// XXX: update the old f here instead of returning tempF, since
|
||||
// `features` were already filled with functions having *f as a receiver.
|
||||
// See https://github.com/ncw/rclone/issues/2182
|
||||
f.dirCache = tempF.dirCache
|
||||
f.root = tempF.root
|
||||
// return an error with an fs which points to the parent
|
||||
return &newF, fs.ErrorIsFile
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
@@ -1240,24 +1274,38 @@ func (o *Object) MimeType() string {
|
||||
// Automatically restarts itself in case of unexpected behaviour of the remote.
|
||||
//
|
||||
// Close the returned channel to stop being notified.
|
||||
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
|
||||
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
|
||||
checkpoint := f.opt.Checkpoint
|
||||
|
||||
quit := make(chan bool)
|
||||
go func() {
|
||||
var ticker *time.Ticker
|
||||
var tickerC <-chan time.Time
|
||||
for {
|
||||
checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
|
||||
if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
|
||||
fs.Debugf(f, "Unable to save checkpoint: %v", err)
|
||||
}
|
||||
select {
|
||||
case <-quit:
|
||||
return
|
||||
case <-time.After(pollInterval):
|
||||
case pollInterval, ok := <-pollIntervalChan:
|
||||
if !ok {
|
||||
if ticker != nil {
|
||||
ticker.Stop()
|
||||
}
|
||||
return
|
||||
}
|
||||
if pollInterval == 0 {
|
||||
if ticker != nil {
|
||||
ticker.Stop()
|
||||
ticker, tickerC = nil, nil
|
||||
}
|
||||
} else {
|
||||
ticker = time.NewTicker(pollInterval)
|
||||
tickerC = ticker.C
|
||||
}
|
||||
case <-tickerC:
|
||||
checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
|
||||
if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
|
||||
fs.Debugf(f, "Unable to save checkpoint: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
return quit
|
||||
}
|
||||
|
||||
func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
|
||||
|
||||
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
|
||||
// +build !plan9,!solaris,go1.8
|
||||
|
||||
package azureblob
|
||||
|
||||
@@ -22,12 +22,14 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/ncw/rclone/lib/pacer"
|
||||
@@ -45,11 +47,12 @@ const (
|
||||
maxTotalParts = 50000 // in multipart upload
|
||||
storageDefaultBaseURL = "blob.core.windows.net"
|
||||
// maxUncommittedSize = 9 << 30 // can't upload bigger than this
|
||||
defaultChunkSize = 4 * 1024 * 1024
|
||||
maxChunkSize = 100 * 1024 * 1024
|
||||
defaultUploadCutoff = 256 * 1024 * 1024
|
||||
maxUploadCutoff = 256 * 1024 * 1024
|
||||
defaultChunkSize = 4 * fs.MebiByte
|
||||
maxChunkSize = 100 * fs.MebiByte
|
||||
defaultUploadCutoff = 256 * fs.MebiByte
|
||||
maxUploadCutoff = 256 * fs.MebiByte
|
||||
defaultAccessTier = azblob.AccessTierNone
|
||||
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -73,23 +76,44 @@ func init() {
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: "Cutoff for switching to chunked upload.",
|
||||
Default: fs.SizeSuffix(defaultUploadCutoff),
|
||||
Help: "Cutoff for switching to chunked upload (<= 256MB).",
|
||||
Default: defaultUploadCutoff,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "Upload chunk size. Must fit in memory.",
|
||||
Default: fs.SizeSuffix(defaultChunkSize),
|
||||
Name: "chunk_size",
|
||||
Help: `Upload chunk size (<= 100MB).
|
||||
|
||||
Note that this is stored in memory and there may be up to
|
||||
"--transfers" chunks stored at once in memory.`,
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "list_chunk",
|
||||
Help: "Size of blob list.",
|
||||
Name: "list_chunk",
|
||||
Help: `Size of blob list.
|
||||
|
||||
This sets the number of blobs requested in each listing chunk. Default
|
||||
is the maximum, 5000. "List blobs" requests are permitted 2 minutes
|
||||
per megabyte to complete. If an operation is taking longer than 2
|
||||
minutes per megabyte on average, it will time out (
|
||||
[source](https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-blob-service-operations#exceptions-to-default-timeout-interval)
|
||||
). This can be used to limit the number of blobs items to return, to
|
||||
avoid the time out.`,
|
||||
Default: maxListChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "access_tier",
|
||||
Help: "Access tier of blob, supports hot, cool and archive tiers.\nArchived blobs can be restored by setting access tier to hot or cool." +
|
||||
" Leave blank if you intend to use default access tier, which is set at account level",
|
||||
Help: `Access tier of blob: hot, cool or archive.
|
||||
|
||||
Archived blobs can be restored by setting access tier to hot or
|
||||
cool. Leave blank if you intend to use default access tier, which is
|
||||
set at account level
|
||||
|
||||
If there is no "access tier" specified, rclone doesn't apply any tier.
|
||||
rclone performs "Set Tier" operation on blobs while uploading, if objects
|
||||
are not modified, specifying "access tier" to new one will have no effect.
|
||||
If blobs are in "archive tier" at remote, trying to perform data transfer
|
||||
operations from remote will not be allowed. User should first restore by
|
||||
tiering blob to "Hot" or "Cool".`,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
@@ -113,6 +137,7 @@ type Fs struct {
|
||||
root string // the path we are working on if any
|
||||
opt Options // parsed config options
|
||||
features *fs.Features // optional features
|
||||
client *http.Client // http client we are using
|
||||
svcURL *azblob.ServiceURL // reference to serviceURL
|
||||
cntURL *azblob.ContainerURL // reference to containerURL
|
||||
container string // the container we are working on
|
||||
@@ -191,19 +216,6 @@ func validateAccessTier(tier string) bool {
|
||||
}
|
||||
}
|
||||
|
||||
// validAccessTiers returns list of supported storage tiers on azureblob fs
|
||||
func validAccessTiers() []string {
|
||||
validTiers := [...]azblob.AccessTierType{azblob.AccessTierHot, azblob.AccessTierCool,
|
||||
azblob.AccessTierArchive}
|
||||
|
||||
var tiers [len(validTiers)]string
|
||||
|
||||
for i, tier := range validTiers {
|
||||
tiers[i] = string(tier)
|
||||
}
|
||||
return tiers[:]
|
||||
}
|
||||
|
||||
// retryErrorCodes is a slice of error codes that we will retry
|
||||
var retryErrorCodes = []int{
|
||||
401, // Unauthorized (eg "Token has expired")
|
||||
@@ -229,7 +241,73 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
|
||||
return fserrors.ShouldRetry(err), err
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||
const minChunkSize = fs.Byte
|
||||
if cs < minChunkSize {
|
||||
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
}
|
||||
if cs > maxChunkSize {
|
||||
return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadChunkSize(cs)
|
||||
if err == nil {
|
||||
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func checkUploadCutoff(cs fs.SizeSuffix) error {
|
||||
if cs > maxUploadCutoff {
|
||||
return errors.Errorf("%v must be less than or equal to %v", cs, maxUploadCutoff)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadCutoff(cs)
|
||||
if err == nil {
|
||||
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// httpClientFactory creates a Factory object that sends HTTP requests
|
||||
// to a rclone's http.Client.
|
||||
//
|
||||
// copied from azblob.newDefaultHTTPClientFactory
|
||||
func httpClientFactory(client *http.Client) pipeline.Factory {
|
||||
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
|
||||
return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
||||
r, err := client.Do(request.WithContext(ctx))
|
||||
if err != nil {
|
||||
err = pipeline.NewError(err, "HTTP request failed")
|
||||
}
|
||||
return pipeline.NewHTTPResponse(r), err
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// newPipeline creates a Pipeline using the specified credentials and options.
|
||||
//
|
||||
// this code was copied from azblob.NewPipeline
|
||||
func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline.Pipeline {
|
||||
// Closest to API goes first; closest to the wire goes last
|
||||
factories := []pipeline.Factory{
|
||||
azblob.NewTelemetryPolicyFactory(o.Telemetry),
|
||||
azblob.NewUniqueRequestIDPolicyFactory(),
|
||||
azblob.NewRetryPolicyFactory(o.Retry),
|
||||
c,
|
||||
pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked
|
||||
azblob.NewRequestLogPolicyFactory(o.RequestLog),
|
||||
}
|
||||
return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log})
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
@@ -238,11 +316,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if opt.UploadCutoff > maxUploadCutoff {
|
||||
return nil, errors.Errorf("azure: upload cutoff (%v) must be less than or equal to %v", opt.UploadCutoff, maxUploadCutoff)
|
||||
err = checkUploadCutoff(opt.UploadCutoff)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "azure: upload cutoff")
|
||||
}
|
||||
if opt.ChunkSize > maxChunkSize {
|
||||
return nil, errors.Errorf("azure: chunk size can't be greater than %v - was %v", maxChunkSize, opt.ChunkSize)
|
||||
err = checkUploadChunkSize(opt.ChunkSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "azure: chunk size")
|
||||
}
|
||||
if opt.ListChunkSize > maxListChunkSize {
|
||||
return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
|
||||
@@ -262,6 +342,23 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
container: container,
|
||||
root: directory,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant).SetPacer(pacer.S3Pacer),
|
||||
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
|
||||
client: fshttp.NewClient(fs.Config),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
BucketBased: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
}).Fill(f)
|
||||
|
||||
var (
|
||||
u *url.URL
|
||||
serviceURL azblob.ServiceURL
|
||||
@@ -278,7 +375,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
|
||||
}
|
||||
pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
|
||||
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
|
||||
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
||||
containerURL = serviceURL.NewContainerURL(container)
|
||||
case opt.SASURL != "":
|
||||
@@ -287,7 +384,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
return nil, errors.Wrapf(err, "failed to parse SAS URL")
|
||||
}
|
||||
// use anonymous credentials in case of sas url
|
||||
pipeline := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
|
||||
pipeline := f.newPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
|
||||
// Check if we have container level SAS or account level sas
|
||||
parts := azblob.NewBlobURLParts(*u)
|
||||
if parts.ContainerName != "" {
|
||||
@@ -295,7 +392,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
return nil, errors.New("Container name in SAS URL and container provided in command do not match")
|
||||
}
|
||||
|
||||
container = parts.ContainerName
|
||||
f.container = parts.ContainerName
|
||||
containerURL = azblob.NewContainerURL(*u, pipeline)
|
||||
} else {
|
||||
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
||||
@@ -304,25 +401,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
default:
|
||||
return nil, errors.New("Need account+key or connectionString or sasURL")
|
||||
}
|
||||
f.svcURL = &serviceURL
|
||||
f.cntURL = &containerURL
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
container: container,
|
||||
root: directory,
|
||||
svcURL: &serviceURL,
|
||||
cntURL: &containerURL,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
BucketBased: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
ListTiers: true,
|
||||
}).Fill(f)
|
||||
if f.root != "" {
|
||||
f.root += "/"
|
||||
// Check to see if the (container,directory) is actually an existing file
|
||||
@@ -336,8 +417,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
_, err := f.NewObject(remote)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// File doesn't exist so return old f
|
||||
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
|
||||
// File doesn't exist or is a directory so return old f
|
||||
f.root = oldRoot
|
||||
return f, nil
|
||||
}
|
||||
@@ -393,6 +474,21 @@ func (o *Object) updateMetadataWithModTime(modTime time.Time) {
|
||||
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
|
||||
}
|
||||
|
||||
// Returns whether file is a directory marker or not
|
||||
func isDirectoryMarker(size int64, metadata azblob.Metadata, remote string) bool {
|
||||
// Directory markers are 0 length
|
||||
if size == 0 {
|
||||
// Note that metadata with hdi_isfolder = true seems to be a
|
||||
// defacto standard for marking blobs as directories.
|
||||
endsWithSlash := strings.HasSuffix(remote, "/")
|
||||
if endsWithSlash || remote == "" || metadata["hdi_isfolder"] == "true" {
|
||||
return true
|
||||
}
|
||||
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// listFn is called from list to handle an object
|
||||
type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
|
||||
|
||||
@@ -428,6 +524,7 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
||||
MaxResults: int32(maxResults),
|
||||
}
|
||||
ctx := context.Background()
|
||||
directoryMarkers := map[string]struct{}{}
|
||||
for marker := (azblob.Marker{}); marker.NotDone(); {
|
||||
var response *azblob.ListBlobsHierarchySegmentResponse
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
@@ -457,13 +554,23 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
||||
continue
|
||||
}
|
||||
remote := file.Name[len(f.root):]
|
||||
// Check for directory
|
||||
isDirectory := strings.HasSuffix(remote, "/")
|
||||
if isDirectory {
|
||||
remote = remote[:len(remote)-1]
|
||||
if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) {
|
||||
if strings.HasSuffix(remote, "/") {
|
||||
remote = remote[:len(remote)-1]
|
||||
}
|
||||
err = fn(remote, file, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Keep track of directory markers. If recursing then
|
||||
// there will be no Prefixes so no need to keep track
|
||||
if !recurse {
|
||||
directoryMarkers[remote] = struct{}{}
|
||||
}
|
||||
continue // skip directory marker
|
||||
}
|
||||
// Send object
|
||||
err = fn(remote, file, isDirectory)
|
||||
err = fn(remote, file, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -476,6 +583,10 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
||||
continue
|
||||
}
|
||||
remote = remote[len(f.root):]
|
||||
// Don't send if already sent as a directory marker
|
||||
if _, found := directoryMarkers[remote]; found {
|
||||
continue
|
||||
}
|
||||
// Send object
|
||||
err = fn(remote, nil, true)
|
||||
if err != nil {
|
||||
@@ -643,6 +754,35 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
|
||||
return fs, fs.Update(in, src, options...)
|
||||
}
|
||||
|
||||
// Check if the container exists
|
||||
//
|
||||
// NB this can return incorrect results if called immediately after container deletion
|
||||
func (f *Fs) dirExists() (bool, error) {
|
||||
options := azblob.ListBlobsSegmentOptions{
|
||||
Details: azblob.BlobListingDetails{
|
||||
Copy: false,
|
||||
Metadata: false,
|
||||
Snapshots: false,
|
||||
UncommittedBlobs: false,
|
||||
Deleted: false,
|
||||
},
|
||||
MaxResults: 1,
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
ctx := context.Background()
|
||||
_, err := f.cntURL.ListBlobsHierarchySegment(ctx, azblob.Marker{}, "", options)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
|
||||
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Mkdir creates the container if it doesn't exist
|
||||
func (f *Fs) Mkdir(dir string) error {
|
||||
f.containerOKMu.Lock()
|
||||
@@ -650,6 +790,15 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
if f.containerOK {
|
||||
return nil
|
||||
}
|
||||
if !f.containerDeleted {
|
||||
exists, err := f.dirExists()
|
||||
if err == nil {
|
||||
f.containerOK = exists
|
||||
}
|
||||
if err != nil || exists {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// now try to create the container
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
@@ -662,6 +811,11 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
f.containerOK = true
|
||||
return false, nil
|
||||
case azblob.ServiceCodeContainerBeingDeleted:
|
||||
// From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
|
||||
// When a container is deleted, a container with the same name cannot be created
|
||||
// for at least 30 seconds; the container may not be available for more than 30
|
||||
// seconds if the service is still processing the request.
|
||||
time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
|
||||
f.containerDeleted = true
|
||||
return true, err
|
||||
}
|
||||
@@ -679,7 +833,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
// isEmpty checks to see if a given directory is empty and returns an error if not
|
||||
func (f *Fs) isEmpty(dir string) (err error) {
|
||||
empty := true
|
||||
err = f.list("", true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||
err = f.list(dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||
empty = false
|
||||
return nil
|
||||
})
|
||||
@@ -874,27 +1028,37 @@ func (o *Object) setMetadata(metadata azblob.Metadata) {
|
||||
// o.md5
|
||||
// o.meta
|
||||
func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetPropertiesResponse) (err error) {
|
||||
metadata := info.NewMetadata()
|
||||
size := info.ContentLength()
|
||||
if isDirectoryMarker(size, metadata, o.remote) {
|
||||
return fs.ErrorNotAFile
|
||||
}
|
||||
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
|
||||
// this as base64 encoded string.
|
||||
o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
|
||||
o.mimeType = info.ContentType()
|
||||
o.size = info.ContentLength()
|
||||
o.modTime = time.Time(info.LastModified())
|
||||
o.size = size
|
||||
o.modTime = info.LastModified()
|
||||
o.accessTier = azblob.AccessTierType(info.AccessTier())
|
||||
o.setMetadata(info.NewMetadata())
|
||||
o.setMetadata(metadata)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
|
||||
metadata := info.Metadata
|
||||
size := *info.Properties.ContentLength
|
||||
if isDirectoryMarker(size, metadata, o.remote) {
|
||||
return fs.ErrorNotAFile
|
||||
}
|
||||
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
|
||||
// this as base64 encoded string.
|
||||
o.md5 = base64.StdEncoding.EncodeToString(info.Properties.ContentMD5)
|
||||
o.mimeType = *info.Properties.ContentType
|
||||
o.size = *info.Properties.ContentLength
|
||||
o.size = size
|
||||
o.modTime = info.Properties.LastModified
|
||||
o.accessTier = info.Properties.AccessTier
|
||||
o.setMetadata(info.Metadata)
|
||||
o.setMetadata(metadata)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -940,12 +1104,6 @@ func (o *Object) readMetaData() (err error) {
|
||||
return o.decodeMetaDataFromPropertiesResponse(blobProperties)
|
||||
}
|
||||
|
||||
// timeString returns modTime as the number of milliseconds
|
||||
// elapsed since January 1, 1970 UTC as a decimal string.
|
||||
func timeString(modTime time.Time) string {
|
||||
return strconv.FormatInt(modTime.UnixNano()/1E6, 10)
|
||||
}
|
||||
|
||||
// parseTimeString converts a decimal string number of milliseconds
|
||||
// elapsed since January 1, 1970 UTC into a time.Time and stores it in
|
||||
// the modTime variable.
|
||||
@@ -1247,11 +1405,20 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
Metadata: o.meta,
|
||||
BlobHTTPHeaders: httpHeaders,
|
||||
}
|
||||
// FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
|
||||
// is merged the SDK can't upload a single blob of exactly the chunk
|
||||
// size, so upload with a multpart upload to work around.
|
||||
// See: https://github.com/ncw/rclone/issues/2653
|
||||
multipartUpload := size >= int64(o.fs.opt.UploadCutoff)
|
||||
if size == int64(o.fs.opt.ChunkSize) {
|
||||
multipartUpload = true
|
||||
fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
// Don't retry, return a retry error instead
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
if size >= int64(o.fs.opt.UploadCutoff) {
|
||||
if multipartUpload {
|
||||
// If a large file upload in chunks
|
||||
err = o.uploadMultipart(in, size, &blob, &httpHeaders)
|
||||
} else {
|
||||
@@ -1316,7 +1483,7 @@ func (o *Object) SetTier(tier string) error {
|
||||
blob := o.getBlobReference()
|
||||
ctx := context.Background()
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err := blob.SetTier(ctx, desiredAccessTier)
|
||||
_, err := blob.SetTier(ctx, desiredAccessTier, azblob.LeaseAccessConditions{})
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
|
||||
@@ -1337,11 +1504,6 @@ func (o *Object) GetTier() string {
|
||||
return string(o.accessTier)
|
||||
}
|
||||
|
||||
// ListTiers returns list of storage tiers supported on this object
|
||||
func (o *Object) ListTiers() []string {
|
||||
return validAccessTiers()
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
|
||||
// +build !plan9,!solaris,go1.8
|
||||
|
||||
package azureblob
|
||||
|
||||
@@ -11,9 +11,7 @@ import (
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
// Check first feature flags are set on this
|
||||
// remote
|
||||
enabled := f.Features().ListTiers
|
||||
assert.True(t, enabled)
|
||||
enabled = f.Features().SetTier
|
||||
enabled := f.Features().SetTier
|
||||
assert.True(t, enabled)
|
||||
enabled = f.Features().GetTier
|
||||
assert.True(t, enabled)
|
||||
|
||||
@@ -1,20 +1,37 @@
|
||||
// Test AzureBlob filesystem interface
|
||||
|
||||
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
|
||||
// +build !plan9,!solaris,go1.8
|
||||
|
||||
package azureblob_test
|
||||
package azureblob
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/azureblob"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestAzureBlob:",
|
||||
NilObject: (*azureblob.Object)(nil),
|
||||
RemoteName: "TestAzureBlob:",
|
||||
NilObject: (*Object)(nil),
|
||||
TiersToTest: []string{"Hot", "Cool"},
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MaxChunkSize: maxChunkSize,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadCutoff(cs)
|
||||
}
|
||||
|
||||
var (
|
||||
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
_ fstests.SetUploadCutoffer = (*Fs)(nil)
|
||||
)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Build for azureblob for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
// +build freebsd netbsd openbsd plan9 solaris !go1.8
|
||||
// +build plan9 solaris !go1.8
|
||||
|
||||
package azureblob
|
||||
|
||||
@@ -17,12 +17,12 @@ type Error struct {
|
||||
Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
|
||||
}
|
||||
|
||||
// Error statisfies the error interface
|
||||
// Error satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
|
||||
}
|
||||
|
||||
// Fatal statisfies the Fatal interface
|
||||
// Fatal satisfies the Fatal interface
|
||||
//
|
||||
// It indicates which errors should be treated as fatal
|
||||
func (e *Error) Fatal() bool {
|
||||
@@ -100,7 +100,7 @@ func RemoveVersion(remote string) (t Timestamp, newRemote string) {
|
||||
return Timestamp(newT), base[:versionStart] + ext
|
||||
}
|
||||
|
||||
// IsZero returns true if the timestamp is unitialised
|
||||
// IsZero returns true if the timestamp is uninitialized
|
||||
func (t Timestamp) IsZero() bool {
|
||||
return time.Time(t).IsZero()
|
||||
}
|
||||
@@ -136,6 +136,7 @@ type AuthorizeAccountResponse struct {
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
|
||||
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
|
||||
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
|
||||
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
|
||||
NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
|
||||
} `json:"allowed"`
|
||||
|
||||
176  backend/b2/b2.go
@@ -48,9 +48,9 @@ const (
|
||||
decayConstant = 1 // bigger for slower decay, exponential
|
||||
maxParts = 10000
|
||||
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
|
||||
minChunkSize = 5E6
|
||||
defaultChunkSize = 96 * 1024 * 1024
|
||||
defaultUploadCutoff = 200E6
|
||||
minChunkSize = 5 * fs.MebiByte
|
||||
defaultChunkSize = 96 * fs.MebiByte
|
||||
defaultUploadCutoff = 200 * fs.MebiByte
|
||||
)
|
||||
|
||||
// Globals
|
||||
@@ -77,14 +77,24 @@ func init() {
|
||||
Help: "Endpoint for the service.\nLeave blank normally.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "test_mode",
|
||||
Help: "A flag string for X-Bz-Test-Mode header for debugging.",
|
||||
Name: "test_mode",
|
||||
Help: `A flag string for X-Bz-Test-Mode header for debugging.
|
||||
|
||||
This is for debugging purposes only. Setting it to one of the strings
|
||||
below will cause b2 to return specific errors:
|
||||
|
||||
* "fail_some_uploads"
|
||||
* "expire_some_account_authorization_tokens"
|
||||
* "force_cap_exceeded"
|
||||
|
||||
These will be set in the "X-Bz-Test-Mode" header which is documented
|
||||
in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`,
|
||||
Default: "",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "versions",
|
||||
Help: "Include old versions in directory listings.",
|
||||
Help: "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
@@ -92,14 +102,36 @@ func init() {
|
||||
Help: "Permanently delete files on remote removal, otherwise hide files.",
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: "Cutoff for switching to chunked upload.",
|
||||
Default: fs.SizeSuffix(defaultUploadCutoff),
|
||||
Name: "upload_cutoff",
|
||||
Help: `Cutoff for switching to chunked upload.
|
||||
|
||||
Files above this size will be uploaded in chunks of "--b2-chunk-size".
|
||||
|
||||
This value should be set no larger than 4.657GiB (== 5GB).`,
|
||||
Default: defaultUploadCutoff,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "Upload chunk size. Must fit in memory.",
|
||||
Default: fs.SizeSuffix(defaultChunkSize),
|
||||
Name: "chunk_size",
|
||||
Help: `Upload chunk size. Must fit in memory.
|
||||
|
||||
When uploading large files, chunk the file into this size. Note that
|
||||
these chunks are buffered in memory and there might a maximum of
|
||||
"--transfers" chunks in progress at once. 5,000,000 Bytes is the
|
||||
minimum size.`,
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "disable_checksum",
|
||||
Help: `Disable checksums for large (> upload cutoff) files`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "download_url",
|
||||
Help: `Custom endpoint for downloads.
|
||||
|
||||
This is usually set to a Cloudflare CDN URL as Backblaze offers
|
||||
free egress for data downloaded through the Cloudflare network.
|
||||
Leave blank if you want to use the endpoint provided by Backblaze.`,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
@@ -107,14 +139,16 @@ func init() {
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Account string `config:"account"`
|
||||
Key string `config:"key"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
TestMode string `config:"test_mode"`
|
||||
Versions bool `config:"versions"`
|
||||
HardDelete bool `config:"hard_delete"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
Account string `config:"account"`
|
||||
Key string `config:"key"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
TestMode string `config:"test_mode"`
|
||||
Versions bool `config:"versions"`
|
||||
HardDelete bool `config:"hard_delete"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
DisableCheckSum bool `config:"disable_checksum"`
|
||||
DownloadURL string `config:"download_url"`
|
||||
}
|
||||
|
||||
// Fs represents a remote b2 server
|
||||
@@ -263,7 +297,38 @@ func errorHandler(resp *http.Response) error {
|
||||
return errResponse
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, bucket:path
|
||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||
if cs < minChunkSize {
|
||||
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadChunkSize(cs)
|
||||
if err == nil {
|
||||
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
|
||||
f.fillBufferTokens() // reset the buffer tokens
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
|
||||
if cs < opt.ChunkSize {
|
||||
return errors.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadCutoff(&f.opt, cs)
|
||||
if err == nil {
|
||||
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
@@ -271,11 +336,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.UploadCutoff < opt.ChunkSize {
|
||||
return nil, errors.Errorf("b2: upload cutoff (%v) must be greater than or equal to chunk size (%v)", opt.UploadCutoff, opt.ChunkSize)
|
||||
err = checkUploadCutoff(opt, opt.UploadCutoff)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "b2: upload cutoff")
|
||||
}
|
||||
if opt.ChunkSize < minChunkSize {
|
||||
return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, opt.ChunkSize)
|
||||
err = checkUploadChunkSize(opt.ChunkSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "b2: chunk size")
|
||||
}
|
||||
bucket, directory, err := parsePath(root)
|
||||
if err != nil {
|
||||
@@ -291,13 +358,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
opt.Endpoint = defaultEndpoint
|
||||
}
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
bucket: bucket,
|
||||
root: directory,
|
||||
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
bufferTokens: make(chan []byte, fs.Config.Transfers),
|
||||
name: name,
|
||||
opt: *opt,
|
||||
bucket: bucket,
|
||||
root: directory,
|
||||
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
@@ -310,16 +376,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
f.srv.SetHeader(testModeHeader, testMode)
|
||||
fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
|
||||
}
|
||||
// Fill up the buffer tokens
|
||||
for i := 0; i < fs.Config.Transfers; i++ {
|
||||
f.bufferTokens <- nil
|
||||
}
|
||||
f.fillBufferTokens()
|
||||
err = f.authorizeAccount()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to authorize account")
|
||||
}
|
||||
// If this is a key limited to a single bucket, it must exist already
|
||||
if f.bucket != "" && f.info.Allowed.BucketID != "" {
|
||||
allowedBucket := f.info.Allowed.BucketName
|
||||
if allowedBucket == "" {
|
||||
return nil, errors.New("bucket that application key is restricted to no longer exists")
|
||||
}
|
||||
if allowedBucket != f.bucket {
|
||||
return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
|
||||
}
|
||||
f.markBucketOK()
|
||||
f.setBucketID(f.info.Allowed.BucketID)
|
||||
}
|
||||
@@ -421,6 +491,14 @@ func (f *Fs) clearUploadURL() {
|
||||
f.uploadMu.Unlock()
|
||||
}
|
||||
|
||||
// Fill up (or reset) the buffer tokens
|
||||
func (f *Fs) fillBufferTokens() {
|
||||
f.bufferTokens = make(chan []byte, fs.Config.Transfers)
|
||||
for i := 0; i < fs.Config.Transfers; i++ {
|
||||
f.bufferTokens <- nil
|
||||
}
|
||||
}
|
||||
|
||||
// getUploadBlock gets a block from the pool of size chunkSize
|
||||
func (f *Fs) getUploadBlock() []byte {
|
||||
buf := <-f.bufferTokens
|
||||
@@ -924,6 +1002,12 @@ func (f *Fs) purge(oldOnly bool) error {
|
||||
errReturn = err
|
||||
}
|
||||
}
|
||||
var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
|
||||
if time.Since(time.Time(timestamp)).Hours() > 24 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Delete Config.Transfers in parallel
|
||||
toBeDeleted := make(chan *api.File, fs.Config.Transfers)
|
||||
@@ -947,6 +1031,9 @@ func (f *Fs) purge(oldOnly bool) error {
|
||||
if object.Action == "hide" {
|
||||
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
|
||||
toBeDeleted <- object
|
||||
} else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
|
||||
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
|
||||
toBeDeleted <- object
|
||||
} else {
|
||||
fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
|
||||
}
|
||||
@@ -1218,9 +1305,17 @@ var _ io.ReadCloser = &openFile{}
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: o.fs.info.DownloadURL,
|
||||
Options: options,
|
||||
}
|
||||
|
||||
// Use downloadUrl from backblaze if downloadUrl is not set
|
||||
// otherwise use the custom downloadUrl
|
||||
if o.fs.opt.DownloadURL == "" {
|
||||
opts.RootURL = o.fs.info.DownloadURL
|
||||
} else {
|
||||
opts.RootURL = o.fs.opt.DownloadURL
|
||||
}
|
||||
|
||||
// Download by id if set otherwise by name
|
||||
if o.id != "" {
|
||||
opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
|
||||
@@ -1381,7 +1476,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
// Content-Type b2/x-auto to automatically set the stored Content-Type
|
||||
// post upload. In the case where a file extension is absent or the
|
||||
// lookup fails, the Content-Type is set to application/octet-stream. The
|
||||
// Content-Type mappings can be purused here.
|
||||
// Content-Type mappings can be pursued here.
|
||||
//
|
||||
// X-Bz-Content-Sha1
|
||||
// required
|
||||
@@ -1428,11 +1523,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
},
|
||||
ContentLength: &size,
|
||||
}
|
||||
// for go1.8 (see release notes) we must nil the Body if we want a
|
||||
// "Content-Length: 0" header which b2 requires for all files.
|
||||
if size == 0 {
|
||||
opts.Body = nil
|
||||
}
|
||||
var response api.FileInfo
|
||||
// Don't retry, return a retry error instead
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
// Test B2 filesystem interface
|
||||
package b2_test
|
||||
package b2
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/b2"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
@@ -12,6 +12,23 @@ import (
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestB2:",
|
||||
NilObject: (*b2.Object)(nil),
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: minChunkSize,
|
||||
NeedMultipleChunks: true,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadCutoff(cs)
|
||||
}
|
||||
|
||||
var (
|
||||
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
_ fstests.SetUploadCutoffer = (*Fs)(nil)
|
||||
)
|
||||
|
||||
@@ -116,8 +116,10 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
|
||||
},
|
||||
}
|
||||
// Set the SHA1 if known
|
||||
if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
|
||||
request.Info[sha1Key] = calculatedSha1
|
||||
if !o.fs.opt.DisableCheckSum {
|
||||
if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
|
||||
request.Info[sha1Key] = calculatedSha1
|
||||
}
|
||||
}
|
||||
var response api.StartLargeFileResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
|
||||
@@ -45,7 +45,7 @@ type Error struct {
|
||||
RequestID string `json:"request_id"`
|
||||
}
|
||||
|
||||
// Error returns a string for the error and statistifes the error interface
|
||||
// Error returns a string for the error and satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
out := fmt.Sprintf("Error %q (%d)", e.Code, e.Status)
|
||||
if e.Message != "" {
|
||||
@@ -57,7 +57,7 @@ func (e *Error) Error() string {
|
||||
return out
|
||||
}
|
||||
|
||||
// Check Error statisfies the error interface
|
||||
// Check Error satisfies the error interface
|
||||
var _ error = (*Error)(nil)
|
||||
|
||||
// ItemFields are the fields needed for FileInfo
|
||||
|
||||
@@ -85,7 +85,7 @@ func init() {
|
||||
Help: "Box App Client Secret\nLeave blank normally.",
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: "Cutoff for switching to multipart upload.",
|
||||
Help: "Cutoff for switching to multipart upload (>= 50MB).",
|
||||
Default: fs.SizeSuffix(defaultUploadCutoff),
|
||||
Advanced: true,
|
||||
}, {
|
||||
@@ -171,13 +171,13 @@ var retryErrorCodes = []int{
|
||||
// shouldRetry returns a boolean as to whether this resp and err
|
||||
// deserve to be retried. It returns the err as a convenience
|
||||
func shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
authRety := false
|
||||
authRetry := false
|
||||
|
||||
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
|
||||
authRety = true
|
||||
authRetry = true
|
||||
fs.Debugf(nil, "Should retry: %v", err)
|
||||
}
|
||||
return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
// substitute reserved characters for box
|
||||
@@ -252,7 +252,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
root = parsePath(root)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure Box: %v", err)
|
||||
return nil, errors.Wrap(err, "failed to configure Box")
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
@@ -283,16 +283,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
// Assume it is a file
|
||||
newRoot, remote := dircache.SplitPath(root)
|
||||
newF := *f
|
||||
newF.dirCache = dircache.New(newRoot, rootID, &newF)
|
||||
newF.root = newRoot
|
||||
tempF := *f
|
||||
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
|
||||
tempF.root = newRoot
|
||||
// Make new Fs which is the parent
|
||||
err = newF.dirCache.FindRoot(false)
|
||||
err = tempF.dirCache.FindRoot(false)
|
||||
if err != nil {
|
||||
// No root so return old f
|
||||
return f, nil
|
||||
}
|
||||
_, err := newF.newObjectWithInfo(remote, nil)
|
||||
_, err := tempF.newObjectWithInfo(remote, nil)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// File doesn't exist so return old f
|
||||
@@ -300,9 +300,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
f.features.Fill(&newF)
|
||||
f.features.Fill(&tempF)
|
||||
// XXX: update the old f here instead of returning tempF, since
|
||||
// `features` were already filled with functions having *f as a receiver.
|
||||
// See https://github.com/ncw/rclone/issues/2182
|
||||
f.dirCache = tempF.dirCache
|
||||
f.root = tempF.root
|
||||
// return an error with an fs which points to the parent
|
||||
return &newF, fs.ErrorIsFile
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
@@ -525,10 +530,10 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
exisitingObj, err := f.newObjectWithInfo(src.Remote(), nil)
|
||||
existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
|
||||
switch err {
|
||||
case nil:
|
||||
return exisitingObj, exisitingObj.Update(in, src, options...)
|
||||
return existingObj, existingObj.Update(in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
// Not found so create it
|
||||
return f.PutUnchecked(in, src)
|
||||
|
||||
@@ -211,8 +211,8 @@ outer:
|
||||
}
|
||||
|
||||
reqSize := remaining
|
||||
if reqSize >= int64(chunkSize) {
|
||||
reqSize = int64(chunkSize)
|
||||
if reqSize >= chunkSize {
|
||||
reqSize = chunkSize
|
||||
}
|
||||
|
||||
// Make a block of memory
|
||||
|
||||
230
backend/cache/cache.go
vendored
@@ -11,6 +11,7 @@ import (
|
||||
"os/signal"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -23,6 +24,7 @@ import (
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fspath"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/rc"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
@@ -86,8 +88,12 @@ func init() {
|
||||
Help: "Skip all certificate verifications when connecting to the Plex server",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "The size of a chunk. Lower value good for slow connections but can affect seamless reading.",
|
||||
Name: "chunk_size",
|
||||
Help: `The size of a chunk (partial file data).
|
||||
|
||||
Use lower numbers for slower connections. If the chunk size is
|
||||
changed, any downloaded chunks will be invalid and cache-chunk-path
|
||||
will need to be cleared or unexpected EOF errors will occur.`,
|
||||
Default: DefCacheChunkSize,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "1m",
|
||||
@@ -100,8 +106,10 @@ func init() {
|
||||
Help: "10 MB",
|
||||
}},
|
||||
}, {
|
||||
Name: "info_age",
|
||||
Help: "How much time should object info (file size, file hashes etc) be stored in cache.\nUse a very high value if you don't plan on changing the source FS from outside the cache.\nAccepted units are: \"s\", \"m\", \"h\".",
|
||||
Name: "info_age",
|
||||
Help: `How long to cache file structure information (directory listings, file size, times etc).
|
||||
If all write operations are done through the cache then you can safely make
|
||||
this value very large as the cache store will also be updated in real time.`,
|
||||
Default: DefCacheInfoAge,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "1h",
|
||||
@@ -114,8 +122,11 @@ func init() {
|
||||
Help: "48 hours",
|
||||
}},
|
||||
}, {
|
||||
Name: "chunk_total_size",
|
||||
Help: "The maximum size of stored chunks. When the storage grows beyond this size, the oldest chunks will be deleted.",
|
||||
Name: "chunk_total_size",
|
||||
Help: `The total size that the chunks can take up on the local disk.
|
||||
|
||||
If the cache exceeds this value then it will start to delete the
|
||||
oldest chunks until it goes under this value.`,
|
||||
Default: DefCacheTotalChunkSize,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "500M",
|
||||
@@ -130,63 +141,143 @@ func init() {
|
||||
}, {
|
||||
Name: "db_path",
|
||||
Default: filepath.Join(config.CacheDir, "cache-backend"),
|
||||
Help: "Directory to cache DB",
|
||||
Help: "Directory to store file structure metadata DB.\nThe remote name is used as the DB file name.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_path",
|
||||
Default: filepath.Join(config.CacheDir, "cache-backend"),
|
||||
Help: "Directory to cache chunk files",
|
||||
Name: "chunk_path",
|
||||
Default: filepath.Join(config.CacheDir, "cache-backend"),
|
||||
Help: `Directory to cache chunk files.
|
||||
|
||||
Path to where partial file data (chunks) are stored locally. The remote
|
||||
name is appended to the final path.
|
||||
|
||||
This config follows the "--cache-db-path". If you specify a custom
|
||||
location for "--cache-db-path" and don't specify one for "--cache-chunk-path"
|
||||
then "--cache-chunk-path" will use the same path as "--cache-db-path".`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "db_purge",
|
||||
Default: false,
|
||||
Help: "Purge the cache DB before",
|
||||
Help: "Clear all the cached data for this remote on start.",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_clean_interval",
|
||||
Default: DefCacheChunkCleanInterval,
|
||||
Help: "Interval at which chunk cleanup runs",
|
||||
Name: "chunk_clean_interval",
|
||||
Default: DefCacheChunkCleanInterval,
|
||||
Help: `How often should the cache perform cleanups of the chunk storage.
|
||||
The default value should be ok for most people. If you find that the
|
||||
cache goes over "cache-chunk-total-size" too often then try to lower
|
||||
this value to force it to perform cleanups more often.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "read_retries",
|
||||
Default: DefCacheReadRetries,
|
||||
Help: "How many times to retry a read from a cache storage",
|
||||
Name: "read_retries",
|
||||
Default: DefCacheReadRetries,
|
||||
Help: `How many times to retry a read from a cache storage.
|
||||
|
||||
Since reading from a cache stream is independent from downloading file
|
||||
data, readers can get to a point where there's no more data in the
|
||||
cache. Most of the times this can indicate a connectivity issue if
|
||||
cache isn't able to provide file data anymore.
|
||||
|
||||
For really slow connections, increase this to a point where the stream is
|
||||
able to provide data but your experience will be very stuttering.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "workers",
|
||||
Default: DefCacheTotalWorkers,
|
||||
Help: "How many workers should run in parallel to download chunks",
|
||||
Name: "workers",
|
||||
Default: DefCacheTotalWorkers,
|
||||
Help: `How many workers should run in parallel to download chunks.
|
||||
|
||||
Higher values will mean more parallel processing (better CPU needed)
|
||||
and more concurrent requests on the cloud provider. This impacts
|
||||
several aspects like the cloud provider API limits, more stress on the
|
||||
hardware that rclone runs on but it also means that streams will be
|
||||
more fluid and data will be available much more faster to readers.
|
||||
|
||||
**Note**: If the optional Plex integration is enabled then this
|
||||
setting will adapt to the type of reading performed and the value
|
||||
specified here will be used as a maximum number of workers to use.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_no_memory",
|
||||
Default: DefCacheChunkNoMemory,
|
||||
Help: "Disable the in-memory cache for storing chunks during streaming",
|
||||
Name: "chunk_no_memory",
|
||||
Default: DefCacheChunkNoMemory,
|
||||
Help: `Disable the in-memory cache for storing chunks during streaming.
|
||||
|
||||
By default, cache will keep file data during streaming in RAM as well
|
||||
to provide it to readers as fast as possible.
|
||||
|
||||
This transient data is evicted as soon as it is read and the number of
|
||||
chunks stored doesn't exceed the number of workers. However, depending
|
||||
on other settings like "cache-chunk-size" and "cache-workers" this footprint
|
||||
can increase if there are parallel streams too (multiple files being read
|
||||
at the same time).
|
||||
|
||||
If the hardware permits it, use this feature to provide an overall better
|
||||
performance during streaming but it can also be disabled if RAM is not
|
||||
available on the local machine.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "rps",
|
||||
Default: int(DefCacheRps),
|
||||
Help: "Limits the number of requests per second to the source FS. -1 disables the rate limiter",
|
||||
Name: "rps",
|
||||
Default: int(DefCacheRps),
|
||||
Help: `Limits the number of requests per second to the source FS (-1 to disable)
|
||||
|
||||
This setting places a hard limit on the number of requests per second
|
||||
that cache will be doing to the cloud provider remote and try to
|
||||
respect that value by setting waits between reads.
|
||||
|
||||
If you find that you're getting banned or limited on the cloud
|
||||
provider through cache and know that a smaller number of requests per
|
||||
second will allow you to work with it then you can use this setting
|
||||
for that.
|
||||
|
||||
A good balance of all the other settings should make this setting
|
||||
useless but it is available to set for more special cases.
|
||||
|
||||
**NOTE**: This will limit the number of requests during streams but
|
||||
other API calls to the cloud provider like directory listings will
|
||||
still pass.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "writes",
|
||||
Default: DefCacheWrites,
|
||||
Help: "Will cache file data on writes through the FS",
|
||||
Name: "writes",
|
||||
Default: DefCacheWrites,
|
||||
Help: `Cache file data on writes through the FS
|
||||
|
||||
If you need to read files immediately after you upload them through
|
||||
cache you can enable this flag to have their data stored in the
|
||||
cache store at the same time during upload.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "tmp_upload_path",
|
||||
Default: "",
|
||||
Help: "Directory to keep temporary files until they are uploaded to the cloud storage",
|
||||
Name: "tmp_upload_path",
|
||||
Default: "",
|
||||
Help: `Directory to keep temporary files until they are uploaded.
|
||||
|
||||
This is the path where cache will use as a temporary storage for new
|
||||
files that need to be uploaded to the cloud provider.
|
||||
|
||||
Specifying a value will enable this feature. Without it, it is
|
||||
completely disabled and files will be uploaded directly to the cloud
|
||||
provider`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "tmp_wait_time",
|
||||
Default: DefCacheTmpWaitTime,
|
||||
Help: "How long should files be stored in local cache before being uploaded",
|
||||
Name: "tmp_wait_time",
|
||||
Default: DefCacheTmpWaitTime,
|
||||
Help: `How long should files be stored in local cache before being uploaded
|
||||
|
||||
This is the duration that a file must wait in the temporary location
|
||||
_cache-tmp-upload-path_ before it is selected for upload.
|
||||
|
||||
Note that only one file is uploaded at a time and it can take longer
|
||||
to start the upload if a queue formed for this purpose.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "db_wait_time",
|
||||
Default: DefCacheDbWaitTime,
|
||||
Help: "How long to wait for the DB to be available - 0 is unlimited",
|
||||
Name: "db_wait_time",
|
||||
Default: DefCacheDbWaitTime,
|
||||
Help: `How long to wait for the DB to be available - 0 is unlimited
|
||||
|
||||
Only one process can have the DB open at any one time, so rclone waits
|
||||
for this duration for the DB to become available before it gives an
|
||||
error.
|
||||
|
||||
If you set it to 0 then it will wait forever.`,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
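Aside: the reworked help texts above switch to Go raw string literals (backquotes) so the option help can span several lines without escape sequences. A rough sketch of the idea with a made-up Option type; the real registration uses rclone's fs.Option as shown in the diff:

package main

import "fmt"

// Option mirrors the shape of the entries above: a name, a default value and
// a help string. The raw string literal lets the help run over multiple lines.
type Option struct {
	Name    string
	Default interface{}
	Help    string
}

func main() {
	opt := Option{
		Name:    "chunk_size",
		Default: "5M",
		Help: `The size of a chunk (partial file data).

Use lower numbers for slower connections.`,
	}
	fmt.Printf("--%s (default %v)\n%s\n", opt.Name, opt.Default, opt.Help)
}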
||||
@@ -273,7 +364,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", opt.Remote)
|
||||
}
|
||||
|
||||
remotePath := path.Join(wPath, rootPath)
|
||||
remotePath := fspath.JoinRootPath(wPath, rootPath)
|
||||
wrappedFs, wrapErr := wInfo.NewFs(wName, remotePath, wConfig)
|
||||
if wrapErr != nil && wrapErr != fs.ErrorIsFile {
|
||||
return nil, errors.Wrapf(wrapErr, "failed to make remote %s:%s to wrap", wName, remotePath)
|
||||
@@ -380,7 +471,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
fs.Infof(name, "Chunk Clean Interval: %v", f.opt.ChunkCleanInterval)
|
||||
fs.Infof(name, "Workers: %v", f.opt.TotalWorkers)
|
||||
fs.Infof(name, "File Age: %v", f.opt.InfoAge)
|
||||
if !f.opt.StoreWrites {
|
||||
if f.opt.StoreWrites {
|
||||
fs.Infof(name, "Cache Writes: enabled")
|
||||
}
|
||||
|
||||
@@ -415,7 +506,9 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}()
|
||||
|
||||
if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
|
||||
doChangeNotify(f.receiveChangeNotify, time.Duration(f.opt.ChunkCleanInterval))
|
||||
pollInterval := make(chan time.Duration, 1)
|
||||
pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
|
||||
doChangeNotify(f.receiveChangeNotify, pollInterval)
|
||||
}
|
||||
|
||||
f.features = (&fs.Features{
|
||||
@@ -483,7 +576,7 @@ The slice indices are similar to Python slices: start[:end]
|
||||
|
||||
start is the 0 based chunk number from the beginning of the file
|
||||
to fetch inclusive. end is 0 based chunk number from the beginning
|
||||
of the file to fetch exclisive.
|
||||
of the file to fetch exclusive.
|
||||
Both values can be negative, in which case they count from the back
|
||||
of the file. The value "-5:" represents the last 5 chunks of a file.
|
||||
|
||||
@@ -777,15 +870,18 @@ func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) {
|
||||
}
|
||||
}
|
||||
|
||||
// ChangeNotify can subsribe multiple callers
|
||||
// ChangeNotify can subscribe multiple callers
|
||||
// this is coupled with the wrapped fs ChangeNotify (if it supports it)
|
||||
// and also notifies other caches (i.e VFS) to clear out whenever something changes
|
||||
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
|
||||
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
|
||||
f.parentsForgetMu.Lock()
|
||||
defer f.parentsForgetMu.Unlock()
|
||||
fs.Debugf(f, "subscribing to ChangeNotify")
|
||||
f.parentsForgetFn = append(f.parentsForgetFn, notifyFunc)
|
||||
return make(chan bool)
|
||||
go func() {
|
||||
for range pollInterval {
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
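Aside: the new ChangeNotify contract above drops the returned chan bool and instead takes a caller-supplied channel of poll intervals. A rough, hypothetical sketch of how an implementation might register a callback and drain that channel (names here are illustrative, not the actual rclone code):

package main

import (
	"fmt"
	"time"
)

// EntryType stands in for fs.EntryType in this sketch.
type EntryType int

// notifier keeps the registered callbacks and consumes poll interval updates
// so the sender never blocks.
type notifier struct {
	callbacks []func(path string, entryType EntryType)
}

// ChangeNotify matches the new shape: register the callback, then watch the
// poll interval channel in the background instead of returning a channel.
func (n *notifier) ChangeNotify(notifyFunc func(string, EntryType), pollInterval <-chan time.Duration) {
	n.callbacks = append(n.callbacks, notifyFunc)
	go func() {
		for interval := range pollInterval {
			fmt.Println("poll interval updated to", interval)
		}
	}()
}

func main() {
	n := &notifier{}
	poll := make(chan time.Duration, 1)
	n.ChangeNotify(func(path string, _ EntryType) {
		fmt.Println("changed:", path)
	}, poll)
	poll <- time.Second // the caller pushes the initial polling interval
	close(poll)
	time.Sleep(10 * time.Millisecond) // give the goroutine time to print
}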
||||
// Name of the remote (as passed into NewFs)
|
||||
@@ -886,7 +982,6 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
fs.Debugf(dir, "list: cached entries: %v", entries)
|
||||
return entries, nil
|
||||
}
|
||||
// FIXME need to clean existing cached listing
|
||||
|
||||
// we first search any temporary files stored locally
|
||||
var cachedEntries fs.DirEntries
|
||||
@@ -912,27 +1007,42 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
}
|
||||
|
||||
// search from the source
|
||||
entries, err = f.Fs.List(dir)
|
||||
sourceEntries, err := f.Fs.List(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fs.Debugf(dir, "list: read %v from source", len(entries))
|
||||
fs.Debugf(dir, "list: source entries: %v", entries)
|
||||
fs.Debugf(dir, "list: read %v from source", len(sourceEntries))
|
||||
fs.Debugf(dir, "list: source entries: %v", sourceEntries)
|
||||
|
||||
sort.Sort(sourceEntries)
|
||||
for _, entry := range entries {
|
||||
entryRemote := entry.Remote()
|
||||
i := sort.Search(len(sourceEntries), func(i int) bool { return sourceEntries[i].Remote() >= entryRemote })
|
||||
if i < len(sourceEntries) && sourceEntries[i].Remote() == entryRemote {
|
||||
continue
|
||||
}
|
||||
fp := path.Join(f.Root(), entryRemote)
|
||||
switch entry.(type) {
|
||||
case fs.Object:
|
||||
_ = f.cache.RemoveObject(fp)
|
||||
case fs.Directory:
|
||||
_ = f.cache.RemoveDir(fp)
|
||||
}
|
||||
fs.Debugf(dir, "list: remove entry: %v", entryRemote)
|
||||
}
|
||||
entries = nil
|
||||
|
||||
// and then iterate over the ones from source (temp Objects will override source ones)
|
||||
var batchDirectories []*Directory
|
||||
for _, entry := range entries {
|
||||
sort.Sort(cachedEntries)
|
||||
tmpCnt := len(cachedEntries)
|
||||
for _, entry := range sourceEntries {
|
||||
switch o := entry.(type) {
|
||||
case fs.Object:
|
||||
// skip over temporary objects (might be uploading)
|
||||
found := false
|
||||
for _, t := range cachedEntries {
|
||||
if t.Remote() == o.Remote() {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if found {
|
||||
oRemote := o.Remote()
|
||||
i := sort.Search(tmpCnt, func(i int) bool { return cachedEntries[i].Remote() >= oRemote })
|
||||
if i < tmpCnt && cachedEntries[i].Remote() == oRemote {
|
||||
continue
|
||||
}
|
||||
co := ObjectFromOriginal(f, o).persist()
|
||||
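Aside: the listing merge above replaces nested linear scans with sort.Sort followed by sort.Search, that is, sort once and then do binary-search membership checks. A small self-contained sketch of that idiom over plain strings:

package main

import (
	"fmt"
	"sort"
)

// contains reports whether name is present in the sorted slice, using the
// same sort.Search idiom as the List change above.
func contains(sorted []string, name string) bool {
	i := sort.Search(len(sorted), func(i int) bool { return sorted[i] >= name })
	return i < len(sorted) && sorted[i] == name
}

func main() {
	cached := []string{"c.bin", "a.bin", "b.bin"}
	sort.Strings(cached) // sort.Search requires sorted input
	fmt.Println(contains(cached, "b.bin")) // true
	fmt.Println(contains(cached, "z.bin")) // false
}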
@@ -1439,7 +1549,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
}
|
||||
|
||||
if srcObj.isTempFile() {
|
||||
// we check if the feature is stil active
|
||||
// we check if the feature is still active
|
||||
if f.opt.TempWritePath == "" {
|
||||
fs.Errorf(srcObj, "can't copy - this is a local cached file but this feature is turned off this run")
|
||||
return nil, fs.ErrorCantCopy
|
||||
@@ -1515,7 +1625,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
|
||||
// if this is a temp object then we perform the changes locally
|
||||
if srcObj.isTempFile() {
|
||||
// we check if the feature is stil active
|
||||
// we check if the feature is still active
|
||||
if f.opt.TempWritePath == "" {
|
||||
fs.Errorf(srcObj, "can't move - this is a local cached file but this feature is turned off this run")
|
||||
return nil, fs.ErrorCantMove
|
||||
|
||||
66
backend/cache/cache_internal_test.go
vendored
@@ -4,6 +4,9 @@ package cache_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
goflag "flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
@@ -12,21 +15,12 @@ import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"encoding/base64"
|
||||
goflag "flag"
|
||||
"fmt"
|
||||
"runtime/debug"
|
||||
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
|
||||
"github.com/ncw/rclone/backend/cache"
|
||||
"github.com/ncw/rclone/backend/crypt"
|
||||
_ "github.com/ncw/rclone/backend/drive"
|
||||
@@ -36,10 +30,11 @@ import (
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/object"
|
||||
"github.com/ncw/rclone/fs/rc"
|
||||
"github.com/ncw/rclone/fs/rc/rcflags"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/ncw/rclone/vfs"
|
||||
"github.com/ncw/rclone/vfs/vfsflags"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -392,10 +387,10 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
|
||||
|
||||
// write the object
|
||||
o := runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
|
||||
require.Equal(t, o.Size(), int64(testSize))
|
||||
require.Equal(t, o.Size(), testSize)
|
||||
time.Sleep(time.Second * 3)
|
||||
|
||||
checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, int64(testSize), false)
|
||||
checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(len(checkSample)), o.Size())
|
||||
|
||||
@@ -695,8 +690,8 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestInternalChangeSeenAfterRc(t *testing.T) {
|
||||
rcflags.Opt.Enabled = true
|
||||
rc.Start(&rcflags.Opt)
|
||||
cacheExpire := rc.Calls.Get("cache/expire")
|
||||
assert.NotNil(t, cacheExpire)
|
||||
|
||||
id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
@@ -729,13 +724,9 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
|
||||
|
||||
m := make(map[string]string)
|
||||
res, err := http.Post(fmt.Sprintf("http://localhost:5572/cache/expire?remote=%s", "data.bin"), "application/json; charset=utf-8", strings.NewReader(""))
|
||||
// Call the rc function
|
||||
m, err := cacheExpire.Fn(rc.Params{"remote": "data.bin"})
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
_ = res.Body.Close()
|
||||
}()
|
||||
_ = json.NewDecoder(res.Body).Decode(&m)
|
||||
require.Contains(t, m, "status")
|
||||
require.Contains(t, m, "message")
|
||||
require.Equal(t, "ok", m["status"])
|
||||
@@ -745,23 +736,21 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
|
||||
co, err = rootFs.NewObject("data.bin")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
|
||||
li1, err := runInstance.list(t, rootFs, "")
|
||||
_, err = runInstance.list(t, rootFs, "")
|
||||
require.NoError(t, err)
|
||||
|
||||
// create some rand test data
|
||||
testData2 := randStringBytes(int(chunkSize))
|
||||
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)
|
||||
|
||||
// list should have 1 item only
|
||||
li1, err = runInstance.list(t, rootFs, "")
|
||||
li1, err := runInstance.list(t, rootFs, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, li1, 1)
|
||||
|
||||
m = make(map[string]string)
|
||||
res2, err := http.Post("http://localhost:5572/cache/expire?remote=/", "application/json; charset=utf-8", strings.NewReader(""))
|
||||
// Call the rc function
|
||||
m, err = cacheExpire.Fn(rc.Params{"remote": "/"})
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
_ = res2.Body.Close()
|
||||
}()
|
||||
_ = json.NewDecoder(res2.Body).Decode(&m)
|
||||
require.Contains(t, m, "status")
|
||||
require.Contains(t, m, "message")
|
||||
require.Equal(t, "ok", m["status"])
|
||||
@@ -769,6 +758,7 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
|
||||
|
||||
// list should have 2 items now
|
||||
li2, err := runInstance.list(t, rootFs, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, li2, 2)
|
||||
}
|
||||
|
||||
@@ -1505,7 +1495,8 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
|
||||
var err error
|
||||
|
||||
if r.useMount {
|
||||
f, err := os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
var f *os.File
|
||||
f, err = os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1515,7 +1506,8 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
|
||||
}()
|
||||
_, err = f.WriteString(data + append)
|
||||
} else {
|
||||
obj1, err := rootFs.NewObject(src)
|
||||
var obj1 fs.Object
|
||||
obj1, err = rootFs.NewObject(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1647,15 +1639,13 @@ func (r *run) getCacheFs(f fs.Fs) (*cache.Fs, error) {
|
||||
cfs, ok := f.(*cache.Fs)
|
||||
if ok {
|
||||
return cfs, nil
|
||||
} else {
|
||||
if f.Features().UnWrap != nil {
|
||||
cfs, ok := f.Features().UnWrap().(*cache.Fs)
|
||||
if ok {
|
||||
return cfs, nil
|
||||
}
|
||||
}
|
||||
if f.Features().UnWrap != nil {
|
||||
cfs, ok := f.Features().UnWrap().(*cache.Fs)
|
||||
if ok {
|
||||
return cfs, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("didn't found a cache fs")
|
||||
}
|
||||
|
||||
|
||||
3
backend/cache/cache_upload_test.go
vendored
@@ -3,6 +3,7 @@
|
||||
package cache_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path"
|
||||
@@ -10,8 +11,6 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"fmt"
|
||||
|
||||
"github.com/ncw/rclone/backend/cache"
|
||||
_ "github.com/ncw/rclone/backend/drive"
|
||||
"github.com/ncw/rclone/fs"
|
||||
|
||||
3
backend/cache/directory.go
vendored
@@ -3,9 +3,8 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
5
backend/cache/handle.go
vendored
@@ -5,12 +5,11 @@ package cache
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/operations"
|
||||
|
||||
10
backend/cache/plex.go
vendored
@@ -3,21 +3,19 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"sync"
|
||||
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/patrickmn/go-cache"
|
||||
cache "github.com/patrickmn/go-cache"
|
||||
"golang.org/x/net/websocket"
|
||||
)
|
||||
|
||||
|
||||
2
backend/cache/storage_memory.go
vendored
@@ -8,7 +8,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/patrickmn/go-cache"
|
||||
cache "github.com/patrickmn/go-cache"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
|
||||
15
backend/cache/storage_persistent.go
vendored
@@ -3,20 +3,17 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"io/ioutil"
|
||||
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
bolt "github.com/coreos/bbolt"
|
||||
"github.com/ncw/rclone/fs"
|
||||
@@ -401,7 +398,7 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
|
||||
}
|
||||
err = bucket.Put([]byte(cachedObject.Name), []byte(encoded))
|
||||
err = bucket.Put([]byte(cachedObject.Name), encoded)
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
|
||||
}
|
||||
@@ -812,7 +809,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
|
||||
}
|
||||
err = bucket.Put([]byte(destPath), []byte(encoded))
|
||||
err = bucket.Put([]byte(destPath), encoded)
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
|
||||
}
|
||||
@@ -1052,7 +1049,7 @@ func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
|
||||
}
|
||||
err = bucket.Put([]byte(destPath), []byte(encoded))
|
||||
err = bucket.Put([]byte(destPath), encoded)
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
|
||||
}
|
||||
|
||||
@@ -17,11 +17,9 @@ import (
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/rfjakob/eme"
|
||||
"golang.org/x/crypto/nacl/secretbox"
|
||||
"golang.org/x/crypto/scrypt"
|
||||
|
||||
"github.com/rfjakob/eme"
|
||||
)
|
||||
|
||||
// Constants
|
||||
@@ -43,6 +41,7 @@ var (
|
||||
ErrorBadDecryptControlChar = errors.New("bad decryption - contains control chars")
|
||||
ErrorNotAMultipleOfBlocksize = errors.New("not a multiple of blocksize")
|
||||
ErrorTooShortAfterDecode = errors.New("too short after base32 decode")
|
||||
ErrorTooLongAfterDecode = errors.New("too long after base32 decode")
|
||||
ErrorEncryptedFileTooShort = errors.New("file is too short to be encrypted")
|
||||
ErrorEncryptedFileBadHeader = errors.New("file has truncated block header")
|
||||
ErrorEncryptedBadMagic = errors.New("not an encrypted file - bad magic string")
|
||||
@@ -286,6 +285,9 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
|
||||
// not possible if decodeFilename() working correctly
|
||||
return "", ErrorTooShortAfterDecode
|
||||
}
|
||||
if len(rawCiphertext) > 2048 {
|
||||
return "", ErrorTooLongAfterDecode
|
||||
}
|
||||
paddedPlaintext := eme.Transform(c.block, c.nameTweak[:], rawCiphertext, eme.DirectionDecrypt)
|
||||
plaintext, err := pkcs7.Unpad(nameCipherBlockSize, paddedPlaintext)
|
||||
if err != nil {
|
||||
@@ -461,7 +463,7 @@ func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
|
||||
if int(newRune) < base {
|
||||
newRune += 256
|
||||
}
|
||||
_, _ = result.WriteRune(rune(newRune))
|
||||
_, _ = result.WriteRune(newRune)
|
||||
|
||||
default:
|
||||
_, _ = result.WriteRune(runeValue)
|
||||
@@ -746,7 +748,7 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
|
||||
if !bytes.Equal(readBuf[:fileMagicSize], fileMagicBytes) {
|
||||
return nil, fh.finishAndClose(ErrorEncryptedBadMagic)
|
||||
}
|
||||
// retreive the nonce
|
||||
// retrieve the nonce
|
||||
fh.nonce.fromBuf(readBuf[fileMagicSize:])
|
||||
fh.initialNonce = fh.nonce
|
||||
return fh, nil
|
||||
|
||||
@@ -194,6 +194,10 @@ func TestEncryptSegment(t *testing.T) {
|
||||
|
||||
func TestDecryptSegment(t *testing.T) {
|
||||
// We've tested the forwards above, now concentrate on the errors
|
||||
longName := make([]byte, 3328)
|
||||
for i := range longName {
|
||||
longName[i] = 'a'
|
||||
}
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true)
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
@@ -201,6 +205,7 @@ func TestDecryptSegment(t *testing.T) {
|
||||
}{
|
||||
{"64=", ErrorBadBase32Encoding},
|
||||
{"!", base32.CorruptInputError(0)},
|
||||
{string(longName), ErrorTooLongAfterDecode},
|
||||
{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
|
||||
{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
|
||||
{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
|
||||
|
||||
@@ -4,7 +4,6 @@ package crypt
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -13,6 +12,7 @@ import (
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fspath"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -67,8 +67,16 @@ func init() {
|
||||
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "show_mapping",
|
||||
Help: "For all files listed show how the names encrypt.",
|
||||
Name: "show_mapping",
|
||||
Help: `For all files listed show how the names encrypt.
|
||||
|
||||
If this flag is set then for each file that the remote is asked to
|
||||
list, it will log (at level INFO) a line stating the decrypted file
|
||||
name and the encrypted file name.
|
||||
|
||||
This is so you can work out which encrypted names are which decrypted
|
||||
names just in case you need to do something with the encrypted file
|
||||
names, or for debugging purposes.`,
|
||||
Default: false,
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
@@ -114,7 +122,7 @@ func NewCipher(m configmap.Mapper) (Cipher, error) {
|
||||
return newCipherForConfig(opt)
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
@@ -135,11 +143,11 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
|
||||
}
|
||||
// Look for a file first
|
||||
remotePath := path.Join(wPath, cipher.EncryptFileName(rpath))
|
||||
remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath))
|
||||
wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
|
||||
// if that didn't produce a file, look for a directory
|
||||
if err != fs.ErrorIsFile {
|
||||
remotePath = path.Join(wPath, cipher.EncryptDirName(rpath))
|
||||
remotePath = fspath.JoinRootPath(wPath, cipher.EncryptDirName(rpath))
|
||||
wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig)
|
||||
}
|
||||
if err != fs.ErrorIsFile && err != nil {
|
||||
@@ -165,7 +173,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
|
||||
doChangeNotify := wrappedFs.Features().ChangeNotify
|
||||
if doChangeNotify != nil {
|
||||
f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
|
||||
f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
|
||||
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
|
||||
decrypted, err := f.DecryptFileName(path)
|
||||
if err != nil {
|
||||
@@ -174,7 +182,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
notifyFunc(decrypted, entryType)
|
||||
}
|
||||
return doChangeNotify(wrappedNotifyFunc, pollInterval)
|
||||
doChangeNotify(wrappedNotifyFunc, pollInterval)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -547,7 +555,7 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
|
||||
}
|
||||
|
||||
// ComputeHash takes the nonce from o, and encrypts the contents of
|
||||
// src with it, and calcuates the hash given by HashType on the fly
|
||||
// src with it, and calculates the hash given by HashType on the fly
|
||||
//
|
||||
// Note that we break lots of encapsulation in this function.
|
||||
func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
|
||||
|
||||
@@ -7,13 +7,30 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/crypt"
|
||||
_ "github.com/ncw/rclone/backend/drive" // for integration tests
|
||||
_ "github.com/ncw/rclone/backend/local"
|
||||
_ "github.com/ncw/rclone/backend/swift" // for integration tests
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
if *fstest.RemoteName == "" {
|
||||
t.Skip("Skipping as -remote not set")
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: *fstest.RemoteName,
|
||||
NilObject: (*crypt.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
// TestStandard runs integration tests against the remote
|
||||
func TestStandard(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
|
||||
name := "TestCrypt"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
@@ -30,6 +47,9 @@ func TestStandard(t *testing.T) {
|
||||
|
||||
// TestOff runs integration tests against the remote
|
||||
func TestOff(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off")
|
||||
name := "TestCrypt2"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
@@ -46,6 +66,9 @@ func TestOff(t *testing.T) {
|
||||
|
||||
// TestObfuscate runs integration tests against the remote
|
||||
func TestObfuscate(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
|
||||
name := "TestCrypt3"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,62 +1,82 @@
|
||||
// +build go1.9
|
||||
|
||||
package drive
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"google.golang.org/api/drive/v3"
|
||||
|
||||
_ "github.com/ncw/rclone/backend/local"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/operations"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/api/drive/v3"
|
||||
)
|
||||
|
||||
const exampleExportFormats = `{
|
||||
"application/vnd.google-apps.document": [
|
||||
"application/rtf",
|
||||
"application/vnd.oasis.opendocument.text",
|
||||
"text/html",
|
||||
"application/pdf",
|
||||
"application/epub+zip",
|
||||
"application/zip",
|
||||
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
|
||||
"text/plain"
|
||||
],
|
||||
"application/vnd.google-apps.spreadsheet": [
|
||||
"application/x-vnd.oasis.opendocument.spreadsheet",
|
||||
"text/tab-separated-values",
|
||||
"application/pdf",
|
||||
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
|
||||
"text/csv",
|
||||
"application/zip",
|
||||
"application/vnd.oasis.opendocument.spreadsheet"
|
||||
],
|
||||
"application/vnd.google-apps.jam": [
|
||||
"application/pdf"
|
||||
],
|
||||
"application/vnd.google-apps.script": [
|
||||
"application/vnd.google-apps.script+json"
|
||||
],
|
||||
"application/vnd.google-apps.presentation": [
|
||||
"application/vnd.oasis.opendocument.presentation",
|
||||
"application/pdf",
|
||||
"application/vnd.openxmlformats-officedocument.presentationml.presentation",
|
||||
"text/plain"
|
||||
],
|
||||
"application/vnd.google-apps.form": [
|
||||
"application/zip"
|
||||
],
|
||||
"application/vnd.google-apps.drawing": [
|
||||
"image/svg+xml",
|
||||
"image/png",
|
||||
"application/pdf",
|
||||
"image/jpeg"
|
||||
]
|
||||
}`
|
||||
func TestDriveScopes(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
want []string
|
||||
wantFlag bool
|
||||
}{
|
||||
{"", []string{
|
||||
"https://www.googleapis.com/auth/drive",
|
||||
}, false},
|
||||
{" drive.file , drive.readonly", []string{
|
||||
"https://www.googleapis.com/auth/drive.file",
|
||||
"https://www.googleapis.com/auth/drive.readonly",
|
||||
}, false},
|
||||
{" drive.file , drive.appfolder", []string{
|
||||
"https://www.googleapis.com/auth/drive.file",
|
||||
"https://www.googleapis.com/auth/drive.appfolder",
|
||||
}, true},
|
||||
} {
|
||||
got := driveScopes(test.in)
|
||||
assert.Equal(t, test.want, got, test.in)
|
||||
gotFlag := driveScopesContainsAppFolder(got)
|
||||
assert.Equal(t, test.wantFlag, gotFlag, test.in)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
var additionalMimeTypes = map[string]string{
|
||||
"application/vnd.ms-excel.sheet.macroenabled.12": ".xlsm",
|
||||
"application/vnd.ms-excel.template.macroenabled.12": ".xltm",
|
||||
"application/vnd.ms-powerpoint.presentation.macroenabled.12": ".pptm",
|
||||
"application/vnd.ms-powerpoint.slideshow.macroenabled.12": ".ppsm",
|
||||
"application/vnd.ms-powerpoint.template.macroenabled.12": ".potm",
|
||||
"application/vnd.ms-powerpoint": ".ppt",
|
||||
"application/vnd.ms-word.document.macroenabled.12": ".docm",
|
||||
"application/vnd.ms-word.template.macroenabled.12": ".dotm",
|
||||
"application/vnd.openxmlformats-officedocument.presentationml.template": ".potx",
|
||||
"application/vnd.openxmlformats-officedocument.spreadsheetml.template": ".xltx",
|
||||
"application/vnd.openxmlformats-officedocument.wordprocessingml.template": ".dotx",
|
||||
"application/vnd.sun.xml.writer": ".sxw",
|
||||
"text/richtext": ".rtf",
|
||||
}
|
||||
*/
|
||||
|
||||
// Load the example export formats into exportFormats for testing
|
||||
func TestInternalLoadExampleExportFormats(t *testing.T) {
|
||||
exportFormatsOnce.Do(func() {})
|
||||
assert.NoError(t, json.Unmarshal([]byte(exampleExportFormats), &_exportFormats))
|
||||
func TestInternalLoadExampleFormats(t *testing.T) {
|
||||
fetchFormatsOnce.Do(func() {})
|
||||
buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
|
||||
var about struct {
|
||||
ExportFormats map[string][]string `json:"exportFormats,omitempty"`
|
||||
ImportFormats map[string][]string `json:"importFormats,omitempty"`
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, json.Unmarshal(buf, &about))
|
||||
_exportFormats = fixMimeTypeMap(about.ExportFormats)
|
||||
_importFormats = fixMimeTypeMap(about.ImportFormats)
|
||||
}
|
||||
|
||||
func TestInternalParseExtensions(t *testing.T) {
|
||||
@@ -65,27 +85,24 @@ func TestInternalParseExtensions(t *testing.T) {
|
||||
want []string
|
||||
wantErr error
|
||||
}{
|
||||
{"doc", []string{"doc"}, nil},
|
||||
{" docx ,XLSX, pptx,svg", []string{"docx", "xlsx", "pptx", "svg"}, nil},
|
||||
{"docx,svg,Docx", []string{"docx", "svg"}, nil},
|
||||
{"docx,potato,docx", []string{"docx"}, errors.New(`couldn't find mime type for extension "potato"`)},
|
||||
{"doc", []string{".doc"}, nil},
|
||||
{" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
|
||||
{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
|
||||
{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
|
||||
} {
|
||||
f := new(Fs)
|
||||
gotErr := f.parseExtensions(test.in)
|
||||
extensions, _, gotErr := parseExtensions(test.in)
|
||||
if test.wantErr == nil {
|
||||
assert.NoError(t, gotErr)
|
||||
} else {
|
||||
assert.EqualError(t, gotErr, test.wantErr.Error())
|
||||
}
|
||||
assert.Equal(t, test.want, f.extensions)
|
||||
assert.Equal(t, test.want, extensions)
|
||||
}
|
||||
|
||||
// Test it is appending
|
||||
f := new(Fs)
|
||||
assert.Nil(t, f.parseExtensions("docx,svg"))
|
||||
assert.Nil(t, f.parseExtensions("docx,svg,xlsx"))
|
||||
assert.Equal(t, []string{"docx", "svg", "xlsx"}, f.extensions)
|
||||
|
||||
extensions, _, gotErr := parseExtensions("docx,svg", "docx,svg,xlsx")
|
||||
assert.NoError(t, gotErr)
|
||||
assert.Equal(t, []string{".docx", ".svg", ".xlsx"}, extensions)
|
||||
}
|
||||
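Aside: the test expectations above change from "doc" to ".doc" style extensions, which matches how the standard library mime package keys its lookups. A short sketch; the printed values depend on the platform's MIME tables, so they are only typical:

package main

import (
	"fmt"
	"mime"
)

func main() {
	// Extension to MIME type: the argument must include the leading dot.
	fmt.Println(mime.TypeByExtension(".html")) // typically "text/html; charset=utf-8"

	// MIME type back to extensions: results also carry the leading dot.
	exts, err := mime.ExtensionsByType("application/pdf")
	if err != nil {
		panic(err)
	}
	fmt.Println(exts) // typically includes ".pdf"
}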
|
||||
func TestInternalFindExportFormat(t *testing.T) {
|
||||
@@ -99,17 +116,17 @@ func TestInternalFindExportFormat(t *testing.T) {
|
||||
wantMimeType string
|
||||
}{
|
||||
{[]string{}, "", ""},
|
||||
{[]string{"pdf"}, "pdf", "application/pdf"},
|
||||
{[]string{"pdf", "rtf", "xls"}, "pdf", "application/pdf"},
|
||||
{[]string{"xls", "rtf", "pdf"}, "rtf", "application/rtf"},
|
||||
{[]string{"xls", "csv", "svg"}, "", ""},
|
||||
{[]string{".pdf"}, ".pdf", "application/pdf"},
|
||||
{[]string{".pdf", ".rtf", ".xls"}, ".pdf", "application/pdf"},
|
||||
{[]string{".xls", ".rtf", ".pdf"}, ".rtf", "application/rtf"},
|
||||
{[]string{".xls", ".csv", ".svg"}, "", ""},
|
||||
} {
|
||||
f := new(Fs)
|
||||
f.extensions = test.extensions
|
||||
f.exportExtensions = test.extensions
|
||||
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
|
||||
assert.Equal(t, test.wantExtension, gotExtension)
|
||||
if test.wantExtension != "" {
|
||||
assert.Equal(t, item.Name+"."+gotExtension, gotFilename)
|
||||
assert.Equal(t, item.Name+gotExtension, gotFilename)
|
||||
} else {
|
||||
assert.Equal(t, "", gotFilename)
|
||||
}
|
||||
@@ -117,3 +134,155 @@ func TestInternalFindExportFormat(t *testing.T) {
|
||||
assert.Equal(t, true, gotIsDocument)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMimeTypesToExtension(t *testing.T) {
|
||||
for mimeType, extension := range _mimeTypeToExtension {
|
||||
extensions, err := mime.ExtensionsByType(mimeType)
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, extensions, extension)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtensionToMimeType(t *testing.T) {
|
||||
for mimeType, extension := range _mimeTypeToExtension {
|
||||
gotMimeType := mime.TypeByExtension(extension)
|
||||
mediatype, _, err := mime.ParseMediaType(gotMimeType)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, mimeType, mediatype)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtensionsForExportFormats(t *testing.T) {
|
||||
if _exportFormats == nil {
|
||||
t.Error("exportFormats == nil")
|
||||
}
|
||||
for fromMT, toMTs := range _exportFormats {
|
||||
for _, toMT := range toMTs {
|
||||
if !isInternalMimeType(toMT) {
|
||||
extensions, err := mime.ExtensionsByType(toMT)
|
||||
assert.NoError(t, err, "invalid MIME type %q", toMT)
|
||||
assert.NotEmpty(t, extensions, "No extension found for %q (from: %q)", fromMT, toMT)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtensionsForImportFormats(t *testing.T) {
|
||||
t.Skip()
|
||||
if _importFormats == nil {
|
||||
t.Error("_importFormats == nil")
|
||||
}
|
||||
for fromMT := range _importFormats {
|
||||
if !isInternalMimeType(fromMT) {
|
||||
extensions, err := mime.ExtensionsByType(fromMT)
|
||||
assert.NoError(t, err, "invalid MIME type %q", fromMT)
|
||||
assert.NotEmpty(t, extensions, "No extension found for %q", fromMT)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTestDocumentImport(t *testing.T) {
|
||||
oldAllow := f.opt.AllowImportNameChange
|
||||
f.opt.AllowImportNameChange = true
|
||||
defer func() {
|
||||
f.opt.AllowImportNameChange = oldAllow
|
||||
}()
|
||||
|
||||
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
|
||||
require.NoError(t, err)
|
||||
|
||||
testFilesFs, err := fs.NewFs(testFilesPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = operations.CopyFile(f, testFilesFs, "example2.doc", "example2.doc")
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
|
||||
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
|
||||
require.NoError(t, err)
|
||||
|
||||
testFilesFs, err := fs.NewFs(testFilesPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = operations.CopyFile(f, testFilesFs, "example2.xlsx", "example1.ods")
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTestDocumentExport(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
var err error
|
||||
|
||||
f.exportExtensions, _, err = parseExtensions("txt")
|
||||
require.NoError(t, err)
|
||||
|
||||
obj, err := f.NewObject("example2.txt")
|
||||
require.NoError(t, err)
|
||||
|
||||
rc, err := obj.Open()
|
||||
require.NoError(t, err)
|
||||
defer func() { require.NoError(t, rc.Close()) }()
|
||||
|
||||
_, err = io.Copy(&buf, rc)
|
||||
require.NoError(t, err)
|
||||
text := buf.String()
|
||||
|
||||
for _, excerpt := range []string{
|
||||
"Lorem ipsum dolor sit amet, consectetur",
|
||||
"porta at ultrices in, consectetur at augue.",
|
||||
} {
|
||||
require.Contains(t, text, excerpt)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTestDocumentLink(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
var err error
|
||||
|
||||
f.exportExtensions, _, err = parseExtensions("link.html")
|
||||
require.NoError(t, err)
|
||||
|
||||
obj, err := f.NewObject("example2.link.html")
|
||||
require.NoError(t, err)
|
||||
|
||||
rc, err := obj.Open()
|
||||
require.NoError(t, err)
|
||||
defer func() { require.NoError(t, rc.Close()) }()
|
||||
|
||||
_, err = io.Copy(&buf, rc)
|
||||
require.NoError(t, err)
|
||||
text := buf.String()
|
||||
|
||||
require.True(t, strings.HasPrefix(text, "<html>"))
|
||||
require.True(t, strings.HasSuffix(text, "</html>\n"))
|
||||
for _, excerpt := range []string{
|
||||
`<meta http-equiv="refresh"`,
|
||||
`Loading <a href="`,
|
||||
} {
|
||||
require.Contains(t, text, excerpt)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
// These tests all depend on each other so run them as nested tests
|
||||
t.Run("DocumentImport", func(t *testing.T) {
|
||||
f.InternalTestDocumentImport(t)
|
||||
t.Run("DocumentUpdate", func(t *testing.T) {
|
||||
f.InternalTestDocumentUpdate(t)
|
||||
t.Run("DocumentExport", func(t *testing.T) {
|
||||
f.InternalTestDocumentExport(t)
|
||||
t.Run("DocumentLink", func(t *testing.T) {
|
||||
f.InternalTestDocumentLink(t)
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
|
||||
|
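Aside: InternalTest above nests its subtests with t.Run so that each document test only runs after the one it depends on. A generic sketch of that pattern, with made-up test names unrelated to the drive backend:

package example

import "testing"

// setup prepares shared state for the dependent subtests below.
func setup(t *testing.T) []string {
	t.Helper()
	return []string{"initial"}
}

// TestPipeline nests subtests so the inner steps only run after the outer
// ones have done their work, mirroring the Import -> Update -> Export -> Link
// chain in InternalTest.
func TestPipeline(t *testing.T) {
	data := setup(t)
	t.Run("Transform", func(t *testing.T) {
		data = append(data, "transformed")
		t.Run("Verify", func(t *testing.T) {
			if len(data) != 2 {
				t.Fatalf("unexpected length %d", len(data))
			}
		})
	})
}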
||||
@@ -1,10 +1,13 @@
|
||||
// Test Drive filesystem interface
|
||||
package drive_test
|
||||
|
||||
// +build go1.9
|
||||
|
||||
package drive
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/drive"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
@@ -12,6 +15,23 @@ import (
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestDrive:",
|
||||
NilObject: (*drive.Object)(nil),
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: minChunkSize,
|
||||
CeilChunkSize: fstests.NextPowerOfTwo,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadCutoff(cs)
|
||||
}
|
||||
|
||||
var (
|
||||
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
_ fstests.SetUploadCutoffer = (*Fs)(nil)
|
||||
)
|
||||
|
||||
6
backend/drive/drive_unsupported.go
Normal file
@@ -0,0 +1,6 @@
|
||||
// Build for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
// +build !go1.9
|
||||
|
||||
package drive
|
||||
178
backend/drive/test/about.json
Normal file
@@ -0,0 +1,178 @@
|
||||
{
|
||||
"importFormats": {
|
||||
"text/tab-separated-values": [
|
||||
"application/vnd.google-apps.spreadsheet"
|
||||
],
|
||||
"application/x-vnd.oasis.opendocument.presentation": [
|
||||
"application/vnd.google-apps.presentation"
|
||||
],
|
||||
"image/jpeg": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"image/bmp": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"image/gif": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/vnd.ms-excel.sheet.macroenabled.12": [
|
||||
"application/vnd.google-apps.spreadsheet"
|
||||
],
|
||||
"application/vnd.openxmlformats-officedocument.wordprocessingml.template": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/vnd.ms-powerpoint.presentation.macroenabled.12": [
|
||||
"application/vnd.google-apps.presentation"
|
||||
],
|
||||
"application/vnd.ms-word.template.macroenabled.12": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"image/pjpeg": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/vnd.google-apps.script+text/plain": [
|
||||
"application/vnd.google-apps.script"
|
||||
],
|
||||
"application/vnd.ms-excel": [
|
||||
"application/vnd.google-apps.spreadsheet"
|
||||
],
|
||||
"application/vnd.sun.xml.writer": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/vnd.ms-word.document.macroenabled.12": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/vnd.ms-powerpoint.slideshow.macroenabled.12": [
|
||||
"application/vnd.google-apps.presentation"
|
||||
],
|
||||
"text/rtf": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"text/plain": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/vnd.oasis.opendocument.spreadsheet": [
|
||||
"application/vnd.google-apps.spreadsheet"
|
||||
],
|
||||
"application/x-vnd.oasis.opendocument.spreadsheet": [
|
||||
"application/vnd.google-apps.spreadsheet"
|
||||
],
|
||||
"image/png": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/x-vnd.oasis.opendocument.text": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/msword": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/pdf": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/json": [
|
||||
"application/vnd.google-apps.script"
|
||||
],
|
||||
"application/x-msmetafile": [
|
||||
"application/vnd.google-apps.drawing"
|
||||
],
|
||||
"application/vnd.openxmlformats-officedocument.spreadsheetml.template": [
|
||||
"application/vnd.google-apps.spreadsheet"
|
||||
],
|
||||
"application/vnd.ms-powerpoint": [
|
||||
"application/vnd.google-apps.presentation"
|
||||
],
|
||||
"application/vnd.ms-excel.template.macroenabled.12": [
|
||||
"application/vnd.google-apps.spreadsheet"
|
||||
],
|
||||
"image/x-bmp": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/rtf": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/vnd.openxmlformats-officedocument.presentationml.template": [
|
||||
"application/vnd.google-apps.presentation"
|
||||
],
|
||||
"image/x-png": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"text/html": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/vnd.oasis.opendocument.text": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/vnd.openxmlformats-officedocument.presentationml.presentation": [
|
||||
"application/vnd.google-apps.presentation"
|
||||
],
|
||||
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": [
|
||||
"application/vnd.google-apps.spreadsheet"
|
||||
],
|
||||
"application/vnd.google-apps.script+json": [
|
||||
"application/vnd.google-apps.script"
|
||||
],
|
||||
"application/vnd.openxmlformats-officedocument.presentationml.slideshow": [
|
||||
"application/vnd.google-apps.presentation"
|
||||
],
|
||||
"application/vnd.ms-powerpoint.template.macroenabled.12": [
|
||||
"application/vnd.google-apps.presentation"
|
||||
],
|
||||
"text/csv": [
|
||||
"application/vnd.google-apps.spreadsheet"
|
||||
],
|
||||
"application/vnd.oasis.opendocument.presentation": [
|
||||
"application/vnd.google-apps.presentation"
|
||||
],
|
||||
"image/jpg": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"text/richtext": [
|
||||
"application/vnd.google-apps.document"
|
||||
]
|
||||
},
|
||||
"exportFormats": {
|
||||
"application/vnd.google-apps.document": [
|
||||
"application/rtf",
|
||||
"application/vnd.oasis.opendocument.text",
|
||||
"text/html",
|
||||
"application/pdf",
|
||||
"application/epub+zip",
|
||||
"application/zip",
|
||||
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
|
||||
"text/plain"
|
||||
],
|
||||
"application/vnd.google-apps.spreadsheet": [
|
||||
"application/x-vnd.oasis.opendocument.spreadsheet",
|
||||
"text/tab-separated-values",
|
||||
"application/pdf",
|
||||
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
|
||||
"text/csv",
|
||||
"application/zip",
|
||||
"application/vnd.oasis.opendocument.spreadsheet"
|
||||
],
|
||||
"application/vnd.google-apps.jam": [
|
||||
"application/pdf"
|
||||
],
|
||||
"application/vnd.google-apps.script": [
|
||||
"application/vnd.google-apps.script+json"
|
||||
],
|
||||
"application/vnd.google-apps.presentation": [
|
||||
"application/vnd.oasis.opendocument.presentation",
|
||||
"application/pdf",
|
||||
"application/vnd.openxmlformats-officedocument.presentationml.presentation",
|
||||
"text/plain"
|
||||
],
|
||||
"application/vnd.google-apps.form": [
|
||||
"application/zip"
|
||||
],
|
||||
"application/vnd.google-apps.drawing": [
|
||||
"image/svg+xml",
|
||||
"image/png",
|
||||
"application/pdf",
|
||||
"image/jpeg"
|
||||
]
|
||||
}
|
||||
}
|
||||
BIN
backend/drive/test/files/example1.ods
Normal file
Binary file not shown.
BIN
backend/drive/test/files/example2.doc
Normal file
Binary file not shown.
BIN
backend/drive/test/files/example3.odt
Normal file
Binary file not shown.
@@ -8,6 +8,8 @@
//
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS

// +build go1.9

package drive

import (
@@ -50,11 +52,12 @@ type resumableUpload struct {
}

// Upload the io.Reader in of size bytes with contentType and info
func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string, info *drive.File, remote string) (*drive.File, error) {
	params := make(url.Values)
	params.Set("alt", "json")
	params.Set("uploadType", "resumable")
	params.Set("fields", partialFields)
func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
	params := url.Values{
		"alt":        {"json"},
		"uploadType": {"resumable"},
		"fields":     {partialFields},
	}
	if f.isTeamDrive {
		params.Set("supportsTeamDrives", "true")
	}
@@ -182,7 +185,7 @@ func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunk
	// been 200 OK.
	//
	// So parse the response out of the body. We aren't expecting
	// any other 2xx codes, so we parse it unconditionaly on
	// any other 2xx codes, so we parse it unconditionally on
	// StatusCode
	if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
		return 598, err

@@ -31,9 +31,11 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
@@ -79,8 +81,8 @@ const (
|
||||
// Choose 48MB which is 91% of Maximum speed. rclone by
|
||||
// default does 4 transfers so this should use 4*48MB = 192MB
|
||||
// by default.
|
||||
defaultChunkSize = 48 * 1024 * 1024
|
||||
maxChunkSize = 150 * 1024 * 1024
|
||||
defaultChunkSize = 48 * fs.MebiByte
|
||||
maxChunkSize = 150 * fs.MebiByte
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -120,9 +122,21 @@ func init() {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Dropbox App Client Secret\nLeave blank normally.",
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: fmt.Sprintf("Upload chunk size. Max %v.", fs.SizeSuffix(maxChunkSize)),
|
||||
Default: fs.SizeSuffix(defaultChunkSize),
|
||||
Name: "chunk_size",
|
||||
Help: fmt.Sprintf(`Upload chunk size. (< %v).
|
||||
|
||||
Any files larger than this will be uploaded in chunks of this size.
|
||||
|
||||
Note that chunks are buffered in memory (one at a time) so rclone can
|
||||
deal with retries. Setting this larger will increase the speed
|
||||
slightly (at most 10%% for 128MB in tests) at the cost of using more
|
||||
memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "impersonate",
|
||||
Help: "Impersonate this user when using a business account.",
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
@@ -130,7 +144,8 @@ func init() {
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
Impersonate string `config:"impersonate"`
|
||||
}
|
||||
|
||||
// Fs represents a remote dropbox server
|
||||
@@ -142,6 +157,7 @@ type Fs struct {
|
||||
srv files.Client // the connection to the dropbox server
|
||||
sharing sharing.Client // as above, but for generating sharing links
|
||||
users users.Client // as above, but for accessing user information
|
||||
team team.Client // for the Teams API
|
||||
slashRoot string // root with "/" prefix, lowercase
|
||||
slashRootSlash string // root with "/" prefix and postfix, lowercase
|
||||
pacer *pacer.Pacer // To pace the API calls
|
||||
@@ -188,14 +204,42 @@ func shouldRetry(err error) (bool, error) {
|
||||
return false, err
|
||||
}
|
||||
baseErrString := errors.Cause(err).Error()
|
||||
// FIXME there is probably a better way of doing this!
|
||||
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
|
||||
// handle any official Retry-After header from Dropbox's SDK first
|
||||
switch e := err.(type) {
|
||||
case auth.RateLimitAPIError:
|
||||
if e.RateLimitError.RetryAfter > 0 {
|
||||
fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
|
||||
time.Sleep(time.Duration(e.RateLimitError.RetryAfter) * time.Second)
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
// Keep old behavior for backward compatibility
|
||||
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
|
||||
return true, err
|
||||
}
|
||||
return fserrors.ShouldRetry(err), err
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||
const minChunkSize = fs.Byte
|
||||
if cs < minChunkSize {
|
||||
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
}
|
||||
if cs > maxChunkSize {
|
||||
return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadChunkSize(cs)
|
||||
if err == nil {
|
||||
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
@@ -203,8 +247,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.ChunkSize > maxChunkSize {
|
||||
return nil, errors.Errorf("chunk size too big, must be < %v", maxChunkSize)
|
||||
err = checkUploadChunkSize(opt.ChunkSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "dropbox: chunk size")
|
||||
}
|
||||
|
||||
// Convert the old token if it exists. The old token was just
|
||||
@@ -235,6 +280,29 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
Client: oAuthClient, // maybe???
|
||||
HeaderGenerator: f.headerGenerator,
|
||||
}
|
||||
|
||||
// NOTE: needs to be created pre-impersonation so we can look up the impersonated user
|
||||
f.team = team.New(config)
|
||||
|
||||
if opt.Impersonate != "" {
|
||||
|
||||
user := team.UserSelectorArg{
|
||||
Email: opt.Impersonate,
|
||||
}
|
||||
user.Tag = "email"
|
||||
|
||||
members := []*team.UserSelectorArg{&user}
|
||||
args := team.NewMembersGetInfoArgs(members)
|
||||
|
||||
memberIds, err := f.team.MembersGetInfo(args)
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
|
||||
}
|
||||
|
||||
config.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
|
||||
}
|
||||
|
||||
f.srv = files.New(config)
|
||||
f.sharing = sharing.New(config)
|
||||
f.users = users.New(config)
|
||||
|
||||
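The shouldRetry change above makes the Dropbox backend honour the SDK's Retry-After advice before falling back to string matching on the error text. Below is a minimal, standalone sketch of that pattern; the retryWithBackoff helper and its parameters are assumptions for illustration only, not rclone code.

```go
// Sketch: retry a call, sleeping for a server-advised delay when one is
// available, otherwise a default backoff. Assumed helper, not rclone's.
package main

import (
	"errors"
	"fmt"
	"time"
)

// retryWithBackoff retries fn up to maxTries times. retryAfter extracts a
// server-advised delay from the error (zero means "no advice").
func retryWithBackoff(maxTries int, defaultDelay time.Duration, retryAfter func(error) time.Duration, fn func() error) error {
	var err error
	for try := 0; try < maxTries; try++ {
		if err = fn(); err == nil {
			return nil
		}
		delay := retryAfter(err)
		if delay <= 0 {
			delay = defaultDelay
		}
		time.Sleep(delay) // honour the advised delay before the next attempt
	}
	return fmt.Errorf("giving up after %d tries: %w", maxTries, err)
}

func main() {
	calls := 0
	err := retryWithBackoff(3, 100*time.Millisecond,
		func(error) time.Duration { return 0 }, // no Retry-After in this toy example
		func() error {
			calls++
			if calls < 3 {
				return errors.New("too_many_requests")
			}
			return nil
		})
	fmt.Println(calls, err) // 3 <nil>
}
```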
@@ -1,10 +1,10 @@
|
||||
// Test Dropbox filesystem interface
|
||||
package dropbox_test
|
||||
package dropbox
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/dropbox"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
@@ -12,6 +12,15 @@ import (
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestDropbox:",
|
||||
NilObject: (*dropbox.Object)(nil),
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MaxChunkSize: maxChunkSize,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
|
||||
@@ -166,7 +166,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
|
||||
f.poolMu.Unlock()
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
||||
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
|
||||
// Parse config into Options struct
|
||||
@@ -646,7 +646,21 @@ func (f *ftpReadCloser) Read(p []byte) (n int, err error) {
|
||||
|
||||
// Close the FTP reader and return the connection to the pool
|
||||
func (f *ftpReadCloser) Close() error {
|
||||
err := f.rc.Close()
|
||||
var err error
|
||||
errchan := make(chan error, 1)
|
||||
go func() {
|
||||
errchan <- f.rc.Close()
|
||||
}()
|
||||
// Wait for Close for up to 60 seconds
|
||||
timer := time.NewTimer(60 * time.Second)
|
||||
select {
|
||||
case err = <-errchan:
|
||||
timer.Stop()
|
||||
case <-timer.C:
|
||||
// if timer fired assume no error but connection dead
|
||||
fs.Errorf(f.f, "Timeout when waiting for connection Close")
|
||||
return nil
|
||||
}
|
||||
// if errors while reading or closing, dump the connection
|
||||
if err != nil || f.err != nil {
|
||||
_ = f.c.Quit()
|
||||
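The Close change above races the ftp Close call against a timer so a dead connection cannot hang a transfer forever. A generic sketch of that pattern follows; closeWithTimeout and slowCloser are assumptions for illustration, not rclone code.

```go
// Sketch: close an io.Closer but give up waiting after a deadline.
package main

import (
	"errors"
	"fmt"
	"io"
	"time"
)

// closeWithTimeout runs c.Close in a goroutine and waits at most d for it.
func closeWithTimeout(c io.Closer, d time.Duration) error {
	errc := make(chan error, 1) // buffered so the goroutine never blocks forever
	go func() { errc <- c.Close() }()
	select {
	case err := <-errc:
		return err
	case <-time.After(d):
		// assume the connection is dead; the caller should discard it
		return errors.New("timeout waiting for Close")
	}
}

type slowCloser struct{ delay time.Duration }

func (s slowCloser) Close() error { time.Sleep(s.delay); return nil }

func main() {
	fmt.Println(closeWithTimeout(slowCloser{10 * time.Millisecond}, time.Second)) // <nil>
	fmt.Println(closeWithTimeout(slowCloser{time.Second}, 50*time.Millisecond))   // timeout waiting for Close
}
```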
@@ -704,6 +718,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
path := path.Join(o.fs.root, o.remote)
|
||||
// remove the file if upload failed
|
||||
remove := func() {
|
||||
// Give the FTP server a chance to get its internal state in order after the error.
|
||||
// The error may have been local in which case we closed the connection. The server
|
||||
// may still be dealing with it for a moment. A sleep isn't ideal but I haven't been
|
||||
// able to think of a better method to find out if the server has finished - ncw
|
||||
time.Sleep(1 * time.Second)
|
||||
removeErr := o.Remove()
|
||||
if removeErr != nil {
|
||||
fs.Debugf(o, "Failed to remove: %v", removeErr)
|
||||
@@ -717,7 +736,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
}
|
||||
err = c.Stor(path, in)
|
||||
if err != nil {
|
||||
_ = c.Quit()
|
||||
_ = c.Quit() // toss this connection to avoid sync errors
|
||||
remove()
|
||||
return errors.Wrap(err, "update stor")
|
||||
}
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
// Package googlecloudstorage provides an interface to Google Cloud Storage
|
||||
|
||||
// +build go1.9
|
||||
|
||||
package googlecloudstorage
|
||||
|
||||
/*
|
||||
@@ -159,21 +162,36 @@ func init() {
|
||||
}, {
|
||||
Value: "asia-east1",
|
||||
Help: "Taiwan.",
|
||||
}, {
|
||||
Value: "asia-east2",
|
||||
Help: "Hong Kong.",
|
||||
}, {
|
||||
Value: "asia-northeast1",
|
||||
Help: "Tokyo.",
|
||||
}, {
|
||||
Value: "asia-south1",
|
||||
Help: "Mumbai.",
|
||||
}, {
|
||||
Value: "asia-southeast1",
|
||||
Help: "Singapore.",
|
||||
}, {
|
||||
Value: "australia-southeast1",
|
||||
Help: "Sydney.",
|
||||
}, {
|
||||
Value: "europe-north1",
|
||||
Help: "Finland.",
|
||||
}, {
|
||||
Value: "europe-west1",
|
||||
Help: "Belgium.",
|
||||
}, {
|
||||
Value: "europe-west2",
|
||||
Help: "London.",
|
||||
}, {
|
||||
Value: "europe-west3",
|
||||
Help: "Frankfurt.",
|
||||
}, {
|
||||
Value: "europe-west4",
|
||||
Help: "Netherlands.",
|
||||
}, {
|
||||
Value: "us-central1",
|
||||
Help: "Iowa.",
|
||||
@@ -186,6 +204,9 @@ func init() {
|
||||
}, {
|
||||
Value: "us-west1",
|
||||
Help: "Oregon.",
|
||||
}, {
|
||||
Value: "us-west2",
|
||||
Help: "California.",
|
||||
}},
|
||||
}, {
|
||||
Name: "storage_class",
|
||||
@@ -279,7 +300,7 @@ func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// shouldRetry determines whehter a given err rates being retried
|
||||
// shouldRetry determines whether a given err rates being retried
|
||||
func shouldRetry(err error) (again bool, errOut error) {
|
||||
again = false
|
||||
if err != nil {
|
||||
@@ -327,7 +348,7 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
|
||||
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, bucket:path
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
var oAuthClient *http.Client
|
||||
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
// Test GoogleCloudStorage filesystem interface
|
||||
|
||||
// +build go1.9
|
||||
|
||||
package googlecloudstorage_test
|
||||
|
||||
import (
|
||||
|
||||
@@ -0,0 +1,6 @@
|
||||
// Build for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
// +build !go1.9
|
||||
|
||||
package googlecloudstorage
|
||||
@@ -40,6 +40,9 @@ func init() {
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "https://example.com",
|
||||
Help: "Connect to example.com",
|
||||
}, {
|
||||
Value: "https://user:pass@example.com",
|
||||
Help: "Connect to example.com using a username and password",
|
||||
}},
|
||||
}},
|
||||
}
|
||||
@@ -193,7 +196,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
}
|
||||
err := o.stat()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Stat failed")
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
@@ -248,7 +251,7 @@ func parseName(base *url.URL, name string) (string, error) {
|
||||
}
|
||||
// calculate the name relative to the base
|
||||
name = u.Path[len(base.Path):]
|
||||
// musn't be empty
|
||||
// mustn't be empty
|
||||
if name == "" {
|
||||
return "", errNameIsEmpty
|
||||
}
|
||||
@@ -416,6 +419,9 @@ func (o *Object) url() string {
|
||||
func (o *Object) stat() error {
|
||||
url := o.url()
|
||||
res, err := o.fs.httpClient.Head(url)
|
||||
if err == nil && res.StatusCode == http.StatusNotFound {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
err = statusError(res, err)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to stat")
|
||||
|
||||
@@ -144,6 +144,11 @@ func TestNewObject(t *testing.T) {
|
||||
|
||||
dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
|
||||
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
|
||||
|
||||
// check object not found
|
||||
o, err = f.NewObject("not found.txt")
|
||||
assert.Nil(t, o)
|
||||
assert.Equal(t, fs.ErrorObjectNotFound, err)
|
||||
}
|
||||
|
||||
func TestOpen(t *testing.T) {
|
||||
|
||||
@@ -9,8 +9,10 @@ package hubic
import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"strings"
	"time"

	"github.com/ncw/rclone/backend/swift"
@@ -124,7 +126,9 @@ func (f *Fs) getCredentials() (err error) {
	}
	defer fs.CheckClose(resp.Body, &err)
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		return errors.Errorf("failed to get credentials: %s", resp.Status)
		body, _ := ioutil.ReadAll(resp.Body)
		bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
		return errors.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
	}
	decoder := json.NewDecoder(resp.Body)
	var result credentials

@@ -9,7 +9,10 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
// default time format for almost all request and responses
|
||||
timeFormat = "2006-01-02-T15:04:05Z0700"
|
||||
// the API server seems to use a different format
|
||||
apiTimeFormat = "2006-01-02T15:04:05Z07:00"
|
||||
)
|
||||
|
||||
// Time represents time values in the Jottacloud API. It uses a custom RFC3339 like format.
|
||||
@@ -40,6 +43,9 @@ func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
// Return Time string in Jottacloud format
|
||||
func (t Time) String() string { return time.Time(t).Format(timeFormat) }
|
||||
|
||||
// APIString returns Time string in Jottacloud API format
|
||||
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
|
||||
|
||||
// Flag is a hacky type for checking if an attribute is present
|
||||
type Flag bool
|
||||
|
||||
@@ -58,6 +64,15 @@ func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
|
||||
return attr, errors.New("unimplemented")
|
||||
}
|
||||
|
||||
// TokenJSON is the struct representing the HTTP response from OAuth2
|
||||
// providers returning a token in JSON form.
|
||||
type TokenJSON struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
TokenType string `json:"token_type"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
|
||||
}
|
||||
|
||||
/*
|
||||
GET http://www.jottacloud.com/JFS/<account>
|
||||
|
||||
@@ -265,3 +280,37 @@ func (e *Error) Error() string {
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// AllocateFileRequest to prepare an upload to Jottacloud
|
||||
type AllocateFileRequest struct {
|
||||
Bytes int64 `json:"bytes"`
|
||||
Created string `json:"created"`
|
||||
Md5 string `json:"md5"`
|
||||
Modified string `json:"modified"`
|
||||
Path string `json:"path"`
|
||||
}
|
||||
|
||||
// AllocateFileResponse for upload requests
|
||||
type AllocateFileResponse struct {
|
||||
Name string `json:"name"`
|
||||
Path string `json:"path"`
|
||||
State string `json:"state"`
|
||||
UploadID string `json:"upload_id"`
|
||||
UploadURL string `json:"upload_url"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
ResumePos int64 `json:"resume_pos"`
|
||||
}
|
||||
|
||||
// UploadResponse after an upload
|
||||
type UploadResponse struct {
|
||||
Name string `json:"name"`
|
||||
Path string `json:"path"`
|
||||
Kind string `json:"kind"`
|
||||
ContentID string `json:"content_id"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
Md5 string `json:"md5"`
|
||||
Created int64 `json:"created"`
|
||||
Modified int64 `json:"modified"`
|
||||
Deleted interface{} `json:"deleted"`
|
||||
Mime string `json:"mime"`
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
@@ -26,22 +27,41 @@ import (
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/ncw/rclone/lib/oauthutil"
|
||||
"github.com/ncw/rclone/lib/pacer"
|
||||
"github.com/ncw/rclone/lib/rest"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
// Globals
|
||||
const (
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
defaultDevice = "Jotta"
|
||||
defaultMountpoint = "Sync"
|
||||
rootURL = "https://www.jottacloud.com/jfs/"
|
||||
apiURL = "https://api.jottacloud.com"
|
||||
shareURL = "https://www.jottacloud.com/"
|
||||
cachePrefix = "rclone-jcmd5-"
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
defaultDevice = "Jotta"
|
||||
defaultMountpoint = "Sync" // nolint
|
||||
rootURL = "https://www.jottacloud.com/jfs/"
|
||||
apiURL = "https://api.jottacloud.com/files/v1/"
|
||||
baseURL = "https://www.jottacloud.com/"
|
||||
tokenURL = "https://api.jottacloud.com/auth/v1/token"
|
||||
cachePrefix = "rclone-jcmd5-"
|
||||
rcloneClientID = "nibfk8biu12ju7hpqomr8b1e40"
|
||||
rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
|
||||
configUsername = "user"
|
||||
)
|
||||
|
||||
var (
|
||||
// Description of how to auth for this app for a personal account
|
||||
oauthConfig = &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: tokenURL,
|
||||
TokenURL: tokenURL,
|
||||
},
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
}
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -50,13 +70,71 @@ func init() {
|
||||
Name: "jottacloud",
|
||||
Description: "JottaCloud",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string, m configmap.Mapper) {
|
||||
tokenString, ok := m.Get("token")
|
||||
if ok && tokenString != "" {
|
||||
fmt.Printf("Already have a token - refresh?\n")
|
||||
if !config.Confirm() {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
username, ok := m.Get(configUsername)
|
||||
if !ok {
|
||||
log.Fatalf("No username defined")
|
||||
}
|
||||
password := config.GetPassword("Your Jottacloud password is only required during config and will not be stored.")
|
||||
|
||||
// prepare out token request with username and password
|
||||
srv := rest.NewClient(fshttp.NewClient(fs.Config))
|
||||
values := url.Values{}
|
||||
values.Set("grant_type", "PASSWORD")
|
||||
values.Set("password", password)
|
||||
values.Set("username", username)
|
||||
values.Set("client_id", oauthConfig.ClientID)
|
||||
values.Set("client_secret", oauthConfig.ClientSecret)
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
RootURL: oauthConfig.Endpoint.AuthURL,
|
||||
ContentType: "application/x-www-form-urlencoded",
|
||||
Parameters: values,
|
||||
}
|
||||
|
||||
var jsonToken api.TokenJSON
|
||||
resp, err := srv.CallJSON(&opts, nil, &jsonToken)
|
||||
if err != nil {
|
||||
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
|
||||
if resp != nil {
|
||||
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
|
||||
fmt.Printf("This account has 2 factor authentication enabled you will receive a verification code via SMS.\n")
|
||||
fmt.Printf("Enter verification code> ")
|
||||
authCode := config.ReadLine()
|
||||
authCode = strings.Replace(authCode, "-", "", -1) // the SMS received contains a pair of 3 digit numbers separated by '-' but the server wants a single 6 digit number
|
||||
opts.ExtraHeaders = make(map[string]string)
|
||||
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
|
||||
resp, err = srv.CallJSON(&opts, nil, &jsonToken)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to get resource token: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
var token oauth2.Token
|
||||
token.AccessToken = jsonToken.AccessToken
|
||||
token.RefreshToken = jsonToken.RefreshToken
|
||||
token.TokenType = jsonToken.TokenType
|
||||
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
|
||||
|
||||
// finally save them in the config
|
||||
err = oauthutil.PutToken(name, m, &token, true)
|
||||
if err != nil {
|
||||
log.Fatalf("Error while setting token: %s", err)
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: "user",
|
||||
Help: "User Name",
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "Password.",
|
||||
IsPassword: true,
|
||||
Name: configUsername,
|
||||
Help: "User Name:",
|
||||
}, {
|
||||
Name: "mountpoint",
|
||||
Help: "The mountpoint to use.",
|
||||
@@ -80,9 +158,14 @@ func init() {
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "unlink",
|
||||
Help: "Remove existing public link to file/folder with link command rather than creating.",
|
||||
Help: "Remove existing public link to file/folder with link command rather than creating.\nDefault is false, meaning link command will create or retrieve public link.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_resume_limit",
|
||||
Help: "Files bigger than this can be resumed if the upload fail's.",
|
||||
Default: fs.SizeSuffix(10 * 1024 * 1024),
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -90,23 +173,25 @@ func init() {
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
User string `config:"user"`
|
||||
Pass string `config:"pass"`
|
||||
Mountpoint string `config:"mountpoint"`
|
||||
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
|
||||
HardDelete bool `config:"hard_delete"`
|
||||
Unlink bool `config:"unlink"`
|
||||
UploadThreshold fs.SizeSuffix `config:"upload_resume_limit"`
|
||||
}
|
||||
|
||||
// Fs represents a remote jottacloud
|
||||
type Fs struct {
|
||||
name string
|
||||
root string
|
||||
user string
|
||||
opt Options
|
||||
features *fs.Features
|
||||
endpointURL string
|
||||
srv *rest.Client
|
||||
pacer *pacer.Pacer
|
||||
name string
|
||||
root string
|
||||
user string
|
||||
opt Options
|
||||
features *fs.Features
|
||||
endpointURL string
|
||||
srv *rest.Client
|
||||
apiSrv *rest.Client
|
||||
pacer *pacer.Pacer
|
||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||
}
|
||||
|
||||
// Object describes a jottacloud object
|
||||
@@ -199,7 +284,7 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
|
||||
func (f *Fs) getAccountInfo() (info *api.AccountInfo, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: rest.URLPathEscape(f.user),
|
||||
Path: urlPathEscape(f.user),
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
@@ -220,7 +305,7 @@ func (f *Fs) setEndpointURL(mountpoint string) (err error) {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get endpoint url")
|
||||
}
|
||||
f.endpointURL = rest.URLPathEscape(path.Join(info.Username, defaultDevice, mountpoint))
|
||||
f.endpointURL = urlPathEscape(path.Join(info.Username, defaultDevice, mountpoint))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -241,6 +326,11 @@ func errorHandler(resp *http.Response) error {
|
||||
return errResponse
|
||||
}
|
||||
|
||||
// Jottacloud wants '+' to be URL encoded even though the RFC states it's not reserved
|
||||
func urlPathEscape(in string) string {
|
||||
return strings.Replace(rest.URLPathEscape(in), "+", "%2B", -1)
|
||||
}
|
||||
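A quick standard-library check of why the extra replace is needed: the stdlib path escaper leaves '+' alone, so it has to be encoded by hand for Jottacloud. This is an illustrative standalone sketch, not rclone code.

```go
// Sketch: url.PathEscape keeps '+' as-is, so it must be replaced manually.
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	name := "report 2018+final.txt"
	std := url.PathEscape(name)
	fmt.Println(std)                                  // report%202018+final.txt
	fmt.Println(strings.Replace(std, "+", "%2B", -1)) // report%202018%2Bfinal.txt
}
```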
|
||||
// filePathRaw returns an unescaped file path (f.root, file)
|
||||
func (f *Fs) filePathRaw(file string) string {
|
||||
return path.Join(f.endpointURL, replaceReservedChars(path.Join(f.root, file)))
|
||||
@@ -248,7 +338,7 @@ func (f *Fs) filePathRaw(file string) string {
|
||||
|
||||
// filePath returns a escaped file path (f.root, file)
|
||||
func (f *Fs) filePath(file string) string {
|
||||
return rest.URLPathEscape(f.filePathRaw(file))
|
||||
return urlPathEscape(f.filePathRaw(file))
|
||||
}
|
||||
|
||||
// filePath returns a escaped file path (f.root, remote)
|
||||
@@ -256,6 +346,29 @@ func (o *Object) filePath() string {
|
||||
return o.fs.filePath(o.remote)
|
||||
}
|
||||
|
||||
// Jottacloud requires the grant_type 'refresh_token' string
|
||||
// to be uppercase and throws a 400 Bad Request if we use the
|
||||
// lower case used by the oauth2 module
|
||||
//
|
||||
// This filter catches all refresh requests, reads the body,
|
||||
// changes the case and then sends it on
|
||||
func grantTypeFilter(req *http.Request) {
|
||||
if tokenURL == req.URL.String() {
|
||||
// read the entire body
|
||||
refreshBody, err := ioutil.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_ = req.Body.Close()
|
||||
|
||||
// make the refresh token upper case
|
||||
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
|
||||
|
||||
// set the new ReadCloser (with a dummy Close())
|
||||
req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
|
||||
}
|
||||
}
|
||||
|
||||
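grantTypeFilter works by draining the request body, upper-casing the grant_type value and installing a fresh ReadCloser before the request goes out. A self-contained sketch of the same idea as a plain http.RoundTripper is shown below; the type names and the httptest server are assumptions for illustration, none of this is rclone's API.

```go
// Sketch: a RoundTripper that rewrites the request body before sending it,
// upper-casing grant_type the same way grantTypeFilter does.
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"strings"
)

type rewriteTransport struct {
	next http.RoundTripper
}

func (t rewriteTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	if req.Body != nil {
		body, err := ioutil.ReadAll(req.Body)
		if err != nil {
			return nil, err
		}
		_ = req.Body.Close()
		fixed := strings.Replace(string(body), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1)
		req.Body = ioutil.NopCloser(bytes.NewReader([]byte(fixed)))
		req.ContentLength = int64(len(fixed))
	}
	return t.next.RoundTrip(req)
}

func main() {
	// test server that echoes whatever body it actually received
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		body, _ := ioutil.ReadAll(r.Body)
		fmt.Fprint(w, string(body))
	}))
	defer srv.Close()

	client := &http.Client{Transport: rewriteTransport{next: http.DefaultTransport}}
	resp, _ := client.Post(srv.URL, "application/x-www-form-urlencoded",
		strings.NewReader("grant_type=refresh_token&refresh_token=abc"))
	got, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(got)) // grant_type=REFRESH_TOKEN&refresh_token=abc
}
```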
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
@@ -268,25 +381,29 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
rootIsDir := strings.HasSuffix(root, "/")
|
||||
root = parsePath(root)
|
||||
|
||||
user := config.FileGet(name, "user")
|
||||
pass := config.FileGet(name, "pass")
|
||||
|
||||
if opt.Pass != "" {
|
||||
var err error
|
||||
opt.Pass, err = obscure.Reveal(opt.Pass)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't decrypt password")
|
||||
}
|
||||
// the oauth client for the api servers needs
|
||||
// a filter to fix the grant_type issues (see above)
|
||||
baseClient := fshttp.NewClient(fs.Config)
|
||||
if do, ok := baseClient.Transport.(interface {
|
||||
SetRequestFilter(f func(req *http.Request))
|
||||
}); ok {
|
||||
do.SetRequestFilter(grantTypeFilter)
|
||||
} else {
|
||||
fs.Debugf(name+":", "Couldn't add request filter - uploads will fail")
|
||||
}
|
||||
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
user: opt.User,
|
||||
opt: *opt,
|
||||
//endpointURL: rest.URLPathEscape(path.Join(user, defaultDevice, opt.Mountpoint)),
|
||||
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(rootURL),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
name: name,
|
||||
root: root,
|
||||
user: opt.User,
|
||||
opt: *opt,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
@@ -294,14 +411,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
}).Fill(f)
|
||||
|
||||
if user == "" || pass == "" {
|
||||
return nil, errors.New("jottacloud needs user and password")
|
||||
}
|
||||
|
||||
f.srv.SetUserPass(opt.User, opt.Pass)
|
||||
f.srv.SetErrorHandler(errorHandler)
|
||||
|
||||
// Renew the token in the background
|
||||
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
||||
_, err := f.readMetaDataForPath("")
|
||||
return err
|
||||
})
|
||||
|
||||
err = f.setEndpointURL(opt.Mountpoint)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't get account info")
|
||||
@@ -326,7 +443,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
@@ -343,7 +459,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, e
|
||||
// Set info
|
||||
err = o.setMetaData(info)
|
||||
} else {
|
||||
err = o.readMetaData() // reads info and meta, returning an error
|
||||
err = o.readMetaData(false) // reads info and meta, returning an error
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -391,7 +507,7 @@ func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
//fmt.Printf("List: %s\n", dir)
|
||||
//fmt.Printf("List: %s\n", f.filePath(dir))
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: f.filePath(dir),
|
||||
@@ -459,12 +575,12 @@ func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, f
|
||||
if folder.Deleted {
|
||||
return nil
|
||||
}
|
||||
folderPath := path.Join(folder.Path, folder.Name)
|
||||
remoteDirLength := len(folderPath) - pathPrefixLength
|
||||
folderPath := restoreReservedChars(path.Join(folder.Path, folder.Name))
|
||||
folderPathLength := len(folderPath)
|
||||
var remoteDir string
|
||||
if remoteDirLength > 0 {
|
||||
remoteDir = restoreReservedChars(folderPath[pathPrefixLength+1:])
|
||||
if remoteDirLength > startPathLength {
|
||||
if folderPathLength > pathPrefixLength {
|
||||
remoteDir = folderPath[pathPrefixLength+1:]
|
||||
if folderPathLength > startPathLength {
|
||||
d := fs.NewDir(remoteDir, time.Time(folder.ModifiedAt))
|
||||
err := fn(d)
|
||||
if err != nil {
|
||||
@@ -653,7 +769,7 @@ func (f *Fs) Purge() error {
|
||||
return f.purgeCheck("", false)
|
||||
}
|
||||
|
||||
// copyOrMoves copys or moves directories or files depending on the mthod parameter
|
||||
// copyOrMoves copies or moves directories or files depending on the method parameter
|
||||
func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
@@ -671,7 +787,6 @@ func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err erro
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
@@ -819,7 +934,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
|
||||
if result.PublicSharePath == "" {
|
||||
return "", errors.New("couldn't create public link - no link path received")
|
||||
}
|
||||
link = path.Join(shareURL, result.PublicSharePath)
|
||||
link = path.Join(baseURL, result.PublicSharePath)
|
||||
return link, nil
|
||||
}
|
||||
|
||||
@@ -875,7 +990,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *Object) Size() int64 {
|
||||
err := o.readMetaData()
|
||||
err := o.readMetaData(false)
|
||||
if err != nil {
|
||||
fs.Logf(o, "Failed to read metadata: %v", err)
|
||||
return 0
|
||||
@@ -891,21 +1006,24 @@ func (o *Object) MimeType() string {
|
||||
// setMetaData sets the metadata from info
|
||||
func (o *Object) setMetaData(info *api.JottaFile) (err error) {
|
||||
o.hasMetaData = true
|
||||
o.size = int64(info.Size)
|
||||
o.size = info.Size
|
||||
o.md5 = info.MD5
|
||||
o.mimeType = info.MimeType
|
||||
o.modTime = time.Time(info.ModifiedAt)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Object) readMetaData() (err error) {
|
||||
if o.hasMetaData {
|
||||
func (o *Object) readMetaData(force bool) (err error) {
|
||||
if o.hasMetaData && !force {
|
||||
return nil
|
||||
}
|
||||
info, err := o.fs.readMetaDataForPath(o.remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info.Deleted {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
return o.setMetaData(info)
|
||||
}
|
||||
|
||||
@@ -914,7 +1032,7 @@ func (o *Object) readMetaData() (err error) {
|
||||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime() time.Time {
|
||||
err := o.readMetaData()
|
||||
err := o.readMetaData(false)
|
||||
if err != nil {
|
||||
fs.Logf(o, "Failed to read metadata: %v", err)
|
||||
return time.Now()
|
||||
@@ -962,7 +1080,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader, cleanup func(), err error) {
|
||||
// we need a MD5
|
||||
md5Hasher := md5.New()
|
||||
// use the teeReader to write to the local file AND caclulate the MD5 while doing so
|
||||
// use the teeReader to write to the local file AND calculate the MD5 while doing so
|
||||
teeReader := io.TeeReader(in, md5Hasher)
|
||||
|
||||
// nothing to clean up by default
|
||||
@@ -1035,43 +1153,74 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
in = wrap(in)
|
||||
}
|
||||
|
||||
// use the api to allocate the file first and get resume / deduplication info
|
||||
var resp *http.Response
|
||||
var result api.JottaFile
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: o.filePath(),
|
||||
Body: in,
|
||||
ContentType: fs.MimeType(src),
|
||||
ContentLength: &size,
|
||||
ExtraHeaders: make(map[string]string),
|
||||
Parameters: url.Values{},
|
||||
Method: "POST",
|
||||
Path: "allocate",
|
||||
ExtraHeaders: make(map[string]string),
|
||||
}
|
||||
fileDate := api.Time(src.ModTime()).APIString()
|
||||
|
||||
// the allocate request
|
||||
var request = api.AllocateFileRequest{
|
||||
Bytes: size,
|
||||
Created: fileDate,
|
||||
Modified: fileDate,
|
||||
Md5: md5String,
|
||||
Path: path.Join(o.fs.opt.Mountpoint, replaceReservedChars(path.Join(o.fs.root, o.remote))),
|
||||
}
|
||||
|
||||
opts.ExtraHeaders["JMd5"] = md5String
|
||||
opts.Parameters.Set("cphash", md5String)
|
||||
opts.ExtraHeaders["JSize"] = strconv.FormatInt(size, 10)
|
||||
// opts.ExtraHeaders["JCreated"] = api.Time(src.ModTime()).String()
|
||||
opts.ExtraHeaders["JModified"] = api.Time(src.ModTime()).String()
|
||||
|
||||
// Parameters observed in other implementations
|
||||
//opts.ExtraHeaders["X-Jfs-DeviceName"] = "Jotta"
|
||||
//opts.ExtraHeaders["X-Jfs-Devicename-Base64"] = ""
|
||||
//opts.ExtraHeaders["X-Jftp-Version"] = "2.4" this appears to be the current version
|
||||
//opts.ExtraHeaders["jx_csid"] = ""
|
||||
//opts.ExtraHeaders["jx_lisence"] = ""
|
||||
|
||||
opts.Parameters.Set("umode", "nomultipart")
|
||||
|
||||
// send it
|
||||
var response api.AllocateFileResponse
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err = o.fs.srv.CallXML(&opts, nil, &result)
|
||||
resp, err = o.fs.apiSrv.CallJSON(&opts, &request, &response)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO: Check returned Metadata? Timeout on big uploads?
|
||||
return o.setMetaData(&result)
|
||||
// If the file state is INCOMPLETE or CORRUPT, upload the remaining bytes
|
||||
if response.State != "COMPLETED" {
|
||||
// how much do we still have to upload?
|
||||
remainingBytes := size - response.ResumePos
|
||||
opts = rest.Opts{
|
||||
Method: "POST",
|
||||
RootURL: response.UploadURL,
|
||||
ContentLength: &remainingBytes,
|
||||
ContentType: "application/octet-stream",
|
||||
Body: in,
|
||||
ExtraHeaders: make(map[string]string),
|
||||
}
|
||||
if response.ResumePos != 0 {
|
||||
opts.ExtraHeaders["Range"] = "bytes=" + strconv.FormatInt(response.ResumePos, 10) + "-" + strconv.FormatInt(size-1, 10)
|
||||
}
|
||||
|
||||
// copy the already uploaded bytes into the trash :)
|
||||
var result api.UploadResponse
|
||||
_, err = io.CopyN(ioutil.Discard, in, response.ResumePos)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// send the remaining bytes
|
||||
resp, err = o.fs.apiSrv.CallJSON(&opts, nil, &result)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// finally update the meta data
|
||||
o.hasMetaData = true
|
||||
o.size = result.Bytes
|
||||
o.md5 = result.Md5
|
||||
o.modTime = time.Unix(result.Modified/1000, 0)
|
||||
} else {
|
||||
// If the file state is COMPLETE we don't need to upload it because the file was already found, but we still need to update our metadata
|
||||
return o.readMetaData(true)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
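The Update flow above allocates the file first and, if the allocation comes back incomplete, discards the bytes the server already has and uploads the rest with a Range header. Below is a standalone sketch of that resume step; the upload URL and the helper are hypothetical and stand in for the real Jottacloud endpoint.

```go
// Sketch: skip the bytes the server already holds (resumePos), then send
// the remainder with a Range header. Hypothetical endpoint, not the real API.
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"strings"
)

func resumeUpload(uploadURL string, in io.Reader, size, resumePos int64) (*http.Response, error) {
	// throw away what was already uploaded so `in` starts at resumePos
	if _, err := io.CopyN(ioutil.Discard, in, resumePos); err != nil {
		return nil, err
	}
	remaining := size - resumePos
	req, err := http.NewRequest("POST", uploadURL, io.LimitReader(in, remaining))
	if err != nil {
		return nil, err
	}
	req.ContentLength = remaining
	req.Header.Set("Content-Type", "application/octet-stream")
	if resumePos != 0 {
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", resumePos, size-1))
	}
	return http.DefaultClient.Do(req)
}

func main() {
	data := strings.NewReader("0123456789")
	// would POST bytes 4..9 to the (hypothetical) upload URL
	_, err := resumeUpload("http://127.0.0.1:1/upload", data, 10, 4)
	fmt.Println(err != nil) // true here, since nothing is listening
}
```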
@@ -1080,6 +1229,7 @@ func (o *Object) Remove() error {
|
||||
Method: "POST",
|
||||
Path: o.filePath(),
|
||||
Parameters: url.Values{},
|
||||
NoResponse: true,
|
||||
}
|
||||
|
||||
if o.fs.opt.HardDelete {
|
||||
|
||||
@@ -4,50 +4,25 @@ import (
|
||||
"crypto/md5"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// A test reader to return a test pattern of size
|
||||
type testReader struct {
|
||||
size int64
|
||||
c byte
|
||||
}
|
||||
|
||||
// Reader is the interface that wraps the basic Read method.
|
||||
func (r *testReader) Read(p []byte) (n int, err error) {
|
||||
for i := range p {
|
||||
if r.size <= 0 {
|
||||
return n, io.EOF
|
||||
}
|
||||
p[i] = r.c
|
||||
r.c = (r.c + 1) % 253
|
||||
r.size--
|
||||
n++
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func TestReadMD5(t *testing.T) {
|
||||
// smoke test the reader
|
||||
b, err := ioutil.ReadAll(&testReader{size: 10})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, b)
|
||||
|
||||
// Check readMD5 for different size and threshold
|
||||
for _, size := range []int64{0, 1024, 10 * 1024, 100 * 1024} {
|
||||
t.Run(fmt.Sprintf("%d", size), func(t *testing.T) {
|
||||
hasher := md5.New()
|
||||
n, err := io.Copy(hasher, &testReader{size: size})
|
||||
n, err := io.Copy(hasher, readers.NewPatternReader(size))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, n, size)
|
||||
wantMD5 := fmt.Sprintf("%x", hasher.Sum(nil))
|
||||
for _, threshold := range []int64{512, 1024, 10 * 1024, 20 * 1024} {
|
||||
t.Run(fmt.Sprintf("%d", threshold), func(t *testing.T) {
|
||||
in := &testReader{size: size}
|
||||
in := readers.NewPatternReader(size)
|
||||
gotMD5, out, cleanup, err := readMD5(in, size, threshold)
|
||||
defer cleanup()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
Translate file names for JottaCloud adapted from OneDrive
|
||||
|
||||
|
||||
The following characters are JottaClous reserved characters, and can't
|
||||
The following characters are JottaCloud reserved characters, and can't
|
||||
be used in JottaCloud folder and file names.
|
||||
|
||||
jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"
|
||||
@@ -27,21 +27,14 @@ import (
|
||||
var (
|
||||
charMap = map[rune]rune{
|
||||
'\\': '\', // FULLWIDTH REVERSE SOLIDUS
|
||||
'+': '+', // FULLWIDTH PLUS SIGN
|
||||
'*': '*', // FULLWIDTH ASTERISK
|
||||
'<': '<', // FULLWIDTH LESS-THAN SIGN
|
||||
'>': '>', // FULLWIDTH GREATER-THAN SIGN
|
||||
'?': '?', // FULLWIDTH QUESTION MARK
|
||||
'!': '!', // FULLWIDTH EXCLAMATION MARK
|
||||
'&': '&', // FULLWIDTH AMPERSAND
|
||||
':': ':', // FULLWIDTH COLON
|
||||
';': ';', // FULLWIDTH SEMICOLON
|
||||
'|': '|', // FULLWIDTH VERTICAL LINE
|
||||
'#': '#', // FULLWIDTH NUMBER SIGN
|
||||
'%': '%', // FULLWIDTH PERCENT SIGN
|
||||
'"': '"', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
|
||||
'\'': ''', // FULLWIDTH APOSTROPHE
|
||||
'~': '~', // FULLWIDTH TILDE
|
||||
' ': '␠', // SYMBOL FOR SPACE
|
||||
}
|
||||
invCharMap map[rune]rune
|
||||
|
||||
@@ -9,8 +9,8 @@ func TestReplace(t *testing.T) {
|
||||
}{
|
||||
{"", ""},
|
||||
{"abc 123", "abc 123"},
|
||||
{`\+*<>?!&:;|#%"'~`, `\+*<>?!&:;|#%"'~`},
|
||||
{`\+*<>?!&:;|#%"'~\+*<>?!&:;|#%"'~`, `\+*<>?!&:;|#%"'~\+*<>?!&:;|#%"'~`},
|
||||
{`\*<>?:;|"`, `\*<>?:;|"`},
|
||||
{`\*<>?:;|"\*<>?:;|"`, `\*<>?:;|"\*<>?:;|"`},
|
||||
{" leading space", "␠leading space"},
|
||||
{"trailing space ", "trailing space␠"},
|
||||
{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
|
||||
|
||||
@@ -16,7 +16,7 @@ func (f *Fs) About() (*fs.Usage, error) {
	if err != nil {
		return nil, errors.Wrap(err, "failed to read disk usage")
	}
	bs := int64(s.Bsize)
	bs := int64(s.Bsize) // nolint: unconvert
	usage := &fs.Usage{
		Total: fs.NewUsageValue(bs * int64(s.Blocks)),         // quota of bytes that can be used
		Used:  fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use

20
backend/local/lchtimes.go
Normal file
@@ -0,0 +1,20 @@
|
||||
// +build windows plan9
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
const haveLChtimes = false
|
||||
|
||||
// lChtimes changes the access and modification times of the named
|
||||
// link, similar to the Unix utime() or utimes() functions.
|
||||
//
|
||||
// The underlying filesystem may truncate or round the values to a
|
||||
// less precise time unit.
|
||||
// If there is an error, it will be of type *PathError.
|
||||
func lChtimes(name string, atime time.Time, mtime time.Time) error {
|
||||
// Does nothing
|
||||
return nil
|
||||
}
|
||||
28
backend/local/lchtimes_unix.go
Normal file
@@ -0,0 +1,28 @@
|
||||
// +build !windows,!plan9
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const haveLChtimes = true
|
||||
|
||||
// lChtimes changes the access and modification times of the named
|
||||
// link, similar to the Unix utime() or utimes() functions.
|
||||
//
|
||||
// The underlying filesystem may truncate or round the values to a
|
||||
// less precise time unit.
|
||||
// If there is an error, it will be of type *PathError.
|
||||
func lChtimes(name string, atime time.Time, mtime time.Time) error {
|
||||
var utimes [2]unix.Timespec
|
||||
utimes[0] = unix.NsecToTimespec(atime.UnixNano())
|
||||
utimes[1] = unix.NsecToTimespec(mtime.UnixNano())
|
||||
if e := unix.UtimesNanoAt(unix.AT_FDCWD, name, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); e != nil {
|
||||
return &os.PathError{Op: "lchtimes", Path: name, Err: e}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -2,6 +2,7 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -21,12 +22,14 @@ import (
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/file"
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Constants
|
||||
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
|
||||
const linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
@@ -49,19 +52,40 @@ func init() {
|
||||
ShortOpt: "L",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "skip_links",
|
||||
Help: "Don't warn about skipped symlinks.",
|
||||
Name: "links",
|
||||
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "l",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "skip_links",
|
||||
Help: `Don't warn about skipped symlinks.
|
||||
This flag disables warning messages on skipped symlinks or junction
|
||||
points, as you explicitly acknowledge that they should be skipped.`,
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_unicode_normalization",
|
||||
Help: "Don't apply unicode normalization to paths and filenames",
|
||||
Name: "no_unicode_normalization",
|
||||
Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
|
||||
This flag is deprecated now. Rclone no longer normalizes unicode file
|
||||
names, but it compares them with unicode normalization in the sync
|
||||
routine instead.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_check_updated",
|
||||
Help: "Don't check to see if the files change during upload",
|
||||
Name: "no_check_updated",
|
||||
Help: `Don't check to see if the files change during upload
|
||||
|
||||
Normally rclone checks the size and modification time of files as they
|
||||
are being uploaded and aborts with a message which starts "can't copy
|
||||
- source file is being updated" if the file changes during upload.
|
||||
|
||||
However on some file systems this modification time check may fail (eg
|
||||
[Glusterfs #2206](https://github.com/ncw/rclone/issues/2206)) so this
|
||||
check can be disabled with this flag.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
@@ -78,12 +102,13 @@ func init() {
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
FollowSymlinks bool `config:"copy_links"`
|
||||
SkipSymlinks bool `config:"skip_links"`
|
||||
NoUTFNorm bool `config:"no_unicode_normalization"`
|
||||
NoCheckUpdated bool `config:"no_check_updated"`
|
||||
NoUNC bool `config:"nounc"`
|
||||
OneFileSystem bool `config:"one_file_system"`
|
||||
FollowSymlinks bool `config:"copy_links"`
|
||||
TranslateSymlinks bool `config:"links"`
|
||||
SkipSymlinks bool `config:"skip_links"`
|
||||
NoUTFNorm bool `config:"no_unicode_normalization"`
|
||||
NoCheckUpdated bool `config:"no_check_updated"`
|
||||
NoUNC bool `config:"nounc"`
|
||||
OneFileSystem bool `config:"one_file_system"`
|
||||
}
|
||||
|
||||
// Fs represents a local filesystem rooted at root
|
||||
@@ -105,17 +130,20 @@ type Fs struct {
|
||||
|
||||
// Object represents a local filesystem object
|
||||
type Object struct {
|
||||
fs *Fs // The Fs this object is part of
|
||||
remote string // The remote path - properly UTF-8 encoded - for rclone
|
||||
path string // The local path - may not be properly UTF-8 encoded - for OS
|
||||
size int64 // file metadata - always present
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
hashes map[hash.Type]string // Hashes
|
||||
fs *Fs // The Fs this object is part of
|
||||
remote string // The remote path - properly UTF-8 encoded - for rclone
|
||||
path string // The local path - may not be properly UTF-8 encoded - for OS
|
||||
size int64 // file metadata - always present
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
hashes map[hash.Type]string // Hashes
|
||||
translatedLink bool // Is this object a translated link
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
|
||||
|
||||
// NewFs constructs an Fs from the path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
@@ -124,6 +152,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.TranslateSymlinks && opt.FollowSymlinks {
|
||||
return nil, errLinksAndCopyLinks
|
||||
}
|
||||
|
||||
if opt.NoUTFNorm {
|
||||
fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
|
||||
@@ -151,7 +182,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err == nil {
|
||||
f.dev = readDevice(fi, f.opt.OneFileSystem)
|
||||
}
|
||||
if err == nil && fi.Mode().IsRegular() {
|
||||
if err == nil && f.isRegular(fi.Mode()) {
|
||||
// It is a file, so use the parent as the root
|
||||
f.root = filepath.Dir(f.root)
|
||||
// return an error with an fs which points to the parent
|
||||
@@ -160,6 +191,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Determine whether a file is a 'regular' file,
|
||||
// Symlinks are regular files, only if the TranslateSymlink
|
||||
// option is in-effect
|
||||
func (f *Fs) isRegular(mode os.FileMode) bool {
|
||||
if !f.opt.TranslateSymlinks {
|
||||
return mode.IsRegular()
|
||||
}
|
||||
|
||||
// fi.Mode().IsRegular() tests that all mode bits are zero
|
||||
// Since symlinks are accepted, test that all other bits are zero,
|
||||
// except the symlink bit
|
||||
return mode&os.ModeType&^os.ModeSymlink == 0
|
||||
}
|
||||
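A standalone sketch of the mode-bit test isRegular relies on: masking with os.ModeType and then clearing os.ModeSymlink leaves zero for plain files and for symlinks, but not for directories, pipes and other special files. The helper name is assumed for illustration and is not rclone code.

```go
// Sketch: the same bit test as isRegular, applied to a few sample modes.
package main

import (
	"fmt"
	"os"
)

func regularOrSymlink(mode os.FileMode) bool {
	return mode&os.ModeType&^os.ModeSymlink == 0
}

func main() {
	fmt.Println(regularOrSymlink(0644))                  // true  - plain file
	fmt.Println(regularOrSymlink(os.ModeSymlink | 0777)) // true  - symlink
	fmt.Println(regularOrSymlink(os.ModeDir | 0755))     // false - directory
	fmt.Println(regularOrSymlink(os.ModeNamedPipe))      // false - fifo
}
```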
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
@@ -180,28 +225,48 @@ func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// caseInsenstive returns whether the remote is case insensitive or not
|
||||
// caseInsensitive returns whether the remote is case insensitive or not
|
||||
func (f *Fs) caseInsensitive() bool {
|
||||
// FIXME not entirely accurate since you can have case
|
||||
// sensitive Fses on darwin and case insenstive Fses on linux.
|
||||
// sensitive Fses on darwin and case insensitive Fses on linux.
|
||||
// Should probably check but that would involve creating a
|
||||
// file in the remote to be most accurate which probably isn't
|
||||
// desirable.
|
||||
return runtime.GOOS == "windows" || runtime.GOOS == "darwin"
|
||||
}
|
||||
|
||||
// translateLink checks whether the remote is a translated link
|
||||
// and returns a new path, removing the suffix as needed,
|
||||
// It also returns whether this is a translated link at all
|
||||
//
|
||||
// for regular files, dstPath is returned unchanged
|
||||
func translateLink(remote, dstPath string) (newDstPath string, isTranslatedLink bool) {
|
||||
isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
|
||||
newDstPath = strings.TrimSuffix(dstPath, linkSuffix)
|
||||
return newDstPath, isTranslatedLink
|
||||
}
|
||||
|
||||
// newObject makes a half completed Object
|
||||
//
|
||||
// if dstPath is empty then it is made from remote
|
||||
func (f *Fs) newObject(remote, dstPath string) *Object {
|
||||
translatedLink := false
|
||||
|
||||
if dstPath == "" {
|
||||
dstPath = f.cleanPath(filepath.Join(f.root, remote))
|
||||
}
|
||||
remote = f.cleanRemote(remote)
|
||||
|
||||
if f.opt.TranslateSymlinks {
|
||||
// Possibly receive a new name for dstPath
|
||||
dstPath, translatedLink = translateLink(remote, dstPath)
|
||||
}
|
||||
|
||||
return &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
path: dstPath,
|
||||
fs: f,
|
||||
remote: remote,
|
||||
path: dstPath,
|
||||
translatedLink: translatedLink,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -223,6 +288,11 @@ func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Obj
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// Handle the odd case that a symlink was specified by name without the link suffix
|
||||
if o.fs.opt.TranslateSymlinks && o.mode&os.ModeSymlink != 0 && !o.translatedLink {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
}
|
||||
if o.mode.IsDir() {
|
||||
return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
|
||||
@@ -246,6 +316,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
|
||||
dir = f.dirNames.Load(dir)
|
||||
fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
|
||||
remote := f.cleanRemote(dir)
|
||||
@@ -302,6 +373,10 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
entries = append(entries, d)
|
||||
}
|
||||
} else {
|
||||
// Check whether this link should be translated
|
||||
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
|
||||
newRemote += linkSuffix
|
||||
}
|
||||
fso, err := f.newObjectWithInfo(newRemote, newPath, fi)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -515,7 +590,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
// OK
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
} else if !dstObj.mode.IsRegular() {
|
||||
} else if !dstObj.fs.isRegular(dstObj.mode) {
|
||||
// It isn't a file
|
||||
return nil, errors.New("can't move file onto non-file")
|
||||
}
|
||||
@@ -637,7 +712,13 @@ func (o *Object) Hash(r hash.Type) (string, error) {
|
||||
o.fs.objectHashesMu.Unlock()
|
||||
|
||||
if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
|
||||
in, err := os.Open(o.path)
|
||||
var in io.ReadCloser
|
||||
|
||||
if !o.translatedLink {
|
||||
in, err = file.Open(o.path)
|
||||
} else {
|
||||
in, err = o.openTranslatedLink(0, -1)
|
||||
}
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "hash: failed to open")
|
||||
}
|
||||
@@ -668,7 +749,12 @@ func (o *Object) ModTime() time.Time {
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(modTime time.Time) error {
|
||||
err := os.Chtimes(o.path, modTime, modTime)
|
||||
var err error
|
||||
if o.translatedLink {
|
||||
err = lChtimes(o.path, modTime, modTime)
|
||||
} else {
|
||||
err = os.Chtimes(o.path, modTime, modTime)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -686,7 +772,7 @@ func (o *Object) Storable() bool {
|
||||
}
|
||||
}
|
||||
mode := o.mode
|
||||
if mode&os.ModeSymlink != 0 {
|
||||
if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {
|
||||
if !o.fs.opt.SkipSymlinks {
|
||||
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
|
||||
}
|
||||
@@ -747,6 +833,16 @@ func (file *localOpenFile) Close() (err error) {
|
||||
return err
|
||||
}
|
||||
|
||||
// Returns a ReadCloser() object that contains the contents of a symbolic link
|
||||
func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err error) {
|
||||
// Read the link and return its destination as the contents of the object
|
||||
linkdst, err := os.Readlink(o.path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
|
||||
}
|
||||
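Because a translated link's content is just the link target text, offset and limit simply slice that string. A rough standard-library equivalent, treating -1 as "no limit" as the surrounding code does:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// openLinkText serves a symlink target string as an object body, honouring an
// offset and an optional limit (-1 means read to the end).
func openLinkText(target string, offset, limit int64) io.Reader {
	r := strings.NewReader(target[offset:])
	if limit < 0 {
		return r
	}
	return io.LimitReader(r, limit)
}

func main() {
	// A symlink pointing at "file.txt": a range read of bytes 2-5 of the target text.
	body, _ := ioutil.ReadAll(openLinkText("file.txt", 2, 4))
	fmt.Printf("%q\n", body) // "le.t"
}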
|
||||
// Open an object for read
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
var offset, limit int64 = 0, -1
|
||||
@@ -766,7 +862,12 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
}
|
||||
}
|
||||
|
||||
fd, err := os.Open(o.path)
|
||||
// Handle a translated link
|
||||
if o.translatedLink {
|
||||
return o.openTranslatedLink(offset, limit)
|
||||
}
|
||||
|
||||
fd, err := file.Open(o.path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -797,8 +898,19 @@ func (o *Object) mkdirAll() error {
|
||||
return os.MkdirAll(dir, 0777)
|
||||
}
|
||||
|
||||
type nopWriterCloser struct {
|
||||
*bytes.Buffer
|
||||
}
|
||||
|
||||
func (nwc nopWriterCloser) Close() error {
|
||||
// noop
|
||||
return nil
|
||||
}
|
||||
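nopWriterCloser lets a bytes.Buffer stand in for the io.WriteCloser that the copy path expects, so a translated link's contents can be captured in memory instead of being written to disk. A tiny illustration:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// nopWriterCloser gives a bytes.Buffer a no-op Close so it satisfies io.WriteCloser.
type nopWriterCloser struct {
	*bytes.Buffer
}

func (nwc nopWriterCloser) Close() error { return nil }

func main() {
	var symlinkData bytes.Buffer
	var out io.WriteCloser = nopWriterCloser{&symlinkData}

	// The copy loop does not care whether it writes to a file or to memory.
	_, _ = io.Copy(out, strings.NewReader("file.txt"))
	_ = out.Close()

	fmt.Println(symlinkData.String()) // file.txt - later handed to os.Symlink
}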
|
||||
// Update the object from in with modTime and size
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
var out io.WriteCloser
|
||||
|
||||
hashes := hash.Supported
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
@@ -812,9 +924,23 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
return err
|
||||
}
|
||||
|
||||
out, err := os.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
|
||||
if err != nil {
|
||||
return err
|
||||
var symlinkData bytes.Buffer
|
||||
// If the object is a regular file, create it.
|
||||
// If it is a translated link, just read in the contents, and
|
||||
// then create a symlink
|
||||
if !o.translatedLink {
|
||||
f, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Pre-allocate the file for performance reasons
|
||||
err = preAllocate(src.Size(), f)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Failed to pre-allocate: %v", err)
|
||||
}
|
||||
out = f
|
||||
} else {
|
||||
out = nopWriterCloser{&symlinkData}
|
||||
}
|
||||
|
||||
// Calculate the hash of the object we are reading as we go along
|
||||
@@ -829,6 +955,26 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
if err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
|
||||
if o.translatedLink {
|
||||
if err == nil {
|
||||
// Remove any current symlink or file, if one exists
|
||||
if _, err := os.Lstat(o.path); err == nil {
|
||||
if removeErr := os.Remove(o.path); removeErr != nil {
|
||||
fs.Errorf(o, "Failed to remove previous file: %v", removeErr)
|
||||
return removeErr
|
||||
}
|
||||
}
|
||||
// Use the contents for the copied object to create a symlink
|
||||
err = os.Symlink(symlinkData.String(), o.path)
|
||||
}
|
||||
|
||||
// only continue if symlink creation succeeded
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
fs.Logf(o, "Removing partially written file on error: %v", err)
|
||||
if removeErr := os.Remove(o.path); removeErr != nil {
|
||||
|
||||
@@ -1,13 +1,19 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/ncw/rclone/lib/file"
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -38,10 +44,13 @@ func TestUpdatingCheck(t *testing.T) {
|
||||
filePath := "sub dir/local test"
|
||||
r.WriteFile(filePath, "content", time.Now())
|
||||
|
||||
fd, err := os.Open(path.Join(r.LocalName, filePath))
|
||||
fd, err := file.Open(path.Join(r.LocalName, filePath))
|
||||
if err != nil {
|
||||
t.Fatalf("failed opening file %q: %v", filePath, err)
|
||||
}
|
||||
defer func() {
|
||||
require.NoError(t, fd.Close())
|
||||
}()
|
||||
|
||||
fi, err := fd.Stat()
|
||||
require.NoError(t, err)
|
||||
@@ -72,3 +81,108 @@ func TestUpdatingCheck(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
}
|
||||
|
||||
func TestSymlink(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
f := r.Flocal.(*Fs)
|
||||
dir := f.root
|
||||
|
||||
// Write a file
|
||||
modTime1 := fstest.Time("2001-02-03T04:05:10.123123123Z")
|
||||
file1 := r.WriteFile("file.txt", "hello", modTime1)
|
||||
|
||||
// Write a symlink
|
||||
modTime2 := fstest.Time("2002-02-03T04:05:10.123123123Z")
|
||||
symlinkPath := filepath.Join(dir, "symlink.txt")
|
||||
require.NoError(t, os.Symlink("file.txt", symlinkPath))
|
||||
require.NoError(t, lChtimes(symlinkPath, modTime2, modTime2))
|
||||
|
||||
// Object viewed as symlink
|
||||
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
|
||||
if runtime.GOOS == "windows" {
|
||||
file2.Size = 0 // symlinks are 0 length under Windows
|
||||
}
|
||||
|
||||
// Object viewed as destination
|
||||
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
|
||||
|
||||
// Check with no symlink flags
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
fstest.CheckItems(t, r.Fremote)
|
||||
|
||||
// Set fs into "-L" mode
|
||||
f.opt.FollowSymlinks = true
|
||||
f.opt.TranslateSymlinks = false
|
||||
f.lstat = os.Stat
|
||||
|
||||
fstest.CheckItems(t, r.Flocal, file1, file2d)
|
||||
fstest.CheckItems(t, r.Fremote)
|
||||
|
||||
// Set fs into "-l" mode
|
||||
f.opt.FollowSymlinks = false
|
||||
f.opt.TranslateSymlinks = true
|
||||
f.lstat = os.Lstat
|
||||
|
||||
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2}, nil, fs.ModTimeNotSupported)
|
||||
if haveLChtimes {
|
||||
fstest.CheckItems(t, r.Flocal, file1, file2)
|
||||
}
|
||||
|
||||
// Create a symlink
|
||||
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
|
||||
file3 := r.WriteObjectTo(r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
|
||||
if runtime.GOOS == "windows" {
|
||||
file3.Size = 0 // symlinks are 0 length under Windows
|
||||
}
|
||||
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
|
||||
if haveLChtimes {
|
||||
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
|
||||
}
|
||||
|
||||
// Check it got the correct contents
|
||||
symlinkPath = filepath.Join(dir, "symlink2.txt")
|
||||
fi, err := os.Lstat(symlinkPath)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, fi.Mode().IsRegular())
|
||||
linkText, err := os.Readlink(symlinkPath)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "file.txt", linkText)
|
||||
|
||||
// Check that NewObject gets the correct object
|
||||
o, err := r.Flocal.NewObject("symlink2.txt" + linkSuffix)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
|
||||
if runtime.GOOS != "windows" {
|
||||
assert.Equal(t, int64(8), o.Size())
|
||||
}
|
||||
|
||||
// Check that NewObject doesn't see the non suffixed version
|
||||
_, err = r.Flocal.NewObject("symlink2.txt")
|
||||
require.Equal(t, fs.ErrorObjectNotFound, err)
|
||||
|
||||
// Check reading the object
|
||||
in, err := o.Open()
|
||||
require.NoError(t, err)
|
||||
contents, err := ioutil.ReadAll(in)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "file.txt", string(contents))
|
||||
require.NoError(t, in.Close())
|
||||
|
||||
// Check reading the object with range
|
||||
in, err = o.Open(&fs.RangeOption{Start: 2, End: 5})
|
||||
require.NoError(t, err)
|
||||
contents, err = ioutil.ReadAll(in)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "file.txt"[2:5+1], string(contents))
|
||||
require.NoError(t, in.Close())
|
||||
}
|
||||
|
||||
func TestSymlinkError(t *testing.T) {
|
||||
m := configmap.Simple{
|
||||
"links": "true",
|
||||
"copy_links": "true",
|
||||
}
|
||||
_, err := NewFs("local", "/", m)
|
||||
assert.Equal(t, errLinksAndCopyLinks, err)
|
||||
}
|
||||
|
||||
10
backend/local/preallocate_other.go
Normal file
@@ -0,0 +1,10 @@
|
||||
//+build !windows,!linux
|
||||
|
||||
package local
|
||||
|
||||
import "os"
|
||||
|
||||
// preAllocate the file for performance reasons
|
||||
func preAllocate(size int64, out *os.File) error {
|
||||
return nil
|
||||
}
|
||||
22
backend/local/preallocate_unix.go
Normal file
@@ -0,0 +1,22 @@
|
||||
//+build linux
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// preAllocate the file for performance reasons
|
||||
func preAllocate(size int64, out *os.File) error {
|
||||
if size <= 0 {
|
||||
return nil
|
||||
}
|
||||
err := unix.Fallocate(int(out.Fd()), unix.FALLOC_FL_KEEP_SIZE, 0, size)
|
||||
// FIXME could be doing something here
|
||||
// if err == unix.ENOSPC {
|
||||
// log.Printf("No space")
|
||||
// }
|
||||
return err
|
||||
}
|
||||
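FALLOC_FL_KEEP_SIZE reserves blocks without changing the reported file length, so a partly copied file never appears larger than what was actually written. A hedged sketch of the calling pattern (preAllocate is stubbed as the no-op variant so the example runs anywhere; the real Linux helper calls unix.Fallocate as above, and failures are treated as non-fatal):

package main

import (
	"io"
	"log"
	"os"
	"strings"
)

// preAllocate stands in for the per-OS helpers above; the Linux variant calls
// unix.Fallocate with FALLOC_FL_KEEP_SIZE, the others are no-ops.
func preAllocate(size int64, out *os.File) error { return nil }

func main() {
	src := strings.NewReader("example payload")
	out, err := os.OpenFile("dest.bin", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) // made-up path
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// A failed pre-allocation is only worth a debug message, not an abort.
	if err := preAllocate(int64(src.Len()), out); err != nil {
		log.Printf("failed to pre-allocate: %v", err)
	}
	if _, err := io.Copy(out, src); err != nil {
		log.Fatal(err)
	}
}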
79
backend/local/preallocate_windows.go
Normal file
@@ -0,0 +1,79 @@
|
||||
//+build windows
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
var (
|
||||
ntdll = windows.NewLazySystemDLL("ntdll.dll")
|
||||
ntQueryVolumeInformationFile = ntdll.NewProc("NtQueryVolumeInformationFile")
|
||||
ntSetInformationFile = ntdll.NewProc("NtSetInformationFile")
|
||||
)
|
||||
|
||||
type fileAllocationInformation struct {
|
||||
AllocationSize uint64
|
||||
}
|
||||
|
||||
type fileFsSizeInformation struct {
|
||||
TotalAllocationUnits uint64
|
||||
AvailableAllocationUnits uint64
|
||||
SectorsPerAllocationUnit uint32
|
||||
BytesPerSector uint32
|
||||
}
|
||||
|
||||
type ioStatusBlock struct {
|
||||
Status, Information uintptr
|
||||
}
|
||||
|
||||
// preAllocate the file for performance reasons
|
||||
func preAllocate(size int64, out *os.File) error {
|
||||
if size <= 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
iosb ioStatusBlock
|
||||
fsSizeInfo fileFsSizeInformation
|
||||
allocInfo fileAllocationInformation
|
||||
)
|
||||
|
||||
// Query info about the block sizes on the file system
|
||||
_, _, e1 := ntQueryVolumeInformationFile.Call(
|
||||
uintptr(out.Fd()),
|
||||
uintptr(unsafe.Pointer(&iosb)),
|
||||
uintptr(unsafe.Pointer(&fsSizeInfo)),
|
||||
uintptr(unsafe.Sizeof(fsSizeInfo)),
|
||||
uintptr(3), // FileFsSizeInformation
|
||||
)
|
||||
if e1 != nil && e1 != syscall.Errno(0) {
|
||||
return errors.Wrap(e1, "preAllocate NtQueryVolumeInformationFile failed")
|
||||
}
|
||||
|
||||
// Calculate the allocation size
|
||||
clusterSize := uint64(fsSizeInfo.BytesPerSector) * uint64(fsSizeInfo.SectorsPerAllocationUnit)
|
||||
if clusterSize <= 0 {
|
||||
return errors.Errorf("preAllocate clusterSize %d <= 0", clusterSize)
|
||||
}
|
||||
allocInfo.AllocationSize = (1 + uint64(size-1)/clusterSize) * clusterSize
|
||||
|
||||
// Ask for the allocation
|
||||
_, _, e1 = ntSetInformationFile.Call(
|
||||
uintptr(out.Fd()),
|
||||
uintptr(unsafe.Pointer(&iosb)),
|
||||
uintptr(unsafe.Pointer(&allocInfo)),
|
||||
uintptr(unsafe.Sizeof(allocInfo)),
|
||||
uintptr(19), // FileAllocationInformation
|
||||
)
|
||||
if e1 != nil && e1 != syscall.Errno(0) {
|
||||
return errors.Wrap(e1, "preAllocate NtSetInformationFile failed")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
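The allocation request is the size rounded up to a whole number of clusters (BytesPerSector x SectorsPerAllocationUnit). The rounding is plain integer arithmetic; for example a 100000-byte file on 4 KiB clusters reserves 102400 bytes:

package main

import "fmt"

// roundToCluster mirrors the AllocationSize calculation above; size must be > 0.
func roundToCluster(size, bytesPerSector, sectorsPerUnit uint64) uint64 {
	clusterSize := bytesPerSector * sectorsPerUnit
	return (1 + (size-1)/clusterSize) * clusterSize
}

func main() {
	fmt.Println(roundToCluster(100000, 512, 8)) // 102400
	fmt.Println(roundToCluster(4096, 512, 8))   // 4096 - already aligned
	fmt.Println(roundToCluster(4097, 512, 8))   // 8192
}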
@@ -22,5 +22,5 @@ func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
|
||||
fs.Debugf(fi.Name(), "Type assertion fi.Sys().(*syscall.Stat_t) failed from: %#v", fi.Sys())
|
||||
return devUnset
|
||||
}
|
||||
return uint64(statT.Dev)
|
||||
return uint64(statT.Dev) // nolint: unconvert
|
||||
}
|
||||
|
||||
@@ -63,13 +63,20 @@ func init() {
|
||||
Required: true,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "debug",
|
||||
Help: "Output more debug from Mega.",
|
||||
Name: "debug",
|
||||
Help: `Output more debug from Mega.
|
||||
|
||||
If this flag is set (along with -vv) it will print further debugging
|
||||
information from the mega backend.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "hard_delete",
|
||||
Help: "Delete files permanently rather than putting them into the trash.",
|
||||
Name: "hard_delete",
|
||||
Help: `Delete files permanently rather than putting them into the trash.
|
||||
|
||||
Normally the mega backend will put all deletions into the trash rather
|
||||
than permanently deleting them. If you specify this then rclone will
|
||||
permanently delete objects instead.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}},
|
||||
@@ -490,7 +497,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
// Creates from the parameters passed in a half finished Object which
|
||||
// must have setMetaData called on it
|
||||
//
|
||||
// Returns the dirNode, obect, leaf and error
|
||||
// Returns the dirNode, object, leaf and error
|
||||
//
|
||||
// Used to create new objects
|
||||
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
|
||||
@@ -516,10 +523,10 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
|
||||
// This will create a duplicate if we upload a new file without
|
||||
// checking to see if there is one already - use Put() for that.
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
exisitingObj, err := f.newObjectWithInfo(src.Remote(), nil)
|
||||
existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
|
||||
switch err {
|
||||
case nil:
|
||||
return exisitingObj, exisitingObj.Update(in, src, options...)
|
||||
return existingObj, existingObj.Update(in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
// Not found so create it
|
||||
return f.PutUnchecked(in, src)
|
||||
@@ -840,14 +847,14 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error {
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "MergDirs move failed on %q in %v", info.GetName(), srcDir)
|
||||
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.GetName(), srcDir)
|
||||
}
|
||||
}
|
||||
// rmdir (into trash) the now empty source directory
|
||||
fs.Infof(srcDir, "removing empty directory")
|
||||
err = f.deleteNode(srcDirNode)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "MergDirs move failed to rmdir %q", srcDir)
|
||||
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -1069,6 +1076,9 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
size := src.Size()
|
||||
if size < 0 {
|
||||
return errors.New("mega backend can't upload a file of unknown length")
|
||||
}
|
||||
//modTime := src.ModTime()
|
||||
remote := o.Remote()
|
||||
|
||||
@@ -1119,7 +1129,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
return errors.Wrap(err, "failed to finish upload")
|
||||
}
|
||||
|
||||
// If the upload succeded and the original object existed, then delete it
|
||||
// If the upload succeeded and the original object existed, then delete it
|
||||
if o.info != nil {
|
||||
err = o.fs.deleteNode(o.info)
|
||||
if err != nil {
|
||||
|
||||
@@ -9,6 +9,9 @@ import (
|
||||
|
||||
const (
|
||||
timeFormat = `"` + time.RFC3339 + `"`
|
||||
|
||||
// PackageTypeOneNote is the package type value for OneNote files
|
||||
PackageTypeOneNote = "oneNote"
|
||||
)
|
||||
|
||||
// Error is returned from one drive when things go wrong
|
||||
@@ -22,7 +25,7 @@ type Error struct {
|
||||
} `json:"error"`
|
||||
}
|
||||
|
||||
// Error returns a string for the error and statistifes the error interface
|
||||
// Error returns a string for the error and satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
out := e.ErrorInfo.Code
|
||||
if e.ErrorInfo.InnerError.Code != "" {
|
||||
@@ -32,7 +35,7 @@ func (e *Error) Error() string {
|
||||
return out
|
||||
}
|
||||
|
||||
// Check Error statisfies the error interface
|
||||
// Check Error satisfies the error interface
|
||||
var _ error = (*Error)(nil)
|
||||
|
||||
// Identity represents an identity of an actor. For example, and actor
|
||||
@@ -107,6 +110,7 @@ type RemoteItemFacet struct {
|
||||
LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // Date and time the item was last modified. Read-only.
|
||||
Folder *FolderFacet `json:"folder"` // Folder metadata, if the item is a folder. Read-only.
|
||||
File *FileFacet `json:"file"` // File metadata, if the item is a file. Read-only.
|
||||
Package *PackageFacet `json:"package"` // If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others. Read-only.
|
||||
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
|
||||
ParentReference *ItemReference `json:"parentReference"` // Parent information, if the item has a parent. Read-write.
|
||||
Size int64 `json:"size"` // Size of the item in bytes. Read-only.
|
||||
@@ -147,6 +151,13 @@ type FileSystemInfoFacet struct {
|
||||
type DeletedFacet struct {
|
||||
}
|
||||
|
||||
// PackageFacet indicates that a DriveItem is the top level item
|
||||
// in a "package" or a collection of items that should be treated as a collection instead of individual items.
|
||||
// `oneNote` is the only currently defined value.
|
||||
type PackageFacet struct {
|
||||
Type string `json:"type"`
|
||||
}
|
||||
|
||||
// Item represents metadata for an item in OneDrive
|
||||
type Item struct {
|
||||
ID string `json:"id"` // The unique identifier of the item within the Drive. Read-only.
|
||||
@@ -170,6 +181,7 @@ type Item struct {
|
||||
// Audio *AudioFacet `json:"audio"` // Audio metadata, if the item is an audio file. Read-only.
|
||||
// Video *VideoFacet `json:"video"` // Video metadata, if the item is a video. Read-only.
|
||||
// Location *LocationFacet `json:"location"` // Location metadata, if the item has location data. Read-only.
|
||||
Package *PackageFacet `json:"package"` // If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others. Read-only.
|
||||
Deleted *DeletedFacet `json:"deleted"` // Information about the deleted state of the item. Read-only.
|
||||
}
|
||||
|
||||
@@ -238,6 +250,28 @@ type MoveItemRequest struct {
|
||||
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo,omitempty"` // File system information on client. Read-write.
|
||||
}
|
||||
|
||||
//CreateShareLinkRequest is the request to create a sharing link
|
||||
//Always Type:view and Scope:anonymous for public sharing
|
||||
type CreateShareLinkRequest struct {
|
||||
Type string `json:"type"` //Link type in View, Edit or Embed
|
||||
Scope string `json:"scope,omitempty"` //Optional. Scope in anonymous, organization
|
||||
}
|
||||
|
||||
//CreateShareLinkResponse is the response from CreateShareLinkRequest
|
||||
type CreateShareLinkResponse struct {
|
||||
ID string `json:"id"`
|
||||
Roles []string `json:"roles"`
|
||||
Link struct {
|
||||
Type string `json:"type"`
|
||||
Scope string `json:"scope"`
|
||||
WebURL string `json:"webUrl"`
|
||||
Application struct {
|
||||
ID string `json:"id"`
|
||||
DisplayName string `json:"displayName"`
|
||||
} `json:"application"`
|
||||
} `json:"link"`
|
||||
}
|
||||
|
||||
// AsyncOperationStatus provides information on the status of an asynchronous job's progress.
|
||||
//
|
||||
// The following API calls return AsyncOperationStatus resources:
|
||||
@@ -251,6 +285,7 @@ type AsyncOperationStatus struct {
|
||||
|
||||
// GetID returns a normalized ID of the item
|
||||
// If DriveID is known it will be prefixed to the ID with a # separator
|
||||
// Can be parsed using onedrive.parseNormalizedID(normalizedID)
|
||||
func (i *Item) GetID() string {
|
||||
if i.IsRemote() && i.RemoteItem.ID != "" {
|
||||
return i.RemoteItem.ParentReference.DriveID + "#" + i.RemoteItem.ID
|
||||
@@ -260,9 +295,9 @@ func (i *Item) GetID() string {
|
||||
return i.ID
|
||||
}
|
||||
|
||||
// GetDriveID returns a normalized ParentReferance of the item
|
||||
// GetDriveID returns a normalized ParentReference of the item
|
||||
func (i *Item) GetDriveID() string {
|
||||
return i.GetParentReferance().DriveID
|
||||
return i.GetParentReference().DriveID
|
||||
}
|
||||
|
||||
// GetName returns a normalized Name of the item
|
||||
@@ -281,6 +316,24 @@ func (i *Item) GetFolder() *FolderFacet {
|
||||
return i.Folder
|
||||
}
|
||||
|
||||
// GetPackage returns a normalized Package of the item
|
||||
func (i *Item) GetPackage() *PackageFacet {
|
||||
if i.IsRemote() && i.RemoteItem.Package != nil {
|
||||
return i.RemoteItem.Package
|
||||
}
|
||||
return i.Package
|
||||
}
|
||||
|
||||
// GetPackageType returns the package type of the item if available,
|
||||
// otherwise ""
|
||||
func (i *Item) GetPackageType() string {
|
||||
pack := i.GetPackage()
|
||||
if pack == nil {
|
||||
return ""
|
||||
}
|
||||
return pack.Type
|
||||
}
|
||||
|
||||
// GetFile returns a normalized File of the item
|
||||
func (i *Item) GetFile() *FileFacet {
|
||||
if i.IsRemote() && i.RemoteItem.File != nil {
|
||||
@@ -345,8 +398,8 @@ func (i *Item) GetLastModifiedDateTime() Timestamp {
|
||||
return i.LastModifiedDateTime
|
||||
}
|
||||
|
||||
// GetParentReferance returns a normalized ParentReferance of the item
|
||||
func (i *Item) GetParentReferance() *ItemReference {
|
||||
// GetParentReference returns a normalized ParentReference of the item
|
||||
func (i *Item) GetParentReference() *ItemReference {
|
||||
if i.IsRemote() && i.ParentReference == nil {
|
||||
return i.RemoteItem.ParentReference
|
||||
}
|
||||
|
||||
@@ -43,6 +43,8 @@ const (
|
||||
driveTypePersonal = "personal"
|
||||
driveTypeBusiness = "business"
|
||||
driveTypeSharepoint = "documentLibrary"
|
||||
defaultChunkSize = 10 * fs.MebiByte
|
||||
chunkSizeMultiple = 320 * fs.KibiByte
|
||||
)
|
||||
|
||||
// Globals
|
||||
@@ -73,9 +75,8 @@ func init() {
|
||||
return
|
||||
}
|
||||
|
||||
// Are we running headless?
|
||||
if automatic, _ := m.Get(config.ConfigAutomatic); automatic != "" {
|
||||
// Yes, okay we are done
|
||||
// Stop if we are running non-interactive config
|
||||
if fs.Config.AutoConfirm {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -197,7 +198,7 @@ func init() {
|
||||
|
||||
fmt.Printf("Found drive '%s' of type '%s', URL: %s\nIs that okay?\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL)
|
||||
// This does not work, YET :)
|
||||
if !config.Confirm() {
|
||||
if !config.ConfirmWithConfig(m, "config_drive_ok", true) {
|
||||
log.Fatalf("Cancelled by user")
|
||||
}
|
||||
|
||||
@@ -212,9 +213,12 @@ func init() {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Microsoft App Client Secret\nLeave blank normally.",
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "Chunk size to upload files with - must be multiple of 320k.",
|
||||
Default: fs.SizeSuffix(10 * 1024 * 1024),
|
||||
Name: "chunk_size",
|
||||
Help: `Chunk size to upload files with - must be multiple of 320k.
|
||||
|
||||
Above this size files will be chunked - must be multiple of 320k. Note
|
||||
that the chunks will be buffered into memory.`,
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "drive_id",
|
||||
@@ -223,18 +227,30 @@ func init() {
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "drive_type",
|
||||
Help: "The type of the drive ( personal | business | documentLibrary )",
|
||||
Help: "The type of the drive ( " + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + " )",
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "expose_onenote_files",
|
||||
Help: `Set to make OneNote files show up in directory listings.
|
||||
|
||||
By default rclone will hide OneNote files in directory listings because
|
||||
operations like "Open" and "Update" won't work on them. But this
|
||||
behaviour may also prevent you from deleting them. If you want to
|
||||
delete OneNote files or otherwise want them to show up in directory
|
||||
listing, set this option.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
DriveID string `config:"drive_id"`
|
||||
DriveType string `config:"drive_type"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
DriveID string `config:"drive_id"`
|
||||
DriveType string `config:"drive_type"`
|
||||
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
|
||||
}
|
||||
|
||||
// Fs represents a remote one drive
|
||||
@@ -255,15 +271,16 @@ type Fs struct {
|
||||
//
|
||||
// Will definitely have info but maybe not meta
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
hasMetaData bool // whether info below has been set
|
||||
size int64 // size of the object
|
||||
modTime time.Time // modification time of the object
|
||||
id string // ID of the object
|
||||
sha1 string // SHA-1 of the object content
|
||||
quickxorhash string // QuickXorHash of the object content
|
||||
mimeType string // Content-Type of object from server (may not be as uploaded)
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
hasMetaData bool // whether info below has been set
|
||||
isOneNoteFile bool // Whether the object is a OneNote file
|
||||
size int64 // size of the object
|
||||
modTime time.Time // modification time of the object
|
||||
id string // ID of the object
|
||||
sha1 string // SHA-1 of the object content
|
||||
quickxorhash string // QuickXorHash of the object content
|
||||
mimeType string // Content-Type of object from server (may not be as uploaded)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -307,29 +324,19 @@ var retryErrorCodes = []int{
|
||||
// shouldRetry returns a boolean as to whether this resp and err
|
||||
// deserve to be retried. It returns the err as a convenience
|
||||
func shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
authRety := false
|
||||
authRetry := false
|
||||
|
||||
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
|
||||
authRety = true
|
||||
authRetry = true
|
||||
fs.Debugf(nil, "Should retry: %v", err)
|
||||
}
|
||||
return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
// readMetaDataForPath reads the metadata from the path
|
||||
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
|
||||
var opts rest.Opts
|
||||
if len(path) == 0 {
|
||||
opts = rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/root",
|
||||
}
|
||||
} else {
|
||||
opts = rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
|
||||
}
|
||||
}
|
||||
// readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID.
|
||||
// if `relPath` == "", it reads the metadata for the item with that ID.
|
||||
func (f *Fs) readMetaDataForPathRelativeToID(normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
|
||||
opts := newOptsCall(normalizedID, "GET", ":/"+rest.URLPathEscape(replaceReservedChars(relPath)))
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(&opts, nil, &info)
|
||||
return shouldRetry(resp, err)
|
||||
@@ -338,6 +345,72 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Respon
|
||||
return info, resp, err
|
||||
}
|
||||
|
||||
// readMetaDataForPath reads the metadata from the path (relative to the absolute root)
|
||||
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
|
||||
firstSlashIndex := strings.IndexRune(path, '/')
|
||||
|
||||
if f.driveType != driveTypePersonal || firstSlashIndex == -1 {
|
||||
var opts rest.Opts
|
||||
if len(path) == 0 {
|
||||
opts = rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/root",
|
||||
}
|
||||
} else {
|
||||
opts = rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
|
||||
}
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(&opts, nil, &info)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
return info, resp, err
|
||||
}
|
||||
|
||||
// The following branch handles the case when we're using OneDrive Personal and the path is in a folder.
|
||||
// For OneDrive Personal, we need to consider the "shared with me" folders.
|
||||
// An item in such a folder can only be addressed by its ID relative to the sharer's driveID or
|
||||
// by its path relative to the folder's ID relative to the sharer's driveID.
|
||||
// Note: A "shared with me" folder can only be placed in the sharee's absolute root.
|
||||
// So we read metadata relative to a suitable folder's normalized ID.
|
||||
var dirCacheFoundRoot bool
|
||||
var rootNormalizedID string
|
||||
if f.dirCache != nil {
|
||||
var ok bool
|
||||
if rootNormalizedID, ok = f.dirCache.Get(""); ok {
|
||||
dirCacheFoundRoot = true
|
||||
}
|
||||
}
|
||||
|
||||
relPath, insideRoot := getRelativePathInsideBase(f.root, path)
|
||||
var firstDir, baseNormalizedID string
|
||||
if !insideRoot || !dirCacheFoundRoot {
|
||||
// We do not have the normalized ID in dirCache for our query to base on. Query it manually.
|
||||
firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
|
||||
info, resp, err := f.readMetaDataForPath(firstDir)
|
||||
if err != nil {
|
||||
return info, resp, err
|
||||
}
|
||||
baseNormalizedID = info.GetID()
|
||||
} else {
|
||||
if f.root != "" {
|
||||
// Read metadata based on root
|
||||
baseNormalizedID = rootNormalizedID
|
||||
} else {
|
||||
// Read metadata based on firstDir
|
||||
firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
|
||||
baseNormalizedID, err = f.dirCache.FindDir(firstDir, false)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return f.readMetaDataForPathRelativeToID(baseNormalizedID, relPath)
|
||||
}
|
||||
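In the OneDrive Personal branch the lookup base is the first path component (or the cached root), so the split is a single IndexRune on '/'. A minimal sketch with a made-up "shared with me" path:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Made-up item living in a folder that was shared with this account.
	path := "Shared Folder/sub/report.docx"

	if i := strings.IndexRune(path, '/'); i >= 0 {
		firstDir, relPath := path[:i], path[i+1:]
		// firstDir is resolved to a normalized driveID#itemID first,
		// then relPath is looked up relative to that ID.
		fmt.Println(firstDir) // Shared Folder
		fmt.Println(relPath)  // sub/report.docx
	}
}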
|
||||
// errorHandler parses a non 2xx error response into an error
|
||||
func errorHandler(resp *http.Response) error {
|
||||
// Decode error response
|
||||
@@ -352,6 +425,25 @@ func errorHandler(resp *http.Response) error {
|
||||
return errResponse
|
||||
}
|
||||
|
||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||
const minChunkSize = fs.Byte
|
||||
if cs%chunkSizeMultiple != 0 {
|
||||
return errors.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
|
||||
}
|
||||
if cs < minChunkSize {
|
||||
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadChunkSize(cs)
|
||||
if err == nil {
|
||||
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
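So a chunk size must be a positive multiple of 320 KiB; the 10 MiB default qualifies because 10 MiB is 32 x 320 KiB. A quick check of a few candidate sizes, assuming only those two constants:

package main

import "fmt"

const (
	kibi              = 1024
	mebi              = 1024 * kibi
	chunkSizeMultiple = 320 * kibi // 327680 bytes
)

// validChunkSize applies the same two checks as checkUploadChunkSize above.
func validChunkSize(cs int64) bool {
	return cs >= 1 && cs%chunkSizeMultiple == 0
}

func main() {
	fmt.Println(validChunkSize(10 * mebi)) // true  - the default, 32 x 320 KiB
	fmt.Println(validChunkSize(5 * mebi))  // true  - 16 x 320 KiB
	fmt.Println(validChunkSize(1 * mebi))  // false - 1 MiB is not a multiple of 320 KiB
}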
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
@@ -360,18 +452,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.ChunkSize%(320*1024) != 0 {
|
||||
return nil, errors.Errorf("chunk size %d is not a multiple of 320k", opt.ChunkSize)
|
||||
|
||||
err = checkUploadChunkSize(opt.ChunkSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "onedrive: chunk size")
|
||||
}
|
||||
|
||||
if opt.DriveID == "" || opt.DriveType == "" {
|
||||
log.Fatalf("Unable to get drive_id and drive_type. If you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend.")
|
||||
return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
|
||||
}
|
||||
|
||||
root = parsePath(root)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure OneDrive: %v", err)
|
||||
return nil, errors.Wrap(err, "failed to configure OneDrive")
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
@@ -398,27 +492,27 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
|
||||
// Get rootID
|
||||
rootInfo, _, err := f.readMetaDataForPath("")
|
||||
if err != nil || rootInfo.ID == "" {
|
||||
if err != nil || rootInfo.GetID() == "" {
|
||||
return nil, errors.Wrap(err, "failed to get root")
|
||||
}
|
||||
|
||||
f.dirCache = dircache.New(root, rootInfo.ID, f)
|
||||
f.dirCache = dircache.New(root, rootInfo.GetID(), f)
|
||||
|
||||
// Find the current root
|
||||
err = f.dirCache.FindRoot(false)
|
||||
if err != nil {
|
||||
// Assume it is a file
|
||||
newRoot, remote := dircache.SplitPath(root)
|
||||
newF := *f
|
||||
newF.dirCache = dircache.New(newRoot, rootInfo.ID, &newF)
|
||||
newF.root = newRoot
|
||||
tempF := *f
|
||||
tempF.dirCache = dircache.New(newRoot, rootInfo.ID, &tempF)
|
||||
tempF.root = newRoot
|
||||
// Make new Fs which is the parent
|
||||
err = newF.dirCache.FindRoot(false)
|
||||
err = tempF.dirCache.FindRoot(false)
|
||||
if err != nil {
|
||||
// No root so return old f
|
||||
return f, nil
|
||||
}
|
||||
_, err := newF.newObjectWithInfo(remote, nil)
|
||||
_, err := tempF.newObjectWithInfo(remote, nil)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// File doesn't exist so return old f
|
||||
@@ -426,8 +520,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// XXX: update the old f here instead of returning tempF, since
|
||||
// `features` were already filled with functions having *f as a receiver.
|
||||
// See https://github.com/ncw/rclone/issues/2182
|
||||
f.dirCache = tempF.dirCache
|
||||
f.root = tempF.root
|
||||
// return an error with an fs which points to the parent
|
||||
return &newF, fs.ErrorIsFile
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
@@ -470,24 +569,20 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
// FindLeaf finds a directory of name leaf in the folder with ID pathID
|
||||
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
|
||||
// fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
|
||||
parent, ok := f.dirCache.GetInv(pathID)
|
||||
_, ok := f.dirCache.GetInv(pathID)
|
||||
if !ok {
|
||||
return "", false, errors.New("couldn't find parent ID")
|
||||
}
|
||||
path := leaf
|
||||
if parent != "" {
|
||||
path = parent + "/" + path
|
||||
}
|
||||
if f.dirCache.FoundRoot() {
|
||||
path = f.rootSlash() + path
|
||||
}
|
||||
info, resp, err := f.readMetaDataForPath(path)
|
||||
info, resp, err := f.readMetaDataForPathRelativeToID(pathID, leaf)
|
||||
if err != nil {
|
||||
if resp != nil && resp.StatusCode == http.StatusNotFound {
|
||||
return "", false, nil
|
||||
}
|
||||
return "", false, err
|
||||
}
|
||||
if info.GetPackageType() == api.PackageTypeOneNote {
|
||||
return "", false, errors.New("found OneNote file when looking for folder")
|
||||
}
|
||||
if info.GetFolder() == nil {
|
||||
return "", false, errors.New("found file when looking for folder")
|
||||
}
|
||||
@@ -596,6 +691,11 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
}
|
||||
var iErr error
|
||||
_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
|
||||
if !f.opt.ExposeOneNoteFiles && info.GetPackageType() == api.PackageTypeOneNote {
|
||||
fs.Debugf(info.Name, "OneNote file not shown in directory listing")
|
||||
return false
|
||||
}
|
||||
|
||||
remote := path.Join(dir, info.GetName())
|
||||
folder := info.GetFolder()
|
||||
if folder != nil {
|
||||
@@ -815,13 +915,13 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
opts.ExtraHeaders = map[string]string{"Prefer": "respond-async"}
|
||||
opts.NoResponse = true
|
||||
|
||||
id, _, _ := parseDirID(directoryID)
|
||||
id, dstDriveID, _ := parseNormalizedID(directoryID)
|
||||
|
||||
replacedLeaf := replaceReservedChars(leaf)
|
||||
copyReq := api.CopyItemRequest{
|
||||
Name: &replacedLeaf,
|
||||
ParentReference: api.ItemReference{
|
||||
DriveID: f.driveID,
|
||||
DriveID: dstDriveID,
|
||||
ID: id,
|
||||
},
|
||||
}
|
||||
@@ -888,15 +988,23 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
id, dstDriveID, _ := parseNormalizedID(directoryID)
|
||||
_, srcObjDriveID, _ := parseNormalizedID(srcObj.id)
|
||||
|
||||
if dstDriveID != srcObjDriveID {
|
||||
// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
|
||||
// "Items cannot be moved between Drives using this request."
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
// Move the object
|
||||
opts := newOptsCall(srcObj.id, "PATCH", "")
|
||||
|
||||
id, _, _ := parseDirID(directoryID)
|
||||
|
||||
move := api.MoveItemRequest{
|
||||
Name: replaceReservedChars(leaf),
|
||||
ParentReference: &api.ItemReference{
|
||||
ID: id,
|
||||
DriveID: dstDriveID,
|
||||
ID: id,
|
||||
},
|
||||
// We set the mod time too as it gets reset otherwise
|
||||
FileSystemInfo: &api.FileSystemInfoFacet{
|
||||
@@ -972,7 +1080,20 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
parsedDstDirID, _, _ := parseDirID(dstDirectoryID)
|
||||
parsedDstDirID, dstDriveID, _ := parseNormalizedID(dstDirectoryID)
|
||||
|
||||
// Find ID of src
|
||||
srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, srcDriveID, _ := parseNormalizedID(srcID)
|
||||
|
||||
if dstDriveID != srcDriveID {
|
||||
// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
|
||||
// "Items cannot be moved between Drives using this request."
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
|
||||
// Check destination does not exist
|
||||
if dstRemote != "" {
|
||||
@@ -986,14 +1107,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Find ID of src
|
||||
srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get timestamps of src so they can be preserved
|
||||
srcInfo, _, err := srcFs.readMetaDataForPath(srcPath)
|
||||
srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(srcID, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1003,7 +1118,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
||||
move := api.MoveItemRequest{
|
||||
Name: replaceReservedChars(leaf),
|
||||
ParentReference: &api.ItemReference{
|
||||
ID: parsedDstDirID,
|
||||
DriveID: dstDriveID,
|
||||
ID: parsedDstDirID,
|
||||
},
|
||||
// We set the mod time too as it gets reset otherwise
|
||||
FileSystemInfo: &api.FileSystemInfoFacet{
|
||||
@@ -1064,6 +1180,32 @@ func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.QuickXorHash)
|
||||
}
|
||||
|
||||
// PublicLink returns a link for downloading without an account.
|
||||
func (f *Fs) PublicLink(remote string) (link string, err error) {
|
||||
info, _, err := f.readMetaDataForPath(f.srvPath(remote))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
opts := newOptsCall(info.GetID(), "POST", "/createLink")
|
||||
|
||||
share := api.CreateShareLinkRequest{
|
||||
Type: "view",
|
||||
Scope: "anonymous",
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
var result api.CreateShareLinkResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(&opts, &share, &result)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return "", err
|
||||
}
|
||||
return result.Link.WebURL, nil
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Fs returns the parent Fs
|
||||
@@ -1084,9 +1226,14 @@ func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// srvPath returns a path for use in server given a remote
|
||||
func (f *Fs) srvPath(remote string) string {
|
||||
return replaceReservedChars(f.rootSlash() + remote)
|
||||
}
|
||||
|
||||
// srvPath returns a path for use in server
|
||||
func (o *Object) srvPath() string {
|
||||
return replaceReservedChars(o.fs.rootSlash() + o.remote)
|
||||
return o.fs.srvPath(o.remote)
|
||||
}
|
||||
|
||||
// Hash returns the SHA-1 of an object returning a lowercase hex string
|
||||
@@ -1121,6 +1268,8 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
|
||||
o.hasMetaData = true
|
||||
o.size = info.GetSize()
|
||||
|
||||
o.isOneNoteFile = info.GetPackageType() == api.PackageTypeOneNote
|
||||
|
||||
// Docs: https://docs.microsoft.com/en-us/onedrive/developer/rest-api/resources/hashes
|
||||
//
|
||||
// We use SHA1 for onedrive personal and QuickXorHash for onedrive for business
|
||||
@@ -1185,13 +1334,13 @@ func (o *Object) ModTime() time.Time {
|
||||
// setModTime sets the modification time of the local fs object
|
||||
func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
|
||||
var opts rest.Opts
|
||||
_, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
|
||||
_, drive, rootURL := parseDirID(directoryID)
|
||||
leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
|
||||
trueDirID, drive, rootURL := parseNormalizedID(directoryID)
|
||||
if drive != "" {
|
||||
opts = rest.Opts{
|
||||
Method: "PATCH",
|
||||
RootURL: rootURL,
|
||||
Path: "/" + drive + "/root:/" + rest.URLPathEscape(o.srvPath()),
|
||||
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf),
|
||||
}
|
||||
} else {
|
||||
opts = rest.Opts{
|
||||
@@ -1232,6 +1381,10 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
if o.id == "" {
|
||||
return nil, errors.New("can't download - no id")
|
||||
}
|
||||
if o.isOneNoteFile {
|
||||
return nil, errors.New("can't open a OneNote file")
|
||||
}
|
||||
|
||||
fs.FixRangeOption(options, o.size)
|
||||
var resp *http.Response
|
||||
opts := newOptsCall(o.id, "GET", "/content")
|
||||
@@ -1255,7 +1408,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
// createUploadSession creates an upload session for the object
|
||||
func (o *Object) createUploadSession(modTime time.Time) (response *api.CreateUploadResponse, err error) {
|
||||
leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
|
||||
id, drive, rootURL := parseDirID(directoryID)
|
||||
id, drive, rootURL := parseNormalizedID(directoryID)
|
||||
var opts rest.Opts
|
||||
if drive != "" {
|
||||
opts = rest.Opts{
|
||||
@@ -1275,6 +1428,12 @@ func (o *Object) createUploadSession(modTime time.Time) (response *api.CreateUpl
|
||||
var resp *http.Response
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.CallJSON(&opts, &createRequest, &response)
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
|
||||
// Make the error more user-friendly
|
||||
err = errors.New(err.Error() + " (is it a OneNote file?)")
|
||||
}
|
||||
}
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
return response, err
|
||||
@@ -1329,7 +1488,7 @@ func (o *Object) cancelUploadSession(url string) (err error) {
|
||||
// uploadMultipart uploads a file using multipart upload
|
||||
func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
|
||||
if size <= 0 {
|
||||
panic("size passed into uploadMultipart must be > 0")
|
||||
return nil, errors.New("unknown-sized upload not supported")
|
||||
}
|
||||
|
||||
// Create upload session
|
||||
@@ -1376,19 +1535,19 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (i
|
||||
// This function will set modtime after uploading, which will create a new version for the remote file
|
||||
func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
|
||||
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
|
||||
panic("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
|
||||
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
|
||||
}
|
||||
|
||||
fs.Debugf(o, "Starting singlepart upload")
|
||||
var resp *http.Response
|
||||
var opts rest.Opts
|
||||
_, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
|
||||
_, drive, rootURL := parseDirID(directoryID)
|
||||
leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
|
||||
trueDirID, drive, rootURL := parseNormalizedID(directoryID)
|
||||
if drive != "" {
|
||||
opts = rest.Opts{
|
||||
Method: "PUT",
|
||||
RootURL: rootURL,
|
||||
Path: "/" + drive + "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
|
||||
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf) + ":/content",
|
||||
ContentLength: &size,
|
||||
Body: in,
|
||||
}
|
||||
@@ -1401,12 +1560,14 @@ func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (
|
||||
}
|
||||
}
|
||||
|
||||
if size == 0 {
|
||||
opts.Body = nil
|
||||
}
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
|
||||
// Make the error more user-friendly
|
||||
err = errors.New(err.Error() + " (is it a OneNote file?)")
|
||||
}
|
||||
}
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1425,6 +1586,10 @@ func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
if o.hasMetaData && o.isOneNoteFile {
|
||||
return errors.New("can't upload content to a OneNote file")
|
||||
}
|
||||
|
||||
o.fs.tokenRenewer.Start()
|
||||
defer o.fs.tokenRenewer.Stop()
|
||||
|
||||
@@ -1437,7 +1602,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
} else if size == 0 {
|
||||
info, err = o.uploadSinglepart(in, size, modTime)
|
||||
} else {
|
||||
panic("src file size must be >= 0")
|
||||
return errors.New("unknown-sized upload not supported")
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1461,8 +1626,8 @@ func (o *Object) ID() string {
|
||||
return o.id
|
||||
}
|
||||
|
||||
func newOptsCall(id string, method string, route string) (opts rest.Opts) {
|
||||
id, drive, rootURL := parseDirID(id)
|
||||
func newOptsCall(normalizedID string, method string, route string) (opts rest.Opts) {
|
||||
id, drive, rootURL := parseNormalizedID(normalizedID)
|
||||
|
||||
if drive != "" {
|
||||
return rest.Opts{
|
||||
@@ -1477,7 +1642,10 @@ func newOptsCall(id string, method string, route string) (opts rest.Opts) {
|
||||
}
|
||||
}
|
||||
|
||||
func parseDirID(ID string) (string, string, string) {
|
||||
// parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`)
|
||||
// and returns itemID, driveID, rootURL.
|
||||
// Such a normalized ID can come from (*Item).GetID()
|
||||
func parseNormalizedID(ID string) (string, string, string) {
|
||||
if strings.Index(ID, "#") >= 0 {
|
||||
s := strings.Split(ID, "#")
|
||||
return s[1], s[0], graphURL + "/drives"
|
||||
@@ -1485,6 +1653,21 @@ func parseDirID(ID string) (string, string, string) {
|
||||
return ID, "", ""
|
||||
}
|
||||
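A normalized ID therefore has one of two shapes: "driveID#itemID" when the owning drive is known, or a bare item ID. A small sketch of both; the IDs are invented and the graphURL value is an assumption:

package main

import (
	"fmt"
	"strings"
)

const graphURL = "https://graph.microsoft.com/v1.0" // assumed Graph API base

// parseNormalizedID returns itemID, driveID and the root URL to call, as above.
func parseNormalizedID(id string) (string, string, string) {
	if strings.Contains(id, "#") {
		s := strings.SplitN(id, "#", 2)
		return s[1], s[0], graphURL + "/drives"
	}
	return id, "", ""
}

func main() {
	fmt.Println(parseNormalizedID("b!drive123#item456")) // item456 b!drive123 .../drives
	fmt.Println(parseNormalizedID("item456"))            // item456, empty drive, empty URL
}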
|
||||
// getRelativePathInsideBase checks if `target` is inside `base`. If so, it
|
||||
// returns a relative path for `target` based on `base` and a boolean `true`.
|
||||
// Otherwise returns "", false.
|
||||
func getRelativePathInsideBase(base, target string) (string, bool) {
|
||||
if base == "" {
|
||||
return target, true
|
||||
}
|
||||
|
||||
baseSlash := base + "/"
|
||||
if strings.HasPrefix(target+"/", baseSlash) {
|
||||
return target[len(baseSlash):], true
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
@@ -1494,6 +1677,7 @@ var (
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.IDer = &Object{}
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
// Test OneDrive filesystem interface
|
||||
package onedrive_test
|
||||
package onedrive
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/onedrive"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
@@ -12,6 +12,15 @@ import (
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestOneDrive:",
|
||||
NilObject: (*onedrive.Object)(nil),
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
CeilChunkSize: fstests.NextMultipleOf(chunkSizeMultiple),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -20,6 +21,7 @@ import (
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/dircache"
|
||||
"github.com/ncw/rclone/lib/pacer"
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/ncw/rclone/lib/rest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -117,7 +119,7 @@ func (f *Fs) DirCacheFlush() {
|
||||
f.dirCache.ResetRoot()
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, bucket:path
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
@@ -177,17 +179,17 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
// Assume it is a file
|
||||
newRoot, remote := dircache.SplitPath(root)
|
||||
newF := *f
|
||||
newF.dirCache = dircache.New(newRoot, "0", &newF)
|
||||
newF.root = newRoot
|
||||
tempF := *f
|
||||
tempF.dirCache = dircache.New(newRoot, "0", &tempF)
|
||||
tempF.root = newRoot
|
||||
|
||||
// Make new Fs which is the parent
|
||||
err = newF.dirCache.FindRoot(false)
|
||||
err = tempF.dirCache.FindRoot(false)
|
||||
if err != nil {
|
||||
// No root so return old f
|
||||
return f, nil
|
||||
}
|
||||
_, err := newF.newObjectWithInfo(remote, nil)
|
||||
_, err := tempF.newObjectWithInfo(remote, nil)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// File doesn't exist so return old f
|
||||
@@ -195,8 +197,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// XXX: update the old f here instead of returning tempF, since
|
||||
// `features` were already filled with functions having *f as a receiver.
|
||||
// See https://github.com/ncw/rclone/issues/2182
|
||||
f.dirCache = tempF.dirCache
|
||||
f.root = tempF.root
|
||||
// return an error with an fs which points to the parent
|
||||
return &newF, fs.ErrorIsFile
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
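The XXX comment above is the crux of this change: as it says, `features` was already filled with functions having the original *f as a receiver, so returning a copied Fs would leave those callbacks pointing at a stale receiver. A stripped-down illustration of that pitfall (hypothetical demo code, not from the backend):

package main

import "fmt"

type demoFs struct{ root string }

func main() {
	f := &demoFs{root: "old-root"}
	// Like f.features in the backend, this closure is bound to the original *f.
	describe := func() string { return f.root }

	tempF := *f             // shallow copy used to probe whether root is a file
	tempF.root = "parent"   // updating the copy ...
	fmt.Println(describe()) // ... leaves the closure still seeing "old-root"

	// Hence the fix above: copy the results back onto f instead of returning &tempF.
	f.root = tempF.root
	fmt.Println(describe()) // now prints "parent"
}
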
@@ -778,7 +785,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
remote := path.Join(dir, folder.Name)
|
||||
// cache the directory ID for later lookups
|
||||
f.dirCache.Put(remote, folder.FolderID)
|
||||
d := fs.NewDir(remote, time.Unix(int64(folder.DateModified), 0)).SetID(folder.FolderID)
|
||||
d := fs.NewDir(remote, time.Unix(folder.DateModified, 0)).SetID(folder.FolderID)
|
||||
d.SetItems(int64(folder.ChildFolders))
|
||||
entries = append(entries, d)
|
||||
}
|
||||
@@ -925,8 +932,9 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
// resp.Body.Close()
|
||||
// fs.Debugf(nil, "PostOpen: %#v", openResponse)
|
||||
|
||||
// 1 MB chunks size
|
||||
// 10 MB chunks size
|
||||
chunkSize := int64(1024 * 1024 * 10)
|
||||
buf := make([]byte, int(chunkSize))
|
||||
chunkOffset := int64(0)
|
||||
remainingBytes := size
|
||||
chunkCounter := 0
|
||||
@@ -939,14 +947,19 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
remainingBytes -= currentChunkSize
|
||||
fs.Debugf(o, "Uploading chunk %d, size=%d, remain=%d", chunkCounter, currentChunkSize, remainingBytes)
|
||||
|
||||
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, currentChunkSize)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
// seek to the start in case this is a retry
|
||||
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
|
||||
return false, err
|
||||
}
|
||||
var formBody bytes.Buffer
|
||||
w := multipart.NewWriter(&formBody)
|
||||
fw, err := w.CreateFormFile("file_data", o.remote)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if _, err = io.CopyN(fw, in, currentChunkSize); err != nil {
|
||||
if _, err = io.Copy(fw, chunk); err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Add session_id
|
||||
@@ -1077,7 +1090,7 @@ func (o *Object) readMetaData() (err error) {
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + rest.URLPathEscape(replaceReservedChars(leaf)),
|
||||
Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + url.QueryEscape(replaceReservedChars(leaf)),
|
||||
}
|
||||
resp, err = o.fs.srv.CallJSON(&opts, nil, &folderList)
|
||||
return o.fs.shouldRetry(resp, err)
|
||||
|
||||
@@ -13,7 +13,7 @@ type Error struct {
|
||||
} `json:"error"`
|
||||
}
|
||||
|
||||
// Error statisfies the error interface
|
||||
// Error satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("%s (Error %d)", e.Info.Message, e.Info.Code)
|
||||
}
|
||||
|
||||
@@ -41,7 +41,7 @@ type Error struct {
|
||||
ErrorString string `json:"error"`
|
||||
}
|
||||
|
||||
// Error returns a string for the error and statistifes the error interface
|
||||
// Error returns a string for the error and satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("pcloud error: %s (%d)", e.ErrorString, e.Result)
|
||||
}
|
||||
@@ -58,7 +58,7 @@ func (e *Error) Update(err error) error {
|
||||
return e
|
||||
}
|
||||
|
||||
// Check Error statisfies the error interface
|
||||
// Check Error satisfies the error interface
|
||||
var _ error = (*Error)(nil)
|
||||
|
||||
// Item describes a folder or a file as returned by Get Folder Items and others
|
||||
@@ -161,7 +161,6 @@ type UserInfo struct {
|
||||
PublicLinkQuota int64 `json:"publiclinkquota"`
|
||||
Email string `json:"email"`
|
||||
UserID int `json:"userid"`
|
||||
Result int `json:"result"`
|
||||
Quota int64 `json:"quota"`
|
||||
TrashRevretentionDays int `json:"trashrevretentiondays"`
|
||||
Premium bool `json:"premium"`
|
||||
|
||||
@@ -246,7 +246,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
root = parsePath(root)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure Pcloud: %v", err)
|
||||
return nil, errors.Wrap(err, "failed to configure Pcloud")
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
@@ -276,16 +276,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
// Assume it is a file
|
||||
newRoot, remote := dircache.SplitPath(root)
|
||||
newF := *f
|
||||
newF.dirCache = dircache.New(newRoot, rootID, &newF)
|
||||
newF.root = newRoot
|
||||
tempF := *f
|
||||
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
|
||||
tempF.root = newRoot
|
||||
// Make new Fs which is the parent
|
||||
err = newF.dirCache.FindRoot(false)
|
||||
err = tempF.dirCache.FindRoot(false)
|
||||
if err != nil {
|
||||
// No root so return old f
|
||||
return f, nil
|
||||
}
|
||||
_, err := newF.newObjectWithInfo(remote, nil)
|
||||
_, err := tempF.newObjectWithInfo(remote, nil)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// File doesn't exist so return old f
|
||||
@@ -293,8 +293,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// XXX: update the old f here instead of returning tempF, since
|
||||
// `features` were already filled with functions having *f as a receiver.
|
||||
// See https://github.com/ncw/rclone/issues/2182
|
||||
f.dirCache = tempF.dirCache
|
||||
f.root = tempF.root
|
||||
// return an error with an fs which points to the parent
|
||||
return &newF, fs.ErrorIsFile
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
@@ -380,7 +385,7 @@ func fileIDtoNumber(fileID string) string {
|
||||
if len(fileID) > 0 && fileID[0] == 'f' {
|
||||
return fileID[1:]
|
||||
}
|
||||
fs.Debugf(nil, "Invalid filee id %q", fileID)
|
||||
fs.Debugf(nil, "Invalid file id %q", fileID)
|
||||
return fileID
|
||||
}
|
||||
|
||||
|
||||
@@ -69,17 +69,57 @@ func init() {
|
||||
}},
|
||||
}, {
|
||||
Name: "connection_retries",
|
||||
Help: "Number of connnection retries.",
|
||||
Help: "Number of connection retries.",
|
||||
Default: 3,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: `Cutoff for switching to chunked upload
|
||||
|
||||
Any files larger than this will be uploaded in chunks of chunk_size.
|
||||
The minimum is 0 and the maximum is 5GB.`,
|
||||
Default: defaultUploadCutoff,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: `Chunk size to use for uploading.
|
||||
|
||||
When uploading files larger than upload_cutoff they will be uploaded
|
||||
as multipart uploads using this chunk size.
|
||||
|
||||
Note that "--qingstor-upload-concurrency" chunks of this size are buffered
|
||||
in memory per transfer.
|
||||
|
||||
If you are transferring large files over high speed links and you have
|
||||
enough memory, then increasing this will speed up the transfers.`,
|
||||
Default: minChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_concurrency",
|
||||
Help: `Concurrency for multipart uploads.
|
||||
|
||||
This is the number of chunks of the same file that are uploaded
|
||||
concurrently.
|
||||
|
||||
NB if you set this to > 1 then the checksums of multipart uploads
|
||||
become corrupted (the uploads themselves are not corrupted though).
|
||||
|
||||
If you are uploading small numbers of large files over high speed links
|
||||
and these uploads do not fully utilize your bandwidth, then increasing
|
||||
this may help to speed up the transfers.`,
|
||||
Default: 1,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Constants
const (
	listLimitSize  = 1000                   // Number of items to read at once
	maxSizeForCopy = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
	listLimitSize       = 1000                   // Number of items to read at once
	maxSizeForCopy      = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
	minChunkSize        = fs.SizeSuffix(minMultiPartSize)
	defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
	maxUploadCutoff     = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
)

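These new constants drive the single-part vs multipart decision that appears further down in Object.Update. A minimal sketch of that logic (useMultipart is a made-up name for illustration; fs.SizeSuffix comes from the rclone fs package):

// Sketch of the cutoff logic used later in Update: unknown sizes and anything
// at or above upload_cutoff go through the multipart uploader in chunk_size
// parts; smaller objects are sent as a single PUT.
func useMultipart(size int64, uploadCutoff fs.SizeSuffix) bool {
	return size < 0 || size >= int64(uploadCutoff)
}
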
// Globals
|
||||
@@ -92,12 +132,15 @@ func timestampToTime(tp int64) time.Time {
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
AccessKeyID string `config:"access_key_id"`
|
||||
SecretAccessKey string `config:"secret_access_key"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
Zone string `config:"zone"`
|
||||
ConnectionRetries int `config:"connection_retries"`
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
AccessKeyID string `config:"access_key_id"`
|
||||
SecretAccessKey string `config:"secret_access_key"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
Zone string `config:"zone"`
|
||||
ConnectionRetries int `config:"connection_retries"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
}
|
||||
|
||||
// Fs represents a remote qingstor server
|
||||
@@ -227,6 +270,36 @@ func qsServiceConnection(opt *Options) (*qs.Service, error) {
|
||||
return qs.Init(cf)
|
||||
}
|
||||
|
||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||
if cs < minChunkSize {
|
||||
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadChunkSize(cs)
|
||||
if err == nil {
|
||||
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func checkUploadCutoff(cs fs.SizeSuffix) error {
|
||||
if cs > maxUploadCutoff {
|
||||
return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadCutoff(cs)
|
||||
if err == nil {
|
||||
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
@@ -235,6 +308,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = checkUploadChunkSize(opt.ChunkSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "qingstor: chunk size")
|
||||
}
|
||||
err = checkUploadCutoff(opt.UploadCutoff)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "qingstor: upload cutoff")
|
||||
}
|
||||
bucket, key, err := qsParsePath(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -368,7 +449,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
}
|
||||
_, err = bucketInit.PutObject(key, &req)
|
||||
if err != nil {
|
||||
fs.Debugf(f, "Copied Faild, API Error: %v", err)
|
||||
fs.Debugf(f, "Copy Failed, API Error: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
return f.NewObject(remote)
|
||||
@@ -675,7 +756,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
}
|
||||
switch *statistics.Status {
|
||||
case "deleted":
|
||||
fs.Debugf(f, "Wiat for qingstor sync bucket status, retries: %d", retries)
|
||||
fs.Debugf(f, "Wait for qingstor sync bucket status, retries: %d", retries)
|
||||
time.Sleep(time.Second * 1)
|
||||
retries++
|
||||
continue
|
||||
@@ -794,7 +875,7 @@ func (o *Object) readMetaData() (err error) {
|
||||
fs.Debugf(o, "Read metadata of key: %s", key)
|
||||
resp, err := bucketInit.HeadObject(key, &qs.HeadObjectInput{})
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Read metadata faild, API Error: %v", err)
|
||||
fs.Debugf(o, "Read metadata failed, API Error: %v", err)
|
||||
if e, ok := err.(*qsErr.QingStorError); ok {
|
||||
if e.StatusCode == http.StatusNotFound {
|
||||
return fs.ErrorObjectNotFound
|
||||
@@ -913,16 +994,24 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
mimeType := fs.MimeType(src)
|
||||
|
||||
req := uploadInput{
|
||||
body: in,
|
||||
qsSvc: o.fs.svc,
|
||||
bucket: o.fs.bucket,
|
||||
zone: o.fs.zone,
|
||||
key: key,
|
||||
mimeType: mimeType,
|
||||
body: in,
|
||||
qsSvc: o.fs.svc,
|
||||
bucket: o.fs.bucket,
|
||||
zone: o.fs.zone,
|
||||
key: key,
|
||||
mimeType: mimeType,
|
||||
partSize: int64(o.fs.opt.ChunkSize),
|
||||
concurrency: o.fs.opt.UploadConcurrency,
|
||||
}
|
||||
uploader := newUploader(&req)
|
||||
|
||||
err = uploader.upload()
|
||||
size := src.Size()
|
||||
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
|
||||
if multipart {
|
||||
err = uploader.upload()
|
||||
} else {
|
||||
err = uploader.singlePartUpload(in, size)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -2,12 +2,12 @@
|
||||
|
||||
// +build !plan9
|
||||
|
||||
package qingstor_test
|
||||
package qingstor
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/qingstor"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
@@ -15,6 +15,19 @@ import (
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestQingStor:",
|
||||
NilObject: (*qingstor.Object)(nil),
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: minChunkSize,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadCutoff(cs)
|
||||
}
|
||||
|
||||
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
|
||||
@@ -143,7 +143,7 @@ func (u *uploader) init() {
|
||||
|
||||
// Try to adjust partSize if it is too small and account for
|
||||
// integer division truncation.
|
||||
if u.totalSize/u.cfg.partSize >= int64(u.cfg.partSize) {
|
||||
if u.totalSize/u.cfg.partSize >= u.cfg.partSize {
|
||||
// Add one to the part size to account for remainders
|
||||
// during the size calculation. e.g odd number of bytes.
|
||||
u.cfg.partSize = (u.totalSize / int64(u.cfg.maxUploadParts)) + 1
|
||||
@@ -152,18 +152,18 @@ func (u *uploader) init() {
|
||||
}
|
||||
|
||||
// singlePartUpload uploads a single object whose contentLength is less than "defaultUploadPartSize"
|
||||
func (u *uploader) singlePartUpload(buf io.ReadSeeker) error {
|
||||
func (u *uploader) singlePartUpload(buf io.Reader, size int64) error {
|
||||
bucketInit, _ := u.bucketInit()
|
||||
|
||||
req := qs.PutObjectInput{
|
||||
ContentLength: &u.readerPos,
|
||||
ContentLength: &size,
|
||||
ContentType: &u.cfg.mimeType,
|
||||
Body: buf,
|
||||
}
|
||||
|
||||
_, err := bucketInit.PutObject(u.cfg.key, &req)
|
||||
if err == nil {
|
||||
fs.Debugf(u, "Upload single objcet finished")
|
||||
fs.Debugf(u, "Upload single object finished")
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -179,13 +179,13 @@ func (u *uploader) upload() error {
|
||||
// Do one read to determine if we have more than one part
|
||||
reader, _, err := u.nextReader()
|
||||
if err == io.EOF { // single part
|
||||
fs.Debugf(u, "Tried to upload a singile object to QingStor")
|
||||
return u.singlePartUpload(reader)
|
||||
fs.Debugf(u, "Uploading as single part object to QingStor")
|
||||
return u.singlePartUpload(reader, u.readerPos)
|
||||
} else if err != nil {
|
||||
return errors.Errorf("read upload data failed: %s", err)
|
||||
}
|
||||
|
||||
fs.Debugf(u, "Treied to upload a multi-part object to QingStor")
|
||||
fs.Debugf(u, "Uploading as multi-part object to QingStor")
|
||||
mu := multiUploader{uploader: u}
|
||||
return mu.multiPartUpload(reader)
|
||||
}
|
||||
@@ -261,7 +261,7 @@ func (mu *multiUploader) initiate() error {
|
||||
req := qs.InitiateMultipartUploadInput{
|
||||
ContentType: &mu.cfg.mimeType,
|
||||
}
|
||||
fs.Debugf(mu, "Tried to initiate a multi-part upload")
|
||||
fs.Debugf(mu, "Initiating a multi-part upload")
|
||||
rsp, err := bucketInit.InitiateMultipartUpload(mu.cfg.key, &req)
|
||||
if err == nil {
|
||||
mu.uploadID = rsp.UploadID
|
||||
@@ -279,12 +279,12 @@ func (mu *multiUploader) send(c chunk) error {
|
||||
ContentLength: &c.size,
|
||||
Body: c.buffer,
|
||||
}
|
||||
fs.Debugf(mu, "Tried to upload a part to QingStor that partNumber %d and partSize %d", c.partNumber, c.size)
|
||||
fs.Debugf(mu, "Uploading a part to QingStor with partNumber %d and partSize %d", c.partNumber, c.size)
|
||||
_, err := bucketInit.UploadMultipart(mu.cfg.key, &req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fs.Debugf(mu, "Upload part finished that partNumber %d and partSize %d", c.partNumber, c.size)
|
||||
fs.Debugf(mu, "Done uploading part partNumber %d and partSize %d", c.partNumber, c.size)
|
||||
|
||||
mu.mtx.Lock()
|
||||
defer mu.mtx.Unlock()
|
||||
@@ -304,7 +304,7 @@ func (mu *multiUploader) list() error {
|
||||
req := qs.ListMultipartInput{
|
||||
UploadID: mu.uploadID,
|
||||
}
|
||||
fs.Debugf(mu, "Tried to list a multi-part")
|
||||
fs.Debugf(mu, "Reading multi-part details")
|
||||
rsp, err := bucketInit.ListMultipart(mu.cfg.key, &req)
|
||||
if err == nil {
|
||||
mu.objectParts = rsp.ObjectParts
|
||||
@@ -331,7 +331,7 @@ func (mu *multiUploader) complete() error {
|
||||
ObjectParts: mu.objectParts,
|
||||
ETag: &md5String,
|
||||
}
|
||||
fs.Debugf(mu, "Tried to complete a multi-part")
|
||||
fs.Debugf(mu, "Completing multi-part object")
|
||||
_, err = bucketInit.CompleteMultipartUpload(mu.cfg.key, &req)
|
||||
if err == nil {
|
||||
fs.Debugf(mu, "Complete multi-part finished")
|
||||
@@ -348,7 +348,7 @@ func (mu *multiUploader) abort() error {
|
||||
req := qs.AbortMultipartUploadInput{
|
||||
UploadID: uploadID,
|
||||
}
|
||||
fs.Debugf(mu, "Tried to abort a multi-part")
|
||||
fs.Debugf(mu, "Aborting multi-part object %q", *uploadID)
|
||||
_, err = bucketInit.AbortMultipartUpload(mu.cfg.key, &req)
|
||||
}
|
||||
|
||||
@@ -392,6 +392,14 @@ func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) error {
|
||||
var nextChunkLen int
|
||||
reader, nextChunkLen, err = mu.nextReader()
|
||||
if err != nil && err != io.EOF {
|
||||
// empty ch
|
||||
go func() {
|
||||
for range ch {
|
||||
}
|
||||
}()
|
||||
// Wait for all goroutines to finish
|
||||
close(ch)
|
||||
mu.wg.Wait()
|
||||
return err
|
||||
}
|
||||
if nextChunkLen == 0 && partNumber > 0 {
|
||||
|
||||
backend/s3/s3.go (591 lines changed)
@@ -53,7 +53,7 @@ import (
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "s3",
|
||||
Description: "Amazon S3 Compliant Storage Providers (AWS, Ceph, Dreamhost, IBM COS, Minio)",
|
||||
Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: fs.ConfigProvider,
|
||||
@@ -61,6 +61,9 @@ func init() {
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "AWS",
|
||||
Help: "Amazon Web Services (AWS) S3",
|
||||
}, {
|
||||
Value: "Alibaba",
|
||||
Help: "Alibaba Cloud Object Storage System (OSS) formerly Aliyun",
|
||||
}, {
|
||||
Value: "Ceph",
|
||||
Help: "Ceph Object Storage",
|
||||
@@ -76,6 +79,9 @@ func init() {
|
||||
}, {
|
||||
Value: "Minio",
|
||||
Help: "Minio Object Storage",
|
||||
}, {
|
||||
Value: "Netease",
|
||||
Help: "Netease Object Storage (NOS)",
|
||||
}, {
|
||||
Value: "Wasabi",
|
||||
Help: "Wasabi Object Storage",
|
||||
@@ -125,6 +131,9 @@ func init() {
|
||||
}, {
|
||||
Value: "eu-west-2",
|
||||
Help: "EU (London) Region\nNeeds location constraint eu-west-2.",
|
||||
}, {
|
||||
Value: "eu-north-1",
|
||||
Help: "EU (Stockholm) Region\nNeeds location constraint eu-north-1.",
|
||||
}, {
|
||||
Value: "eu-central-1",
|
||||
Help: "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
|
||||
@@ -150,7 +159,7 @@ func init() {
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
|
||||
Provider: "!AWS",
|
||||
Provider: "!AWS,Alibaba",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Use this if unsure. Will use v4 signatures and an empty region.",
|
||||
@@ -228,10 +237,10 @@ func init() {
|
||||
Help: "EU Cross Region Amsterdam Private Endpoint",
|
||||
}, {
|
||||
Value: "s3.eu-gb.objectstorage.softlayer.net",
|
||||
Help: "Great Britan Endpoint",
|
||||
Help: "Great Britain Endpoint",
|
||||
}, {
|
||||
Value: "s3.eu-gb.objectstorage.service.networklayer.com",
|
||||
Help: "Great Britan Private Endpoint",
|
||||
Help: "Great Britain Private Endpoint",
|
||||
}, {
|
||||
Value: "s3.ap-geo.objectstorage.softlayer.net",
|
||||
Help: "APAC Cross Regional Endpoint",
|
||||
@@ -269,12 +278,75 @@ func init() {
|
||||
Value: "s3.tor01.objectstorage.service.networklayer.com",
|
||||
Help: "Toronto Single Site Private Endpoint",
|
||||
}},
|
||||
}, {
|
||||
// oss endpoints: https://help.aliyun.com/document_detail/31837.html
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for OSS API.",
|
||||
Provider: "Alibaba",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "oss-cn-hangzhou.aliyuncs.com",
|
||||
Help: "East China 1 (Hangzhou)",
|
||||
}, {
|
||||
Value: "oss-cn-shanghai.aliyuncs.com",
|
||||
Help: "East China 2 (Shanghai)",
|
||||
}, {
|
||||
Value: "oss-cn-qingdao.aliyuncs.com",
|
||||
Help: "North China 1 (Qingdao)",
|
||||
}, {
|
||||
Value: "oss-cn-beijing.aliyuncs.com",
|
||||
Help: "North China 2 (Beijing)",
|
||||
}, {
|
||||
Value: "oss-cn-zhangjiakou.aliyuncs.com",
|
||||
Help: "North China 3 (Zhangjiakou)",
|
||||
}, {
|
||||
Value: "oss-cn-huhehaote.aliyuncs.com",
|
||||
Help: "North China 5 (Huhehaote)",
|
||||
}, {
|
||||
Value: "oss-cn-shenzhen.aliyuncs.com",
|
||||
Help: "South China 1 (Shenzhen)",
|
||||
}, {
|
||||
Value: "oss-cn-hongkong.aliyuncs.com",
|
||||
Help: "Hong Kong (Hong Kong)",
|
||||
}, {
|
||||
Value: "oss-us-west-1.aliyuncs.com",
|
||||
Help: "US West 1 (Silicon Valley)",
|
||||
}, {
|
||||
Value: "oss-us-east-1.aliyuncs.com",
|
||||
Help: "US East 1 (Virginia)",
|
||||
}, {
|
||||
Value: "oss-ap-southeast-1.aliyuncs.com",
|
||||
Help: "Southeast Asia Southeast 1 (Singapore)",
|
||||
}, {
|
||||
Value: "oss-ap-southeast-2.aliyuncs.com",
|
||||
Help: "Asia Pacific Southeast 2 (Sydney)",
|
||||
}, {
|
||||
Value: "oss-ap-southeast-3.aliyuncs.com",
|
||||
Help: "Southeast Asia Southeast 3 (Kuala Lumpur)",
|
||||
}, {
|
||||
Value: "oss-ap-southeast-5.aliyuncs.com",
|
||||
Help: "Asia Pacific Southeast 5 (Jakarta)",
|
||||
}, {
|
||||
Value: "oss-ap-northeast-1.aliyuncs.com",
|
||||
Help: "Asia Pacific Northeast 1 (Japan)",
|
||||
}, {
|
||||
Value: "oss-ap-south-1.aliyuncs.com",
|
||||
Help: "Asia Pacific South 1 (Mumbai)",
|
||||
}, {
|
||||
Value: "oss-eu-central-1.aliyuncs.com",
|
||||
Help: "Central Europe 1 (Frankfurt)",
|
||||
}, {
|
||||
Value: "oss-eu-west-1.aliyuncs.com",
|
||||
Help: "West Europe (London)",
|
||||
}, {
|
||||
Value: "oss-me-east-1.aliyuncs.com",
|
||||
Help: "Middle East 1 (Dubai)",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.\nRequired when using an S3 clone.",
|
||||
Provider: "!AWS,IBMCOS",
|
||||
Provider: "!AWS,IBMCOS,Alibaba",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "objects-us-west-1.dream.io",
|
||||
Value: "objects-us-east-1.dream.io",
|
||||
Help: "Dream Objects endpoint",
|
||||
Provider: "Dreamhost",
|
||||
}, {
|
||||
@@ -291,7 +363,11 @@ func init() {
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "s3.wasabisys.com",
|
||||
Help: "Wasabi Object Storage",
|
||||
Help: "Wasabi US East endpoint",
|
||||
Provider: "Wasabi",
|
||||
}, {
|
||||
Value: "s3.us-west-1.wasabisys.com",
|
||||
Help: "Wasabi US West endpoint",
|
||||
Provider: "Wasabi",
|
||||
}},
|
||||
}, {
|
||||
@@ -319,6 +395,9 @@ func init() {
|
||||
}, {
|
||||
Value: "eu-west-2",
|
||||
Help: "EU (London) Region.",
|
||||
}, {
|
||||
Value: "eu-north-1",
|
||||
Help: "EU (Stockholm) Region.",
|
||||
}, {
|
||||
Value: "EU",
|
||||
Help: "EU Region.",
|
||||
@@ -371,7 +450,7 @@ func init() {
|
||||
Help: "US East Region Flex",
|
||||
}, {
|
||||
Value: "us-south-standard",
|
||||
Help: "US Sout hRegion Standard",
|
||||
Help: "US South Region Standard",
|
||||
}, {
|
||||
Value: "us-south-vault",
|
||||
Help: "US South Region Vault",
|
||||
@@ -395,16 +474,16 @@ func init() {
|
||||
Help: "EU Cross Region Flex",
|
||||
}, {
|
||||
Value: "eu-gb-standard",
|
||||
Help: "Great Britan Standard",
|
||||
Help: "Great Britain Standard",
|
||||
}, {
|
||||
Value: "eu-gb-vault",
|
||||
Help: "Great Britan Vault",
|
||||
Help: "Great Britain Vault",
|
||||
}, {
|
||||
Value: "eu-gb-cold",
|
||||
Help: "Great Britan Cold",
|
||||
Help: "Great Britain Cold",
|
||||
}, {
|
||||
Value: "eu-gb-flex",
|
||||
Help: "Great Britan Flex",
|
||||
Help: "Great Britain Flex",
|
||||
}, {
|
||||
Value: "ap-standard",
|
||||
Help: "APAC Standard",
|
||||
@@ -445,10 +524,17 @@ func init() {
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
|
||||
Provider: "!AWS,IBMCOS",
|
||||
Provider: "!AWS,IBMCOS,Alibaba",
|
||||
}, {
|
||||
Name: "acl",
|
||||
Help: "Canned ACL used when creating buckets and/or storing objects in S3.\nFor more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl",
|
||||
Help: `Canned ACL used when creating buckets and storing or copying objects.
|
||||
|
||||
This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
|
||||
|
||||
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
|
||||
|
||||
Note that this ACL is applied when server side copying objects as S3
|
||||
doesn't copy the ACL from the source but rather writes a fresh one.`,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "private",
|
||||
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
|
||||
@@ -490,6 +576,28 @@ func init() {
|
||||
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS",
|
||||
Provider: "IBMCOS",
|
||||
}},
|
||||
}, {
|
||||
Name: "bucket_acl",
|
||||
Help: `Canned ACL used when creating buckets.
|
||||
|
||||
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
|
||||
|
||||
Note that this ACL is applied only when creating buckets. If it
|
||||
isn't set then "acl" is used instead.`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "private",
|
||||
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
|
||||
}, {
|
||||
Value: "public-read",
|
||||
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
|
||||
}, {
|
||||
Value: "public-read-write",
|
||||
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
|
||||
}, {
|
||||
Value: "authenticated-read",
|
||||
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
|
||||
}},
|
||||
}, {
|
||||
Name: "server_side_encryption",
|
||||
Help: "The server-side encryption algorithm used when storing this object in S3.",
|
||||
@@ -517,7 +625,7 @@ func init() {
|
||||
}},
|
||||
}, {
|
||||
Name: "storage_class",
|
||||
Help: "The storage class to use when storing objects in S3.",
|
||||
Help: "The storage class to use when storing new objects in S3.",
|
||||
Provider: "AWS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
@@ -534,11 +642,49 @@ func init() {
|
||||
}, {
|
||||
Value: "ONEZONE_IA",
|
||||
Help: "One Zone Infrequent Access storage class",
|
||||
}, {
|
||||
Value: "GLACIER",
|
||||
Help: "Glacier storage class",
|
||||
}},
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "Chunk size to use for uploading",
|
||||
Default: fs.SizeSuffix(s3manager.MinUploadPartSize),
|
||||
// Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm
|
||||
Name: "storage_class",
|
||||
Help: "The storage class to use when storing new objects in OSS.",
|
||||
Provider: "Alibaba",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Default",
|
||||
}, {
|
||||
Value: "STANDARD",
|
||||
Help: "Standard storage class",
|
||||
}, {
|
||||
Value: "GLACIER",
|
||||
Help: "Archive storage mode.",
|
||||
}, {
|
||||
Value: "STANDARD_IA",
|
||||
Help: "Infrequent access storage mode.",
|
||||
}},
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: `Cutoff for switching to chunked upload
|
||||
|
||||
Any files larger than this will be uploaded in chunks of chunk_size.
|
||||
The minimum is 0 and the maximum is 5GB.`,
|
||||
Default: defaultUploadCutoff,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: `Chunk size to use for uploading.
|
||||
|
||||
When uploading files larger than upload_cutoff they will be uploaded
|
||||
as multipart uploads using this chunk size.
|
||||
|
||||
Note that "--s3-upload-concurrency" chunks of this size are buffered
|
||||
in memory per transfer.
|
||||
|
||||
If you are transferring large files over high speed links and you have
|
||||
enough memory, then increasing this will speed up the transfers.`,
|
||||
Default: minChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "disable_checksum",
|
||||
@@ -548,31 +694,57 @@ func init() {
|
||||
}, {
|
||||
Name: "session_token",
|
||||
Help: "An AWS session token",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_concurrency",
|
||||
Help: "Concurrency for multipart uploads.",
|
||||
Default: 2,
|
||||
Name: "upload_concurrency",
|
||||
Help: `Concurrency for multipart uploads.
|
||||
|
||||
This is the number of chunks of the same file that are uploaded
|
||||
concurrently.
|
||||
|
||||
If you are uploading small numbers of large files over high speed links
|
||||
and these uploads do not fully utilize your bandwidth, then increasing
|
||||
this may help to speed up the transfers.`,
|
||||
Default: 4,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "force_path_style",
|
||||
Help: "If true use path style access if false use virtual hosted style.\nSome providers (eg Aliyun OSS or Netease COS) require this.",
|
||||
Name: "force_path_style",
|
||||
Help: `If true use path style access if false use virtual hosted style.
|
||||
|
||||
If this is true (the default) then rclone will use path style access,
|
||||
if false then rclone will use virtual path style. See [the AWS S3
|
||||
docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
|
||||
for more info.
|
||||
|
||||
Some providers (eg Aliyun OSS or Netease COS) require this set to false.`,
|
||||
Default: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "v2_auth",
|
||||
Help: `If true use v2 authentication.
|
||||
|
||||
If this is false (the default) then rclone will use v4 authentication.
|
||||
If it is set then rclone will use v2 authentication.
|
||||
|
||||
Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Constants
|
||||
const (
|
||||
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
|
||||
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
|
||||
listChunkSize = 1000 // number of items to read at once
|
||||
maxRetries = 10 // number of retries to make of operations
|
||||
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
|
||||
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
|
||||
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
|
||||
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
|
||||
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
|
||||
listChunkSize = 1000 // number of items to read at once
|
||||
maxRetries = 10 // number of retries to make of operations
|
||||
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
|
||||
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
|
||||
minChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize)
|
||||
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
|
||||
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
|
||||
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
|
||||
)
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
@@ -585,14 +757,17 @@ type Options struct {
|
||||
Endpoint string `config:"endpoint"`
|
||||
LocationConstraint string `config:"location_constraint"`
|
||||
ACL string `config:"acl"`
|
||||
BucketACL string `config:"bucket_acl"`
|
||||
ServerSideEncryption string `config:"server_side_encryption"`
|
||||
SSEKMSKeyID string `config:"sse_kms_key_id"`
|
||||
StorageClass string `config:"storage_class"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
DisableChecksum bool `config:"disable_checksum"`
|
||||
SessionToken string `config:"session_token"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
ForcePathStyle bool `config:"force_path_style"`
|
||||
V2Auth bool `config:"v2_auth"`
|
||||
}
|
||||
|
||||
// Fs represents a remote s3 server
|
||||
@@ -608,6 +783,7 @@ type Fs struct {
|
||||
bucketOK bool // true if we have created the bucket
|
||||
bucketDeleted bool // true if we have deleted the bucket
|
||||
pacer *pacer.Pacer // To pace the API calls
|
||||
srv *http.Client // a plain http client
|
||||
}
|
||||
|
||||
// Object describes a s3 object
|
||||
@@ -656,23 +832,31 @@ func (f *Fs) Features() *fs.Features {
|
||||
// retryErrorCodes is a slice of error codes that we will retry
|
||||
// See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
|
||||
var retryErrorCodes = []int{
|
||||
409, // Conflict - various states that could be resolved on a retry
|
||||
// 409, // Conflict - various states that could be resolved on a retry
|
||||
503, // Service Unavailable/Slow Down - "Reduce your request rate"
|
||||
}
|
||||
|
||||
// S3 is pretty resilient, and the built-in retry handling is probably sufficient
|
||||
// as it should notice closed connections and timeouts which are the most likely
|
||||
// sort of failure modes
|
||||
func shouldRetry(err error) (bool, error) {
|
||||
|
||||
func (f *Fs) shouldRetry(err error) (bool, error) {
|
||||
// If this is an awserr object, try and extract more useful information to determine if we should retry
|
||||
if awsError, ok := err.(awserr.Error); ok {
|
||||
// Simple case, check the original embedded error in case it's generically retriable
|
||||
// Simple case, check the original embedded error in case it's generically retryable
|
||||
if fserrors.ShouldRetry(awsError.OrigErr()) {
|
||||
return true, err
|
||||
}
|
||||
//Failing that, if it's a RequestFailure it's probably got an http status code we can check
|
||||
// Failing that, if it's a RequestFailure it's probably got an http status code we can check
|
||||
if reqErr, ok := err.(awserr.RequestFailure); ok {
|
||||
// 301 if wrong region for bucket
|
||||
if reqErr.StatusCode() == http.StatusMovedPermanently {
|
||||
urfbErr := f.updateRegionForBucket()
|
||||
if urfbErr != nil {
|
||||
fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr)
|
||||
return false, err
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
for _, e := range retryErrorCodes {
|
||||
if reqErr.StatusCode() == e {
|
||||
return true, err
|
||||
@@ -680,7 +864,7 @@ func shouldRetry(err error) (bool, error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
//Ok, not an awserr, check for generic failure conditions
|
||||
// Ok, not an awserr, check for generic failure conditions
|
||||
return fserrors.ShouldRetry(err), err
|
||||
}
|
||||
|
||||
@@ -757,17 +941,38 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
|
||||
if opt.Region == "" {
|
||||
opt.Region = "us-east-1"
|
||||
}
|
||||
if opt.Provider == "Alibaba" || opt.Provider == "Netease" {
|
||||
opt.ForcePathStyle = false
|
||||
}
|
||||
awsConfig := aws.NewConfig().
|
||||
WithRegion(opt.Region).
|
||||
WithMaxRetries(maxRetries).
|
||||
WithCredentials(cred).
|
||||
WithEndpoint(opt.Endpoint).
|
||||
WithHTTPClient(fshttp.NewClient(fs.Config)).
|
||||
WithS3ForcePathStyle(opt.ForcePathStyle)
|
||||
if opt.Region != "" {
|
||||
awsConfig.WithRegion(opt.Region)
|
||||
}
|
||||
if opt.Endpoint != "" {
|
||||
awsConfig.WithEndpoint(opt.Endpoint)
|
||||
}
|
||||
|
||||
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
|
||||
ses := session.New()
|
||||
c := s3.New(ses, awsConfig)
|
||||
if opt.Region == "other-v2-signature" {
|
||||
awsSessionOpts := session.Options{
|
||||
Config: *awsConfig,
|
||||
}
|
||||
if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
|
||||
// Enable loading config options from ~/.aws/config (selected by AWS_PROFILE env)
|
||||
awsSessionOpts.SharedConfigState = session.SharedConfigEnable
|
||||
// The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source
|
||||
// (from the shared config file) if the passed-in Options.Config.Credentials is nil.
|
||||
awsSessionOpts.Config.Credentials = nil
|
||||
}
|
||||
ses, err := session.NewSessionWithOptions(awsSessionOpts)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
c := s3.New(ses)
|
||||
if opt.V2Auth || opt.Region == "other-v2-signature" {
|
||||
fs.Debugf(nil, "Using v2 auth")
|
||||
signer := func(req *request.Request) {
|
||||
// Ignore AnonymousCredentials object
|
||||
@@ -783,6 +988,36 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
|
||||
return c, ses, nil
|
||||
}
|
||||
|
||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||
if cs < minChunkSize {
|
||||
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadChunkSize(cs)
|
||||
if err == nil {
|
||||
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func checkUploadCutoff(cs fs.SizeSuffix) error {
|
||||
if cs > maxUploadCutoff {
|
||||
return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadCutoff(cs)
|
||||
if err == nil {
|
||||
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
@@ -791,13 +1026,24 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.ChunkSize < fs.SizeSuffix(s3manager.MinUploadPartSize) {
|
||||
return nil, errors.Errorf("s3 chunk size (%v) must be >= %v", opt.ChunkSize, fs.SizeSuffix(s3manager.MinUploadPartSize))
|
||||
err = checkUploadChunkSize(opt.ChunkSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "s3: chunk size")
|
||||
}
|
||||
err = checkUploadCutoff(opt.UploadCutoff)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "s3: upload cutoff")
|
||||
}
|
||||
bucket, directory, err := s3ParsePath(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.ACL == "" {
|
||||
opt.ACL = "private"
|
||||
}
|
||||
if opt.BucketACL == "" {
|
||||
opt.BucketACL = opt.ACL
|
||||
}
|
||||
c, ses, err := s3Connection(opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -810,6 +1056,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
bucket: bucket,
|
||||
ses: ses,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
|
||||
srv: fshttp.NewClient(fs.Config),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
@@ -825,7 +1072,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err = f.c.HeadObject(&req)
|
||||
return shouldRetry(err)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
f.root = path.Dir(directory)
|
||||
@@ -875,6 +1122,51 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
return f.newObjectWithInfo(remote, nil)
|
||||
}
|
||||
|
||||
// Gets the bucket location
|
||||
func (f *Fs) getBucketLocation() (string, error) {
|
||||
req := s3.GetBucketLocationInput{
|
||||
Bucket: &f.bucket,
|
||||
}
|
||||
var resp *s3.GetBucketLocationOutput
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.c.GetBucketLocation(&req)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return s3.NormalizeBucketLocation(aws.StringValue(resp.LocationConstraint)), nil
|
||||
}
|
||||
|
||||
// Updates the region for the bucket by reading the region from the
|
||||
// bucket then updating the session.
|
||||
func (f *Fs) updateRegionForBucket() error {
|
||||
region, err := f.getBucketLocation()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "reading bucket location failed")
|
||||
}
|
||||
if aws.StringValue(f.c.Config.Endpoint) != "" {
|
||||
return errors.Errorf("can't set region to %q as endpoint is set", region)
|
||||
}
|
||||
if aws.StringValue(f.c.Config.Region) == region {
|
||||
return errors.Errorf("region is already %q - not updating", region)
|
||||
}
|
||||
|
||||
// Make a new session with the new region
|
||||
oldRegion := f.opt.Region
|
||||
f.opt.Region = region
|
||||
c, ses, err := s3Connection(&f.opt)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "creating new session failed")
|
||||
}
|
||||
f.c = c
|
||||
f.ses = ses
|
||||
|
||||
fs.Logf(f, "Switched region to %q from %q", region, oldRegion)
|
||||
return nil
|
||||
}
|
||||
|
||||
// listFn is called from list to handle an object.
|
||||
type listFn func(remote string, object *s3.Object, isDirectory bool) error
|
||||
|
||||
@@ -907,7 +1199,7 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) error {
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.c.ListObjects(&req)
|
||||
return shouldRetry(err)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.RequestFailure); ok {
|
||||
@@ -1036,7 +1328,7 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
|
||||
var resp *s3.ListBucketsOutput
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.c.ListBuckets(&req)
|
||||
return shouldRetry(err)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1124,7 +1416,7 @@ func (f *Fs) dirExists() (bool, error) {
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
_, err := f.c.HeadBucket(&req)
|
||||
return shouldRetry(err)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
return true, nil
|
||||
@@ -1155,7 +1447,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
}
|
||||
req := s3.CreateBucketInput{
|
||||
Bucket: &f.bucket,
|
||||
ACL: &f.opt.ACL,
|
||||
ACL: &f.opt.BucketACL,
|
||||
}
|
||||
if f.opt.LocationConstraint != "" {
|
||||
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
|
||||
@@ -1164,7 +1456,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
_, err := f.c.CreateBucket(&req)
|
||||
return shouldRetry(err)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err, ok := err.(awserr.Error); ok {
|
||||
if err.Code() == "BucketAlreadyOwnedByYou" {
|
||||
@@ -1174,6 +1466,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
if err == nil {
|
||||
f.bucketOK = true
|
||||
f.bucketDeleted = false
|
||||
fs.Infof(f, "Bucket created with ACL %q", *req.ACL)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -1192,11 +1485,12 @@ func (f *Fs) Rmdir(dir string) error {
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
_, err := f.c.DeleteBucket(&req)
|
||||
return shouldRetry(err)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
f.bucketOK = false
|
||||
f.bucketDeleted = true
|
||||
fs.Infof(f, "Bucket deleted")
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -1236,13 +1530,23 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
source := pathEscape(srcFs.bucket + "/" + srcFs.root + srcObj.remote)
|
||||
req := s3.CopyObjectInput{
|
||||
Bucket: &f.bucket,
|
||||
ACL: &f.opt.ACL,
|
||||
Key: &key,
|
||||
CopySource: &source,
|
||||
MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
|
||||
}
|
||||
if f.opt.ServerSideEncryption != "" {
|
||||
req.ServerSideEncryption = &f.opt.ServerSideEncryption
|
||||
}
|
||||
if f.opt.SSEKMSKeyID != "" {
|
||||
req.SSEKMSKeyId = &f.opt.SSEKMSKeyID
|
||||
}
|
||||
if f.opt.StorageClass != "" {
|
||||
req.StorageClass = &f.opt.StorageClass
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err = f.c.CopyObject(&req)
|
||||
return shouldRetry(err)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1324,7 +1628,7 @@ func (o *Object) readMetaData() (err error) {
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
resp, err = o.fs.c.HeadObject(&req)
|
||||
return shouldRetry(err)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.RequestFailure); ok {
|
||||
@@ -1409,9 +1713,18 @@ func (o *Object) SetModTime(modTime time.Time) error {
|
||||
Metadata: o.meta,
|
||||
MetadataDirective: &directive,
|
||||
}
|
||||
if o.fs.opt.ServerSideEncryption != "" {
|
||||
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
|
||||
}
|
||||
if o.fs.opt.SSEKMSKeyID != "" {
|
||||
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
|
||||
}
|
||||
if o.fs.opt.StorageClass != "" {
|
||||
req.StorageClass = &o.fs.opt.StorageClass
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err := o.fs.c.CopyObject(&req)
|
||||
return shouldRetry(err)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
return err
|
||||
}
|
||||
@@ -1443,7 +1756,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
resp, err = o.fs.c.GetObject(&req)
|
||||
return shouldRetry(err)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
if err, ok := err.(awserr.RequestFailure); ok {
|
||||
if err.Code() == "InvalidObjectState" {
|
||||
@@ -1465,38 +1778,46 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
modTime := src.ModTime()
|
||||
size := src.Size()
|
||||
|
||||
uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
|
||||
u.Concurrency = o.fs.opt.UploadConcurrency
|
||||
u.LeavePartsOnError = false
|
||||
u.S3 = o.fs.c
|
||||
u.PartSize = int64(o.fs.opt.ChunkSize)
|
||||
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
|
||||
var uploader *s3manager.Uploader
|
||||
if multipart {
|
||||
uploader = s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
|
||||
u.Concurrency = o.fs.opt.UploadConcurrency
|
||||
u.LeavePartsOnError = false
|
||||
u.S3 = o.fs.c
|
||||
u.PartSize = int64(o.fs.opt.ChunkSize)
|
||||
|
||||
if size == -1 {
|
||||
// Make parts as small as possible while still being able to upload to the
|
||||
// S3 file size limit. Rounded up to nearest MB.
|
||||
u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
|
||||
return
|
||||
}
|
||||
// Adjust PartSize until the number of parts is small enough.
|
||||
if size/u.PartSize >= s3manager.MaxUploadParts {
|
||||
// Calculate partition size rounded up to the nearest MB
|
||||
u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
|
||||
}
|
||||
})
|
||||
if size == -1 {
|
||||
// Make parts as small as possible while still being able to upload to the
|
||||
// S3 file size limit. Rounded up to nearest MB.
|
||||
u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
|
||||
return
|
||||
}
|
||||
// Adjust PartSize until the number of parts is small enough.
|
||||
if size/u.PartSize >= s3manager.MaxUploadParts {
|
||||
// Calculate partition size rounded up to the nearest MB
|
||||
u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
|
||||
}
|
||||
})
|
||||
}
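For the size == -1 branch above, the part-size formula resolves to a fixed value; a small worked example (standalone sketch, mirroring maxFileSize of 5 TiB from the constants in this file and the SDK's s3manager.MaxUploadParts of 10000):

// partSizeForUnknownLength reproduces the arithmetic above with concrete numbers.
func partSizeForUnknownLength() int64 {
	const maxFileSize int64 = 5 * 1024 * 1024 * 1024 * 1024 // 5 TiB
	const maxUploadParts = 10000
	// 5 TiB / 10000 = 549,755,813 bytes (~524.3 MiB); truncate to whole MiB and
	// add one MiB, giving 550,502,400 bytes = 525 MiB per part.
	return (((maxFileSize / maxUploadParts) >> 20) + 1) << 20
}
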
|
||||
|
||||
// Set the mtime in the meta data
|
||||
metadata := map[string]*string{
|
||||
metaMtime: aws.String(swift.TimeToFloatString(modTime)),
|
||||
}
|
||||
|
||||
if !o.fs.opt.DisableChecksum && size > uploader.PartSize {
|
||||
// read the md5sum if available for non multipart and if
|
||||
// disable checksum isn't present.
|
||||
var md5sum string
|
||||
if !multipart || !o.fs.opt.DisableChecksum {
|
||||
hash, err := src.Hash(hash.MD5)
|
||||
|
||||
if err == nil && matchMd5.MatchString(hash) {
|
||||
hashBytes, err := hex.DecodeString(hash)
|
||||
|
||||
if err == nil {
|
||||
metadata[metaMD5Hash] = aws.String(base64.StdEncoding.EncodeToString(hashBytes))
|
||||
md5sum = base64.StdEncoding.EncodeToString(hashBytes)
|
||||
if multipart {
|
||||
metadata[metaMD5Hash] = &md5sum
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1505,30 +1826,98 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
mimeType := fs.MimeType(src)
|
||||
|
||||
key := o.fs.root + o.remote
|
||||
req := s3manager.UploadInput{
|
||||
Bucket: &o.fs.bucket,
|
||||
ACL: &o.fs.opt.ACL,
|
||||
Key: &key,
|
||||
Body: in,
|
||||
ContentType: &mimeType,
|
||||
Metadata: metadata,
|
||||
//ContentLength: &size,
|
||||
}
|
||||
if o.fs.opt.ServerSideEncryption != "" {
|
||||
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
|
||||
}
|
||||
if o.fs.opt.SSEKMSKeyID != "" {
|
||||
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
|
||||
}
|
||||
if o.fs.opt.StorageClass != "" {
|
||||
req.StorageClass = &o.fs.opt.StorageClass
|
||||
}
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
_, err = uploader.Upload(&req)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
if multipart {
|
||||
req := s3manager.UploadInput{
|
||||
Bucket: &o.fs.bucket,
|
||||
ACL: &o.fs.opt.ACL,
|
||||
Key: &key,
|
||||
Body: in,
|
||||
ContentType: &mimeType,
|
||||
Metadata: metadata,
|
||||
//ContentLength: &size,
|
||||
}
|
||||
if o.fs.opt.ServerSideEncryption != "" {
|
||||
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
|
||||
}
|
||||
if o.fs.opt.SSEKMSKeyID != "" {
|
||||
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
|
||||
}
|
||||
if o.fs.opt.StorageClass != "" {
|
||||
req.StorageClass = &o.fs.opt.StorageClass
|
||||
}
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
_, err = uploader.Upload(&req)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
req := s3.PutObjectInput{
|
||||
Bucket: &o.fs.bucket,
|
||||
ACL: &o.fs.opt.ACL,
|
||||
Key: &key,
|
||||
ContentType: &mimeType,
|
||||
Metadata: metadata,
|
||||
}
|
||||
if md5sum != "" {
|
||||
req.ContentMD5 = &md5sum
|
||||
}
|
||||
if o.fs.opt.ServerSideEncryption != "" {
|
||||
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
|
||||
}
|
||||
if o.fs.opt.SSEKMSKeyID != "" {
|
||||
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
|
||||
}
|
||||
if o.fs.opt.StorageClass != "" {
|
||||
req.StorageClass = &o.fs.opt.StorageClass
|
||||
}
|
||||
|
||||
// Create the request
|
||||
putObj, _ := o.fs.c.PutObjectRequest(&req)
|
||||
|
||||
// Sign it so we can upload using a presigned request.
|
||||
//
|
||||
// Note the SDK doesn't currently support streaming to
|
||||
// PutObject so we'll use this work-around.
|
||||
url, headers, err := putObj.PresignRequest(15 * time.Minute)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "s3 upload: sign request")
|
||||
}
|
||||
|
||||
// Set request to nil if empty so as not to make chunked encoding
|
||||
if size == 0 {
|
||||
in = nil
|
||||
}
|
||||
|
||||
// create the vanilla http request
|
||||
httpReq, err := http.NewRequest("PUT", url, in)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "s3 upload: new request")
|
||||
}
|
||||
|
||||
// set the headers we signed and the length
|
||||
httpReq.Header = headers
|
||||
httpReq.ContentLength = size
|
||||
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := o.fs.srv.Do(httpReq)
|
||||
if err != nil {
|
||||
return o.fs.shouldRetry(err)
|
||||
}
|
||||
body, err := rest.ReadBody(resp)
|
||||
if err != nil {
|
||||
return o.fs.shouldRetry(err)
|
||||
}
|
||||
if resp.StatusCode >= 200 && resp.StatusCode < 299 {
|
||||
return false, nil
|
||||
}
|
||||
err = errors.Errorf("s3 upload: %s: %s", resp.Status, body)
|
||||
return fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Read the metadata from the newly created object
|
||||
@@ -1546,7 +1935,7 @@ func (o *Object) Remove() error {
|
||||
}
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err := o.fs.c.DeleteObject(&req)
|
||||
return shouldRetry(err)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
// Test S3 filesystem interface
|
||||
package s3_test
|
||||
package s3
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/s3"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
@@ -12,6 +12,19 @@ import (
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestS3:",
|
||||
NilObject: (*s3.Object)(nil),
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: minChunkSize,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadCutoff(cs)
|
||||
}
|
||||
|
||||
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
|
||||
@@ -44,16 +44,7 @@ func sign(AccessKey, SecretKey string, req *http.Request) {
|
||||
req.Header.Set("Date", date)
|
||||
|
||||
// Sort out URI
|
||||
uri := req.URL.Opaque
|
||||
if uri != "" {
|
||||
if strings.HasPrefix(uri, "//") {
|
||||
// Strip off //host/uri
|
||||
uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
|
||||
req.URL.Opaque = uri // reset to plain URI otherwise Ceph gets confused
|
||||
}
|
||||
} else {
|
||||
uri = req.URL.Path
|
||||
}
|
||||
uri := req.URL.EscapedPath()
|
||||
if uri == "" {
|
||||
uri = "/"
|
||||
}
|
||||
|
||||
@@ -28,7 +28,7 @@ import (
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pkg/sftp"
|
||||
"github.com/xanzy/ssh-agent"
|
||||
sshagent "github.com/xanzy/ssh-agent"
|
||||
"golang.org/x/crypto/ssh"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
@@ -66,7 +66,22 @@ func init() {
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "key_file",
|
||||
Help: "Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.",
|
||||
Help: "Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.",
|
||||
}, {
|
||||
Name: "key_file_pass",
|
||||
Help: `The passphrase to decrypt the PEM-encoded private key file.
|
||||
|
||||
Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
|
||||
in the new OpenSSH format can't be used.`,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "key_use_agent",
|
||||
Help: `When set forces the usage of the ssh-agent.
|
||||
|
||||
When key-file is also set, the ".pub" file of the specified key-file is read and only the associated key is
|
||||
requested from the ssh-agent. This allows to avoid ` + "`Too many authentication failures for *username*`" + ` errors
|
||||
when the ssh-agent contains many keys.`,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "use_insecure_cipher",
|
||||
Help: "Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.",
|
||||
@@ -90,9 +105,20 @@ func init() {
|
||||
Help: "Allow asking for SFTP password when needed.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "path_override",
|
||||
Default: "",
|
||||
Help: "Override path used by SSH connection.",
|
||||
Name: "path_override",
|
||||
Default: "",
|
||||
Help: `Override path used by SSH connection.
|
||||
|
||||
This allows checksum calculation when SFTP and SSH paths are
|
||||
different. This issue affects among others Synology NAS boxes.
|
||||
|
||||
Shared folders can be found in directories representing volumes
|
||||
|
||||
rclone sync /home/local/directory remote:/directory --ssh-path-override /volume2/directory
|
||||
|
||||
Home directory can be found in a shared folder called "home"
|
||||
|
||||
rclone sync /home/local/directory remote:/home/directory --ssh-path-override /volume1/homes/USER/directory`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "set_modtime",
|
||||
@@ -111,6 +137,8 @@ type Options struct {
|
||||
Port string `config:"port"`
|
||||
Pass string `config:"pass"`
|
||||
KeyFile string `config:"key_file"`
|
||||
KeyFilePass string `config:"key_file_pass"`
|
||||
KeyUseAgent bool `config:"key_use_agent"`
|
||||
UseInsecureCipher bool `config:"use_insecure_cipher"`
|
||||
DisableHashCheck bool `config:"disable_hashcheck"`
|
||||
AskPassword bool `config:"ask_password"`
|
||||
@@ -287,6 +315,18 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
|
||||
f.poolMu.Unlock()
|
||||
}
|
||||
|
||||
// shellExpand replaces a leading "~" with "${HOME}" and expands all environment
// variables afterwards.
func shellExpand(s string) string {
	if s != "" {
		if s[0] == '~' {
			s = "${HOME}" + s[1:]
		}
		s = os.ExpandEnv(s)
	}
	return s
}

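A quick sketch of what shellExpand produces for typical key_file values - the function body is copied from the hunk above, the HOME value is illustrative:

package main

import (
	"fmt"
	"os"
)

func shellExpand(s string) string {
	if s != "" {
		if s[0] == '~' {
			s = "${HOME}" + s[1:]
		}
		s = os.ExpandEnv(s)
	}
	return s
}

func main() {
	os.Setenv("HOME", "/home/alice") // illustrative value
	fmt.Println(shellExpand("~/.ssh/id_rsa"))     // /home/alice/.ssh/id_rsa
	fmt.Println(shellExpand("$HOME/.ssh/id_rsa")) // /home/alice/.ssh/id_rsa
	fmt.Println(shellExpand(""))                  // empty stays empty
}
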
// NewFs creates a new Fs object from the name and root. It connects to
|
||||
// the host specified in the config file.
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
@@ -314,8 +354,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc")
|
||||
}
|
||||
|
||||
keyFile := shellExpand(opt.KeyFile)
|
||||
// Add ssh agent-auth if no password or file specified
|
||||
if opt.Pass == "" && opt.KeyFile == "" {
|
||||
if (opt.Pass == "" && keyFile == "") || opt.KeyUseAgent {
|
||||
sshAgentClient, _, err := sshagent.New()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't connect to ssh-agent")
|
||||
@@ -324,16 +365,46 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't read ssh agent signers")
|
||||
}
|
||||
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signers...))
|
||||
if keyFile != "" {
|
||||
pubBytes, err := ioutil.ReadFile(keyFile + ".pub")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read public key file")
|
||||
}
|
||||
pub, _, _, _, err := ssh.ParseAuthorizedKey(pubBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse public key file")
|
||||
}
|
||||
pubM := pub.Marshal()
|
||||
found := false
|
||||
for _, s := range signers {
|
||||
if bytes.Equal(pubM, s.PublicKey().Marshal()) {
|
||||
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(s))
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return nil, errors.New("private key not found in the ssh-agent")
|
||||
}
|
||||
} else {
|
||||
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signers...))
|
||||
}
|
||||
}
|
||||
|
||||
// Load key file if specified
|
||||
if opt.KeyFile != "" {
|
||||
key, err := ioutil.ReadFile(opt.KeyFile)
|
||||
if keyFile != "" {
|
||||
key, err := ioutil.ReadFile(keyFile)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read private key file")
|
||||
}
|
||||
signer, err := ssh.ParsePrivateKey(key)
|
||||
clearpass := ""
|
||||
if opt.KeyFilePass != "" {
|
||||
clearpass, err = obscure.Reveal(opt.KeyFilePass)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
signer, err := ssh.ParsePrivateKeyWithPassphrase(key, []byte(clearpass))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse private key file")
|
||||
}
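The hunks above feed the new key_file_pass and key_use_agent options into NewFs. A hedged example of a remote that exercises them - the remote name, host, user and key path are illustrative, only the option names come from this change:

[myserver]
type = sftp
host = sftp.example.com
user = alice
key_file = ~/.ssh/id_rsa
key_use_agent = true

With key_use_agent set and key_file present, only the key matching ~/.ssh/id_rsa.pub is requested from the agent, which is what avoids the "Too many authentication failures" errors mentioned in the help text.
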
|
||||
@@ -494,9 +565,13 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
// If file is a symlink (not a regular file is the best cross platform test we can do), do a stat to
|
||||
// pick up the size and type of the destination, instead of the size and type of the symlink.
|
||||
if !info.Mode().IsRegular() {
|
||||
oldInfo := info
|
||||
info, err = f.stat(remote)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "stat of non-regular file/dir failed")
|
||||
if !os.IsNotExist(err) {
|
||||
fs.Errorf(remote, "stat of non-regular file/dir failed: %v", err)
|
||||
}
|
||||
info = oldInfo
|
||||
}
|
||||
}
|
||||
if info.IsDir() {
|
||||
@@ -583,12 +658,22 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
|
||||
// Rmdir removes the root directory of the Fs object
|
||||
func (f *Fs) Rmdir(dir string) error {
|
||||
// Check to see if directory is empty as some servers will
|
||||
// delete recursively with RemoveDirectory
|
||||
entries, err := f.List(dir)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Rmdir")
|
||||
}
|
||||
if len(entries) != 0 {
|
||||
return fs.ErrorDirectoryNotEmpty
|
||||
}
|
||||
// Remove the directory
|
||||
root := path.Join(f.root, dir)
|
||||
c, err := f.getSftpConnection()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Rmdir")
|
||||
}
|
||||
err = c.sftpClient.Remove(root)
|
||||
err = c.sftpClient.RemoveDirectory(root)
|
||||
f.putSftpConnection(&c, err)
|
||||
return err
|
||||
}
|
||||
@@ -758,6 +843,10 @@ func (o *Object) Hash(r hash.Type) (string, error) {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
if o.fs.opt.DisableHashCheck {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
c, err := o.fs.getSftpConnection()
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "Hash get SFTP connection")
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/operations"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/ncw/rclone/lib/pacer"
|
||||
"github.com/ncw/swift"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -29,13 +30,32 @@ import (
|
||||
const (
|
||||
directoryMarkerContentType = "application/directory" // content type of directory marker objects
|
||||
listChunks = 1000 // chunk size to read directory listings
|
||||
defaultChunkSize = 5 * fs.GibiByte
|
||||
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
|
||||
)
|
||||
|
||||
// SharedOptions are shared between swift and hubic
|
||||
var SharedOptions = []fs.Option{{
|
||||
Name: "chunk_size",
|
||||
Help: "Above this size files will be chunked into a _segments container.",
|
||||
Default: fs.SizeSuffix(5 * 1024 * 1024 * 1024),
|
||||
Name: "chunk_size",
|
||||
Help: `Above this size files will be chunked into a _segments container.
|
||||
|
||||
Above this size files will be chunked into a _segments container. The
|
||||
default for this is 5GB which is its maximum value.`,
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_chunk",
|
||||
Help: `Don't chunk files during streaming upload.
|
||||
|
||||
When doing streaming uploads (eg using rcat or mount) setting this
|
||||
flag will cause the swift backend to not upload chunked files.
|
||||
|
||||
This will limit the maximum upload size to 5GB. However non chunked
|
||||
files are easier to deal with and have an MD5SUM.
|
||||
|
||||
Rclone will still chunk files bigger than chunk_size when doing normal
|
||||
copy operations.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}}
|
||||
|
||||
@@ -110,6 +130,15 @@ func init() {
|
||||
}, {
|
||||
Name: "auth_token",
|
||||
Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)",
|
||||
}, {
|
||||
Name: "application_credential_id",
|
||||
Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)",
|
||||
}, {
|
||||
Name: "application_credential_name",
|
||||
Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)",
|
||||
}, {
|
||||
Name: "application_credential_secret",
|
||||
Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)",
|
||||
}, {
|
||||
Name: "auth_version",
|
||||
Help: "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)",
|
||||
@@ -129,8 +158,13 @@ func init() {
|
||||
Value: "admin",
|
||||
}},
|
||||
}, {
|
||||
Name: "storage_policy",
|
||||
Help: "The storage policy to use when creating a new container",
|
||||
Name: "storage_policy",
|
||||
Help: `The storage policy to use when creating a new container
|
||||
|
||||
This applies the specified storage policy when creating a new
|
||||
container. The policy cannot be changed afterwards. The allowed
|
||||
configuration values and their meaning depend on your Swift storage
|
||||
provider.`,
|
||||
Default: "",
|
||||
Examples: []fs.OptionExample{{
|
||||
Help: "Default",
|
||||
@@ -148,22 +182,26 @@ func init() {
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
User string `config:"user"`
|
||||
Key string `config:"key"`
|
||||
Auth string `config:"auth"`
|
||||
UserID string `config:"user_id"`
|
||||
Domain string `config:"domain"`
|
||||
Tenant string `config:"tenant"`
|
||||
TenantID string `config:"tenant_id"`
|
||||
TenantDomain string `config:"tenant_domain"`
|
||||
Region string `config:"region"`
|
||||
StorageURL string `config:"storage_url"`
|
||||
AuthToken string `config:"auth_token"`
|
||||
AuthVersion int `config:"auth_version"`
|
||||
StoragePolicy string `config:"storage_policy"`
|
||||
EndpointType string `config:"endpoint_type"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
User string `config:"user"`
|
||||
Key string `config:"key"`
|
||||
Auth string `config:"auth"`
|
||||
UserID string `config:"user_id"`
|
||||
Domain string `config:"domain"`
|
||||
Tenant string `config:"tenant"`
|
||||
TenantID string `config:"tenant_id"`
|
||||
TenantDomain string `config:"tenant_domain"`
|
||||
Region string `config:"region"`
|
||||
StorageURL string `config:"storage_url"`
|
||||
AuthToken string `config:"auth_token"`
|
||||
AuthVersion int `config:"auth_version"`
|
||||
ApplicationCredentialID string `config:"application_credential_id"`
|
||||
ApplicationCredentialName string `config:"application_credential_name"`
|
||||
ApplicationCredentialSecret string `config:"application_credential_secret"`
|
||||
StoragePolicy string `config:"storage_policy"`
|
||||
EndpointType string `config:"endpoint_type"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
NoChunk bool `config:"no_chunk"`
|
||||
}
|
||||
|
||||
// Fs represents a remote swift server
|
||||
@@ -178,16 +216,20 @@ type Fs struct {
|
||||
containerOK bool // true if we have created the container
|
||||
segmentsContainer string // container to store the segments (if any) in
|
||||
noCheckContainer bool // don't check the container before creating it
|
||||
pacer *pacer.Pacer // To pace the API calls
|
||||
}
|
||||
|
||||
// Object describes a swift object
|
||||
//
|
||||
// Will definitely have info but maybe not meta
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
info swift.Object // Info from the swift object if known
|
||||
headers swift.Headers // The object headers if known
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
size int64
|
||||
lastModified time.Time
|
||||
contentType string
|
||||
md5 string
|
||||
headers swift.Headers // The object headers if known
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -218,6 +260,32 @@ func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	401, // Unauthorized (eg "Token has expired")
	408, // Request Timeout
	409, // Conflict - various states that could be resolved on a retry
	429, // Rate exceeded.
	500, // Get occasional 500 Internal Server Error
	503, // Service Unavailable/Slow Down - "Reduce your request rate"
	504, // Gateway Time-out
}

// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(err error) (bool, error) {
	// If this is an swift.Error object extract the HTTP error code
	if swiftError, ok := err.(*swift.Error); ok {
		for _, e := range retryErrorCodes {
			if swiftError.StatusCode == e {
				return true, err
			}
		}
	}
	// Check for generic failure conditions
	return fserrors.ShouldRetry(err), err
}

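This classifier is returned from inside pacer callbacks throughout the rest of the diff, as in "return shouldRetry(err)". A minimal sketch of that wrapping pattern with the pacer replaced by a plain bounded retry loop (hypothetical helper, not the rclone pacer, which also applies backoff between attempts):

package main

import (
	"errors"
	"fmt"
)

// callWithRetry is a stand-in for pacer.Call: it keeps invoking fn while fn
// reports the error as retryable, up to a fixed number of attempts.
func callWithRetry(attempts int, fn func() (retry bool, err error)) error {
	var err error
	for i := 0; i < attempts; i++ {
		var retry bool
		retry, err = fn()
		if !retry {
			return err
		}
	}
	return err
}

func main() {
	calls := 0
	err := callWithRetry(5, func() (bool, error) {
		calls++
		if calls < 3 {
			// pretend the first two calls hit a 503 from swift
			return true, errors.New("503 Service Unavailable")
		}
		return false, nil // success, stop retrying
	})
	fmt.Println(calls, err) // 3 <nil>
}
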
// Pattern to match a swift path
|
||||
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
|
||||
|
||||
@@ -237,22 +305,25 @@ func parsePath(path string) (container, directory string, err error) {
|
||||
func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
|
||||
c := &swift.Connection{
|
||||
// Keep these in the same order as the Config for ease of checking
|
||||
UserName: opt.User,
|
||||
ApiKey: opt.Key,
|
||||
AuthUrl: opt.Auth,
|
||||
UserId: opt.UserID,
|
||||
Domain: opt.Domain,
|
||||
Tenant: opt.Tenant,
|
||||
TenantId: opt.TenantID,
|
||||
TenantDomain: opt.TenantDomain,
|
||||
Region: opt.Region,
|
||||
StorageUrl: opt.StorageURL,
|
||||
AuthToken: opt.AuthToken,
|
||||
AuthVersion: opt.AuthVersion,
|
||||
EndpointType: swift.EndpointType(opt.EndpointType),
|
||||
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
|
||||
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
|
||||
Transport: fshttp.NewTransport(fs.Config),
|
||||
UserName: opt.User,
|
||||
ApiKey: opt.Key,
|
||||
AuthUrl: opt.Auth,
|
||||
UserId: opt.UserID,
|
||||
Domain: opt.Domain,
|
||||
Tenant: opt.Tenant,
|
||||
TenantId: opt.TenantID,
|
||||
TenantDomain: opt.TenantDomain,
|
||||
Region: opt.Region,
|
||||
StorageUrl: opt.StorageURL,
|
||||
AuthToken: opt.AuthToken,
|
||||
AuthVersion: opt.AuthVersion,
|
||||
ApplicationCredentialId: opt.ApplicationCredentialID,
|
||||
ApplicationCredentialName: opt.ApplicationCredentialName,
|
||||
ApplicationCredentialSecret: opt.ApplicationCredentialSecret,
|
||||
EndpointType: swift.EndpointType(opt.EndpointType),
|
||||
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
|
||||
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
|
||||
Transport: fshttp.NewTransport(fs.Config),
|
||||
}
|
||||
if opt.EnvAuth {
|
||||
err := c.ApplyEnvironment()
|
||||
@@ -262,11 +333,13 @@ func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
|
||||
}
|
||||
StorageUrl, AuthToken := c.StorageUrl, c.AuthToken // nolint
|
||||
if !c.Authenticated() {
|
||||
if c.UserName == "" && c.UserId == "" {
|
||||
return nil, errors.New("user name or user id not found for authentication (and no storage_url+auth_token is provided)")
|
||||
}
|
||||
if c.ApiKey == "" {
|
||||
return nil, errors.New("key not found")
|
||||
if (c.ApplicationCredentialId != "" || c.ApplicationCredentialName != "") && c.ApplicationCredentialSecret == "" {
|
||||
if c.UserName == "" && c.UserId == "" {
|
||||
return nil, errors.New("user name or user id not found for authentication (and no storage_url+auth_token is provided)")
|
||||
}
|
||||
if c.ApiKey == "" {
|
||||
return nil, errors.New("key not found")
|
||||
}
|
||||
}
|
||||
if c.AuthUrl == "" {
|
||||
return nil, errors.New("auth not found")
|
||||
@@ -294,6 +367,22 @@ func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||
const minChunkSize = fs.Byte
|
||||
if cs < minChunkSize {
|
||||
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadChunkSize(cs)
|
||||
if err == nil {
|
||||
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// NewFsWithConnection constructs an Fs from the path, container:path
|
||||
// and authenticated connection.
|
||||
//
|
||||
@@ -312,6 +401,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
|
||||
segmentsContainer: container + "_segments",
|
||||
root: directory,
|
||||
noCheckContainer: noCheckContainer,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
@@ -321,7 +411,11 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
|
||||
if f.root != "" {
|
||||
f.root += "/"
|
||||
// Check to see if the object exists - ignoring directory markers
|
||||
info, _, err := f.c.Object(container, directory)
|
||||
var info swift.Object
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
info, _, err = f.c.Object(container, directory)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err == nil && info.ContentType != directoryMarkerContentType {
|
||||
f.root = path.Dir(directory)
|
||||
if f.root == "." {
|
||||
@@ -336,7 +430,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
@@ -344,6 +438,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = checkUploadChunkSize(opt.ChunkSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "swift: chunk size")
|
||||
}
|
||||
|
||||
c, err := swiftConnection(opt, name)
|
||||
if err != nil {
|
||||
@@ -369,7 +467,10 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er
|
||||
}
|
||||
if info != nil {
|
||||
// Set info but not headers
|
||||
o.info = *info
|
||||
err := o.decodeMetaData(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
err := o.readMetaData() // reads info and headers, returning an error
|
||||
if err != nil {
|
||||
@@ -407,7 +508,12 @@ func (f *Fs) listContainerRoot(container, root string, dir string, recurse bool,
|
||||
}
|
||||
rootLength := len(root)
|
||||
return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
|
||||
objects, err := f.c.Objects(container, opts)
|
||||
var objects []swift.Object
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
objects, err = f.c.Objects(container, opts)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
for i := range objects {
|
||||
object := &objects[i]
|
||||
@@ -496,7 +602,11 @@ func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
|
||||
if dir != "" {
|
||||
return nil, fs.ErrorListBucketRequired
|
||||
}
|
||||
containers, err := f.c.ContainersAll(nil)
|
||||
var containers []swift.Container
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
containers, err = f.c.ContainersAll(nil)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "container listing failed")
|
||||
}
|
||||
@@ -557,7 +667,12 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
||||
|
||||
// About gets quota information
|
||||
func (f *Fs) About() (*fs.Usage, error) {
|
||||
containers, err := f.c.ContainersAll(nil)
|
||||
var containers []swift.Container
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
containers, err = f.c.ContainersAll(nil)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "container listing failed")
|
||||
}
|
||||
@@ -607,14 +722,20 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
// Check to see if container exists first
|
||||
var err error = swift.ContainerNotFound
|
||||
if !f.noCheckContainer {
|
||||
_, _, err = f.c.Container(f.container)
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, _, err = f.c.Container(f.container)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
}
|
||||
if err == swift.ContainerNotFound {
|
||||
headers := swift.Headers{}
|
||||
if f.opt.StoragePolicy != "" {
|
||||
headers["X-Storage-Policy"] = f.opt.StoragePolicy
|
||||
}
|
||||
err = f.c.ContainerCreate(f.container, headers)
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
err = f.c.ContainerCreate(f.container, headers)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
}
|
||||
if err == nil {
|
||||
f.containerOK = true
|
||||
@@ -631,7 +752,11 @@ func (f *Fs) Rmdir(dir string) error {
|
||||
if f.root != "" || dir != "" {
|
||||
return nil
|
||||
}
|
||||
err := f.c.ContainerDelete(f.container)
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
err = f.c.ContainerDelete(f.container)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
f.containerOK = false
|
||||
}
|
||||
@@ -690,7 +815,10 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
srcFs := srcObj.fs
|
||||
_, err = f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err = f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -739,7 +867,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {
|
||||
fs.Debugf(o, "Returning empty Md5sum for swift large object")
|
||||
return "", nil
|
||||
}
|
||||
return strings.ToLower(o.info.Hash), nil
|
||||
return strings.ToLower(o.md5), nil
|
||||
}
|
||||
|
||||
// hasHeader checks for the header passed in returning false if the
|
||||
@@ -768,7 +896,22 @@ func (o *Object) isStaticLargeObject() (bool, error) {
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *Object) Size() int64 {
|
||||
return o.info.Bytes
|
||||
return o.size
|
||||
}
|
||||
|
||||
// decodeMetaData sets the metadata in the object from a swift.Object
|
||||
//
|
||||
// Sets
|
||||
// o.lastModified
|
||||
// o.size
|
||||
// o.md5
|
||||
// o.contentType
|
||||
func (o *Object) decodeMetaData(info *swift.Object) (err error) {
|
||||
o.lastModified = info.LastModified
|
||||
o.size = info.Bytes
|
||||
o.md5 = info.Hash
|
||||
o.contentType = info.ContentType
|
||||
return nil
|
||||
}
|
||||
|
||||
// readMetaData gets the metadata if it hasn't already been fetched
|
||||
@@ -780,15 +923,23 @@ func (o *Object) readMetaData() (err error) {
|
||||
if o.headers != nil {
|
||||
return nil
|
||||
}
|
||||
info, h, err := o.fs.c.Object(o.fs.container, o.fs.root+o.remote)
|
||||
var info swift.Object
|
||||
var h swift.Headers
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
info, h, err = o.fs.c.Object(o.fs.container, o.fs.root+o.remote)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
if err == swift.ObjectNotFound {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
o.info = info
|
||||
o.headers = h
|
||||
err = o.decodeMetaData(&info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -799,17 +950,17 @@ func (o *Object) readMetaData() (err error) {
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime() time.Time {
|
||||
if fs.Config.UseServerModTime {
|
||||
return o.info.LastModified
|
||||
return o.lastModified
|
||||
}
|
||||
err := o.readMetaData()
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Failed to read metadata: %s", err)
|
||||
return o.info.LastModified
|
||||
return o.lastModified
|
||||
}
|
||||
modTime, err := o.headers.ObjectMetadata().GetModTime()
|
||||
if err != nil {
|
||||
// fs.Logf(o, "Failed to read mtime from object: %v", err)
|
||||
return o.info.LastModified
|
||||
return o.lastModified
|
||||
}
|
||||
return modTime
|
||||
}
|
||||
@@ -832,7 +983,10 @@ func (o *Object) SetModTime(modTime time.Time) error {
|
||||
newHeaders[k] = v
|
||||
}
|
||||
}
|
||||
return o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
|
||||
return o.fs.pacer.Call(func() (bool, error) {
|
||||
err = o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
}
|
||||
|
||||
// Storable returns if this object is storable
|
||||
@@ -840,14 +994,17 @@ func (o *Object) SetModTime(modTime time.Time) error {
|
||||
// It compares the Content-Type to directoryMarkerContentType - that
|
||||
// makes it a directory marker which is not storable.
|
||||
func (o *Object) Storable() bool {
|
||||
return o.info.ContentType != directoryMarkerContentType
|
||||
return o.contentType != directoryMarkerContentType
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
headers := fs.OpenOptionHeaders(options)
|
||||
_, isRanging := headers["Range"]
|
||||
in, _, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, !isRanging, headers)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
in, _, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, !isRanging, headers)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
@@ -874,13 +1031,20 @@ func (o *Object) removeSegments(except string) error {
|
||||
}
|
||||
segmentPath := segmentsRoot + remote
|
||||
fs.Debugf(o, "Removing segment file %q in container %q", segmentPath, o.fs.segmentsContainer)
|
||||
return o.fs.c.ObjectDelete(o.fs.segmentsContainer, segmentPath)
|
||||
var err error
|
||||
return o.fs.pacer.Call(func() (bool, error) {
|
||||
err = o.fs.c.ObjectDelete(o.fs.segmentsContainer, segmentPath)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// remove the segments container if empty, ignore errors
|
||||
err = o.fs.c.ContainerDelete(o.fs.segmentsContainer)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err = o.fs.c.ContainerDelete(o.fs.segmentsContainer)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
fs.Debugf(o, "Removed empty container %q", o.fs.segmentsContainer)
|
||||
}
|
||||
@@ -909,13 +1073,19 @@ func urlEncode(str string) string {
|
||||
func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
|
||||
// Create the segmentsContainer if it doesn't exist
|
||||
var err error
|
||||
_, _, err = o.fs.c.Container(o.fs.segmentsContainer)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
_, _, err = o.fs.c.Container(o.fs.segmentsContainer)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err == swift.ContainerNotFound {
|
||||
headers := swift.Headers{}
|
||||
if o.fs.opt.StoragePolicy != "" {
|
||||
headers["X-Storage-Policy"] = o.fs.opt.StoragePolicy
|
||||
}
|
||||
err = o.fs.c.ContainerCreate(o.fs.segmentsContainer, headers)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err = o.fs.c.ContainerCreate(o.fs.segmentsContainer, headers)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
return "", err
|
||||
@@ -944,7 +1114,10 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
|
||||
segmentReader := io.LimitReader(in, n)
|
||||
segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
|
||||
fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, o.fs.segmentsContainer)
|
||||
_, err := o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
_, err = o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -955,7 +1128,10 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
|
||||
headers["Content-Length"] = "0" // set Content-Length as we know it
|
||||
emptyReader := bytes.NewReader(nil)
|
||||
manifestName := o.fs.root + o.remote
|
||||
_, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
return uniquePrefix + "/", err
|
||||
}
|
||||
|
||||
@@ -985,17 +1161,31 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
contentType := fs.MimeType(src)
|
||||
headers := m.ObjectHeaders()
|
||||
uniquePrefix := ""
|
||||
if size > int64(o.fs.opt.ChunkSize) || size == -1 {
|
||||
if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
|
||||
uniquePrefix, err = o.updateChunks(in, headers, size, contentType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.headers = nil // wipe old metadata
|
||||
} else {
|
||||
headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length as we know it
|
||||
_, err := o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
|
||||
if size >= 0 {
|
||||
headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length if we know it
|
||||
}
|
||||
var rxHeaders swift.Headers
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// set Metadata since ObjectPut checked the hash and length so we know the
|
||||
// object has been safely uploaded
|
||||
o.lastModified = modTime
|
||||
o.size = size
|
||||
o.md5 = rxHeaders["ETag"]
|
||||
o.contentType = contentType
|
||||
o.headers = headers
|
||||
}
|
||||
|
||||
// If file was a dynamic large object then remove old/all segments
|
||||
@@ -1006,8 +1196,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
}
|
||||
}
|
||||
|
||||
// Read the metadata from the newly created object
|
||||
o.headers = nil // wipe old metadata
|
||||
// Read the metadata from the newly created object if necessary
|
||||
return o.readMetaData()
|
||||
}
|
||||
|
||||
@@ -1018,7 +1207,10 @@ func (o *Object) Remove() error {
|
||||
return err
|
||||
}
|
||||
// Remove file/manifest first
|
||||
err = o.fs.c.ObjectDelete(o.fs.container, o.fs.root+o.remote)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err = o.fs.c.ObjectDelete(o.fs.container, o.fs.root+o.remote)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1034,7 +1226,7 @@ func (o *Object) Remove() error {
|
||||
|
||||
// MimeType of an Object if known, "" otherwise
|
||||
func (o *Object) MimeType() string {
|
||||
return o.info.ContentType
|
||||
return o.contentType
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
// Test Swift filesystem interface
|
||||
package swift_test
|
||||
package swift
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/swift"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
@@ -12,6 +12,12 @@ import (
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestSwift:",
|
||||
NilObject: (*swift.Object)(nil),
|
||||
NilObject: (*Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
func init() {
|
||||
fsi := &fs.RegInfo{
|
||||
Name: "union",
|
||||
Description: "Builds a stackable unification remote, which can appear to merge the contents of several remotes",
|
||||
Description: "A stackable unification remote, which can appear to merge the contents of several remotes",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "remotes",
|
||||
@@ -35,13 +35,36 @@ type Options struct {
|
||||
Remotes fs.SpaceSepList `config:"remotes"`
|
||||
}
|
||||
|
||||
// Fs represents a remote acd server
|
||||
// Fs represents a union of remotes
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
features *fs.Features // optional features
|
||||
opt Options // options for this Fs
|
||||
root string // the path we are working on
|
||||
remotes []fs.Fs // slice of remotes
|
||||
wr fs.Fs // writable remote
|
||||
hashSet hash.Set // intersection of hash types
|
||||
}
|
||||
|
||||
// Object describes a union Object
//
// This is a wrapped object which returns the Union Fs as its parent
type Object struct {
	fs.Object
	fs *Fs // what this object is part of
}

// Wrap an existing object in the union Object
func (f *Fs) wrapObject(o fs.Object) *Object {
	return &Object{
		Object: o,
		fs:     f,
	}
}

// Fs returns the union Fs as the parent
func (o *Object) Fs() fs.Info {
	return o.fs
}

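Because Object embeds fs.Object and overrides only Fs(), every other method (Size, ModTime, Open, ...) still delegates to the wrapped object while callers see the union as the parent. A tiny standalone sketch of that embedding trick with simplified interfaces (not the real rclone types):

package main

import "fmt"

// Info and Object are simplified stand-ins for fs.Info and fs.Object.
type Info interface{ Name() string }

type named string

func (n named) Name() string { return string(n) }

type Object interface {
	Info() Info
	Size() int64
}

type baseObject struct{ size int64 }

func (o baseObject) Info() Info  { return named("underlying") }
func (o baseObject) Size() int64 { return o.size }

// wrapped embeds the Object interface, so Size() is promoted unchanged, while
// Info() is overridden to report the union as the parent - the same shape as
// the union Object and its Fs() method above.
type wrapped struct {
	Object
	parent Info
}

func (w wrapped) Info() Info { return w.parent }

func main() {
	w := wrapped{Object: baseObject{size: 42}, parent: named("union")}
	fmt.Println(w.Info().Name(), w.Size()) // union 42
}
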
// Name of the remote (as passed into NewFs)
|
||||
@@ -66,18 +89,146 @@ func (f *Fs) Features() *fs.Features {
|
||||
|
||||
// Rmdir removes the root directory of the Fs object
|
||||
func (f *Fs) Rmdir(dir string) error {
|
||||
return f.remotes[len(f.remotes)-1].Rmdir(dir)
|
||||
return f.wr.Rmdir(dir)
|
||||
}
|
||||
|
||||
// Hashes returns hash.HashNone to indicate remote hashing is unavailable
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
// This could probably be set if all remotes share the same hashing algorithm
|
||||
return hash.Set(hash.None)
|
||||
return f.hashSet
|
||||
}
|
||||
|
||||
// Mkdir makes the root directory of the Fs object
|
||||
func (f *Fs) Mkdir(dir string) error {
|
||||
return f.remotes[len(f.remotes)-1].Mkdir(dir)
|
||||
return f.wr.Mkdir(dir)
|
||||
}
|
||||
|
||||
// Purge all files in the root and the root directory
|
||||
//
|
||||
// Implement this if you have a way of deleting all the files
|
||||
// quicker than just running Remove() on the result of List()
|
||||
//
|
||||
// Return an error if it doesn't exist
|
||||
func (f *Fs) Purge() error {
|
||||
return f.wr.Features().Purge()
|
||||
}
|
||||
|
||||
// Copy src to this remote using server side copy operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
if src.Fs() != f.wr {
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
o, err := f.wr.Features().Copy(src, remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.wrapObject(o), nil
|
||||
}
|
||||
|
||||
// Move src to this remote using server side move operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
if src.Fs() != f.wr {
|
||||
fs.Debugf(src, "Can't move - not same remote type")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
o, err := f.wr.Features().Move(src, remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.wrapObject(o), err
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantDirMove
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
return f.wr.Features().DirMove(srcFs.wr, srcRemote, dstRemote)
|
||||
}
|
||||
|
||||
// ChangeNotify calls the passed function with a path
|
||||
// that has had changes. If the implementation
|
||||
// uses polling, it should adhere to the given interval.
|
||||
// At least one value will be written to the channel,
|
||||
// specifying the initial value and updated values might
|
||||
// follow. A 0 Duration should pause the polling.
|
||||
// The ChangeNotify implementation must empty the channel
|
||||
// regularly. When the channel gets closed, the implementation
|
||||
// should stop polling and release resources.
|
||||
func (f *Fs) ChangeNotify(fn func(string, fs.EntryType), ch <-chan time.Duration) {
|
||||
var remoteChans []chan time.Duration
|
||||
|
||||
for _, remote := range f.remotes {
|
||||
if ChangeNotify := remote.Features().ChangeNotify; ChangeNotify != nil {
|
||||
ch := make(chan time.Duration)
|
||||
remoteChans = append(remoteChans, ch)
|
||||
ChangeNotify(fn, ch)
|
||||
}
|
||||
}
|
||||
|
||||
go func() {
|
||||
for i := range ch {
|
||||
for _, c := range remoteChans {
|
||||
c <- i
|
||||
}
|
||||
}
|
||||
for _, c := range remoteChans {
|
||||
close(c)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// DirCacheFlush resets the directory cache - used in testing
|
||||
// as an optional interface
|
||||
func (f *Fs) DirCacheFlush() {
|
||||
for _, remote := range f.remotes {
|
||||
if DirCacheFlush := remote.Features().DirCacheFlush; DirCacheFlush != nil {
|
||||
DirCacheFlush()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
o, err := f.wr.Features().PutStream(in, src, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.wrapObject(o), err
|
||||
}
|
||||
|
||||
// About gets quota information from the Fs
|
||||
func (f *Fs) About() (*fs.Usage, error) {
|
||||
return f.wr.Features().About()
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
@@ -86,7 +237,11 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return f.remotes[len(f.remotes)-1].Put(in, src, options...)
|
||||
o, err := f.wr.Put(in, src, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.wrapObject(o), err
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
@@ -117,8 +272,11 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
if !found {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
for key := range set {
|
||||
entries = append(entries, set[key])
|
||||
for _, entry := range set {
|
||||
if o, ok := entry.(fs.Object); ok {
|
||||
entry = f.wrapObject(o)
|
||||
}
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
@@ -134,7 +292,7 @@ func (f *Fs) NewObject(path string) (fs.Object, error) {
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "NewObject failed on %v", remote)
|
||||
}
|
||||
return obj, nil
|
||||
return f.wrapObject(obj), nil
|
||||
}
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
@@ -204,6 +362,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
root: root,
|
||||
opt: *opt,
|
||||
remotes: remotes,
|
||||
wr: remotes[len(remotes)-1],
|
||||
}
|
||||
var features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
@@ -212,16 +371,58 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
WriteMimeType: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
BucketBased: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
}).Fill(f)
|
||||
features = features.Mask(f.wr) // mask the features just on the writable fs
|
||||
|
||||
// Really need the union of all remotes for these, so
|
||||
// re-instate and calculate separately.
|
||||
features.ChangeNotify = f.ChangeNotify
|
||||
features.DirCacheFlush = f.DirCacheFlush
|
||||
|
||||
// FIXME maybe should be masking the bools here?
|
||||
|
||||
// Clear ChangeNotify and DirCacheFlush if all are nil
|
||||
clearChangeNotify := true
|
||||
clearDirCacheFlush := true
|
||||
for _, remote := range f.remotes {
|
||||
features = features.Mask(remote)
|
||||
remoteFeatures := remote.Features()
|
||||
if remoteFeatures.ChangeNotify != nil {
|
||||
clearChangeNotify = false
|
||||
}
|
||||
if remoteFeatures.DirCacheFlush != nil {
|
||||
clearDirCacheFlush = false
|
||||
}
|
||||
}
|
||||
if clearChangeNotify {
|
||||
features.ChangeNotify = nil
|
||||
}
|
||||
if clearDirCacheFlush {
|
||||
features.DirCacheFlush = nil
|
||||
}
|
||||
|
||||
f.features = features
|
||||
|
||||
// Get common intersection of hashes
|
||||
hashSet := f.remotes[0].Hashes()
|
||||
for _, remote := range f.remotes[1:] {
|
||||
hashSet = hashSet.Overlap(remote.Hashes())
|
||||
}
|
||||
f.hashSet = hashSet
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
)
|
||||
|
||||
@@ -6,7 +6,11 @@ import (
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -62,11 +66,13 @@ type Response struct {
|
||||
// Note that status collects all the status values for which we just
|
||||
// check the first is OK.
|
||||
type Prop struct {
|
||||
Status []string `xml:"DAV: status"`
|
||||
Name string `xml:"DAV: prop>displayname,omitempty"`
|
||||
Type *xml.Name `xml:"DAV: prop>resourcetype>collection,omitempty"`
|
||||
Size int64 `xml:"DAV: prop>getcontentlength,omitempty"`
|
||||
Modified Time `xml:"DAV: prop>getlastmodified,omitempty"`
|
||||
Status []string `xml:"DAV: status"`
|
||||
Name string `xml:"DAV: prop>displayname,omitempty"`
|
||||
Type *xml.Name `xml:"DAV: prop>resourcetype>collection,omitempty"`
|
||||
IsCollection *string `xml:"DAV: prop>iscollection,omitempty"` // this is a Microsoft extension see #2716
|
||||
Size int64 `xml:"DAV: prop>getcontentlength,omitempty"`
|
||||
Modified Time `xml:"DAV: prop>getlastmodified,omitempty"`
|
||||
Checksums []string `xml:"prop>checksums>checksum,omitempty"`
|
||||
}
|
||||
|
||||
// Parse a status of the form "HTTP/1.1 200 OK" or "HTTP/1.1 200"
|
||||
@@ -92,13 +98,33 @@ func (p *Prop) StatusOK() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Hashes returns a map of all checksums - may be nil
func (p *Prop) Hashes() (hashes map[hash.Type]string) {
	if len(p.Checksums) == 0 {
		return nil
	}
	hashes = make(map[hash.Type]string)
	for _, checksums := range p.Checksums {
		checksums = strings.ToLower(checksums)
		for _, checksum := range strings.Split(checksums, " ") {
			switch {
			case strings.HasPrefix(checksum, "sha1:"):
				hashes[hash.SHA1] = checksum[5:]
			case strings.HasPrefix(checksum, "md5:"):
				hashes[hash.MD5] = checksum[4:]
			}
		}
	}
	return hashes
}

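Hashes turns the owncloud-style checksum property into one entry per supported algorithm. A small standalone sketch of the same parsing, using the sample value quoted later in this diff next to owncloudProps (simplified string map keys rather than the real hash.Type values):

package main

import (
	"fmt"
	"strings"
)

// parseChecksums mirrors Prop.Hashes above for a single owncloud checksum string.
func parseChecksums(checksums string) map[string]string {
	hashes := map[string]string{}
	for _, checksum := range strings.Split(strings.ToLower(checksums), " ") {
		switch {
		case strings.HasPrefix(checksum, "sha1:"):
			hashes["sha1"] = checksum[5:]
		case strings.HasPrefix(checksum, "md5:"):
			hashes["md5"] = checksum[4:]
		}
	}
	return hashes
}

func main() {
	sample := "SHA1:f572d396fae9206628714fb2ce00f72e94f2258f MD5:b1946ac92492d2347c6235b4d2611184 ADLER32:084b021f"
	fmt.Println(parseChecksums(sample))
	// map[md5:b1946ac92492d2347c6235b4d2611184 sha1:f572d396fae9206628714fb2ce00f72e94f2258f]
}
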
// PropValue is a tagged name and value
|
||||
type PropValue struct {
|
||||
XMLName xml.Name `xml:""`
|
||||
Value string `xml:",chardata"`
|
||||
}
|
||||
|
||||
// Error is used to desribe webdav errors
|
||||
// Error is used to describe webdav errors
|
||||
//
|
||||
// <d:error xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns">
|
||||
// <s:exception>Sabre\DAV\Exception\NotFound</s:exception>
|
||||
@@ -111,7 +137,7 @@ type Error struct {
|
||||
StatusCode int
|
||||
}
|
||||
|
||||
// Error returns a string for the error and statistifes the error interface
|
||||
// Error returns a string for the error and satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
var out []string
|
||||
if e.Message != "" {
|
||||
@@ -145,8 +171,11 @@ var timeFormats = []string{
|
||||
time.RFC1123Z, // Fri, 05 Jan 2018 14:14:38 +0000 (as used by mydrive.ch)
|
||||
time.UnixDate, // Wed May 17 15:31:58 UTC 2017 (as used in an internal server)
|
||||
noZerosRFC1123, // Fri, 7 Sep 2018 08:49:58 GMT (as used by server in #2574)
|
||||
time.RFC3339, // Wed, 31 Oct 2018 13:57:11 CET (as used by komfortcloud.de)
|
||||
}
|
||||
|
||||
var oneTimeError sync.Once
|
||||
|
||||
// UnmarshalXML turns XML into a Time
|
||||
func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
var v string
|
||||
@@ -155,6 +184,12 @@ func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// If time is missing then return the epoch
|
||||
if v == "" {
|
||||
*t = Time(time.Unix(0, 0))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Parse the time format in multiple possible ways
|
||||
var newT time.Time
|
||||
for _, timeFormat := range timeFormats {
|
||||
@@ -164,5 +199,33 @@ func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
oneTimeError.Do(func() {
|
||||
fs.Errorf(nil, "Failed to parse time %q - using the epoch", v)
|
||||
})
|
||||
// Return the epoch instead
|
||||
*t = Time(time.Unix(0, 0))
|
||||
// ignore error
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Quota is used to read the bytes used and available
|
||||
//
|
||||
// <d:multistatus xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns" xmlns:oc="http://owncloud.org/ns" xmlns:nc="http://nextcloud.org/ns">
|
||||
// <d:response>
|
||||
// <d:href>/remote.php/webdav/</d:href>
|
||||
// <d:propstat>
|
||||
// <d:prop>
|
||||
// <d:quota-available-bytes>-3</d:quota-available-bytes>
|
||||
// <d:quota-used-bytes>376461895</d:quota-used-bytes>
|
||||
// </d:prop>
|
||||
// <d:status>HTTP/1.1 200 OK</d:status>
|
||||
// </d:propstat>
|
||||
// </d:response>
|
||||
// </d:multistatus>
|
||||
type Quota struct {
|
||||
Available int64 `xml:"DAV: response>propstat>prop>quota-available-bytes"`
|
||||
Used int64 `xml:"DAV: response>propstat>prop>quota-used-bytes"`
|
||||
}
|
||||
|
||||
@@ -102,7 +102,7 @@ func (ca *CookieAuth) Cookies() (*CookieResponse, error) {
|
||||
func (ca *CookieAuth) getSPCookie(conf *SuccessResponse) (*CookieResponse, error) {
|
||||
spRoot, err := url.Parse(ca.endpoint)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error while contructing endpoint URL")
|
||||
return nil, errors.Wrap(err, "Error while constructing endpoint URL")
|
||||
}
|
||||
|
||||
u, err := url.Parse("https://" + spRoot.Host + "/_forms/default.aspx?wa=wsignin1.0")
|
||||
@@ -121,7 +121,7 @@ func (ca *CookieAuth) getSPCookie(conf *SuccessResponse) (*CookieResponse, error
|
||||
Jar: jar,
|
||||
}
|
||||
|
||||
// Send the previously aquired Token as a Post parameter
|
||||
// Send the previously acquired Token as a Post parameter
|
||||
if _, err = client.Post(u.String(), "text/xml", strings.NewReader(conf.Succ.Token)); err != nil {
|
||||
return nil, errors.Wrap(err, "Error while grabbing cookies from endpoint: %v")
|
||||
}
|
||||
|
||||
backend/webdav/odrvcookie/renew.go (new file, 29 lines)
@@ -0,0 +1,29 @@
package odrvcookie

import (
	"time"
)

// CookieRenew holds information for the renew
type CookieRenew struct {
	timer   *time.Ticker
	renewFn func()
}

// NewRenew returns and starts a CookieRenew
func NewRenew(interval time.Duration, renewFn func()) *CookieRenew {
	renew := CookieRenew{
		timer:   time.NewTicker(interval),
		renewFn: renewFn,
	}
	go renew.Renew()
	return &renew
}

// Renew calls the renewFn for every tick
func (c *CookieRenew) Renew() {
	for {
		<-c.timer.C // wait for tick
		c.renewFn()
	}
}
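A hedged usage sketch for the new renewer - the sharepoint quirk later in this diff passes 12*time.Hour, here a short interval and a print stand in for the real cookie refresh:

package main

import (
	"fmt"
	"time"
)

// CookieRenew and NewRenew are copied from the new file above so the sketch
// compiles on its own.
type CookieRenew struct {
	timer   *time.Ticker
	renewFn func()
}

func NewRenew(interval time.Duration, renewFn func()) *CookieRenew {
	renew := CookieRenew{timer: time.NewTicker(interval), renewFn: renewFn}
	go renew.Renew()
	return &renew
}

func (c *CookieRenew) Renew() {
	for {
		<-c.timer.C
		c.renewFn()
	}
}

func main() {
	// The sharepoint quirk further down uses 12*time.Hour; a short interval
	// here just makes the demo observable.
	NewRenew(time.Second, func() {
		fmt.Println("re-fetching sharepoint cookies") // real code calls spCk.Cookies()
	})
	time.Sleep(2500 * time.Millisecond)
}
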
@@ -2,23 +2,13 @@
|
||||
// object storage system.
|
||||
package webdav
|
||||
|
||||
// Owncloud: Getting Oc-Checksum:
|
||||
// SHA1:f572d396fae9206628714fb2ce00f72e94f2258f on HEAD but not on
|
||||
// nextcloud?
|
||||
|
||||
// docs for file webdav
|
||||
// https://docs.nextcloud.com/server/12/developer_manual/client_apis/WebDAV/index.html
|
||||
|
||||
// indicates checksums can be set as metadata here
|
||||
// https://github.com/nextcloud/server/issues/6129
|
||||
// owncloud seems to have checksums as metadata though - can read them
|
||||
|
||||
// SetModTime might be possible
|
||||
// https://stackoverflow.com/questions/3579608/webdav-can-a-client-modify-the-mtime-of-a-file
|
||||
// ...support for a PROPSET to lastmodified (mind the missing get) which does the utime() call might be an option.
|
||||
// For example the ownCloud WebDAV server does it that way.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -31,7 +21,6 @@ import (
|
||||
"github.com/ncw/rclone/backend/webdav/api"
|
||||
"github.com/ncw/rclone/backend/webdav/odrvcookie"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
@@ -96,10 +85,11 @@ func init() {
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
URL string `config:"url"`
|
||||
Vendor string `config:"vendor"`
|
||||
User string `config:"user"`
|
||||
Pass string `config:"pass"`
|
||||
URL string `config:"url"`
|
||||
Vendor string `config:"vendor"`
|
||||
User string `config:"user"`
|
||||
Pass string `config:"pass"`
|
||||
BearerToken string `config:"bearer_token"`
|
||||
}
|
||||
|
||||
// Fs represents a remote webdav
|
||||
@@ -116,6 +106,7 @@ type Fs struct {
|
||||
canStream bool // set if can stream
|
||||
useOCMtime bool // set if can use X-OC-Mtime
|
||||
retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default)
|
||||
hasChecksums bool // set if can use owncloud style checksums
|
||||
}
|
||||
|
||||
// Object describes a webdav object
|
||||
@@ -127,7 +118,8 @@ type Object struct {
|
||||
hasMetaData bool // whether info below has been set
|
||||
size int64 // size of the object
|
||||
modTime time.Time // modification time of the object
|
||||
sha1 string // SHA-1 of the object content
|
||||
sha1 string // SHA-1 of the object content if known
|
||||
md5 string // MD5 of the object content if known
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -180,6 +172,18 @@ func itemIsDir(item *api.Response) bool {
|
||||
}
|
||||
fs.Debugf(nil, "Unknown resource type %q/%q on %q", t.Space, t.Local, item.Props.Name)
|
||||
}
|
||||
// the iscollection prop is a Microsoft extension, but if present it is a reliable indicator
|
||||
// if the above check failed - see #2716. This can be an integer or a boolean - see #2964
|
||||
if t := item.Props.IsCollection; t != nil {
|
||||
switch x := strings.ToLower(*t); x {
|
||||
case "0", "false":
|
||||
return false
|
||||
case "1", "true":
|
||||
return true
|
||||
default:
|
||||
fs.Debugf(nil, "Unknown value %q for IsCollection", x)
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -194,6 +198,9 @@ func (f *Fs) readMetaDataForPath(path string, depth string) (info *api.Prop, err
|
||||
},
|
||||
NoRedirect: true,
|
||||
}
|
||||
if f.hasChecksums {
|
||||
opts.Body = bytes.NewBuffer(owncloudProps)
|
||||
}
|
||||
var result api.Multistatus
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
@@ -249,7 +256,7 @@ func errorHandler(resp *http.Response) error {
|
||||
return errResponse
|
||||
}
|
||||
|
||||
// addShlash makes sure s is terminated with a / if non empty
|
||||
// addSlash makes sure s is terminated with a / if non empty
|
||||
func addSlash(s string) string {
|
||||
if s != "" && !strings.HasSuffix(s, "/") {
|
||||
s += "/"
|
||||
@@ -283,9 +290,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
rootIsDir := strings.HasSuffix(root, "/")
|
||||
root = strings.Trim(root, "/")
|
||||
|
||||
user := config.FileGet(name, "user")
|
||||
pass := config.FileGet(name, "pass")
|
||||
bearerToken := config.FileGet(name, "bearer_token")
|
||||
if !strings.HasSuffix(opt.URL, "/") {
|
||||
opt.URL += "/"
|
||||
}
|
||||
@@ -320,10 +324,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(f)
if user != "" || pass != "" {
if opt.User != "" || opt.Pass != "" {
f.srv.SetUserPass(opt.User, opt.Pass)
} else if bearerToken != "" {
f.srv.SetHeader("Authorization", "BEARER "+bearerToken)
} else if opt.BearerToken != "" {
f.srv.SetHeader("Authorization", "BEARER "+opt.BearerToken)
}
f.srv.SetErrorHandler(errorHandler)
err = f.setQuirks(opt.Vendor)
@@ -360,9 +364,11 @@ func (f *Fs) setQuirks(vendor string) error {
f.canStream = true
f.precision = time.Second
f.useOCMtime = true
f.hasChecksums = true
case "nextcloud":
f.precision = time.Second
f.useOCMtime = true
f.hasChecksums = true
case "sharepoint":
// To mount sharepoint, two Cookies are required
// They have to be set instead of BasicAuth
@@ -372,6 +378,17 @@ func (f *Fs) setQuirks(vendor string) error {
if err != nil {
return err
}

odrvcookie.NewRenew(12*time.Hour, func() {
spCookies, err := spCk.Cookies()
if err != nil {
fs.Errorf("could not renew cookies: %s", err.Error())
return
}
f.srv.SetCookie(&spCookies.FedAuth, &spCookies.RtFa)
fs.Debugf(spCookies, "successfully renewed sharepoint cookies")
})

f.srv.SetCookie(&spCookies.FedAuth, &spCookies.RtFa)

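The odrvcookie.NewRenew call above re-authenticates on a 12-hour cycle so the sharepoint cookies stay valid for long-running transfers. As a rough illustration of that kind of background renewal loop, here is a self-contained sketch; renewEvery is a hypothetical helper, not the actual odrvcookie API.

package main

import (
	"fmt"
	"time"
)

// renewEvery is a hypothetical stand-in for what odrvcookie.NewRenew does
// conceptually: run the supplied renew function on a fixed interval in the
// background so the credentials never expire mid-transfer.
func renewEvery(interval time.Duration, renew func()) *time.Ticker {
	ticker := time.NewTicker(interval)
	go func() {
		for range ticker.C {
			renew()
		}
	}()
	return ticker
}

func main() {
	t := renewEvery(500*time.Millisecond, func() { fmt.Println("renewing cookies...") })
	defer t.Stop()
	time.Sleep(1200 * time.Millisecond) // let the ticker fire a couple of times
}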
// sharepoint, unlike the other vendors, only lists files if the depth header is set to 0
@@ -418,6 +435,22 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}

// Read the normal props, plus the checksums
//
// <oc:checksums><oc:checksum>SHA1:f572d396fae9206628714fb2ce00f72e94f2258f MD5:b1946ac92492d2347c6235b4d2611184 ADLER32:084b021f</oc:checksum></oc:checksums>
var owncloudProps = []byte(`<?xml version="1.0"?>
<d:propfind xmlns:d="DAV:" xmlns:oc="http://owncloud.org/ns" xmlns:nc="http://nextcloud.org/ns">
<d:prop>
<d:displayname />
<d:getlastmodified />
<d:getcontentlength />
<d:resourcetype />
<d:getcontenttype />
<oc:checksums />
</d:prop>
</d:propfind>
`)

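The <oc:checksum> text shown in the comment above packs several hashes into one space-separated string, and setMetaData later reads them back through info.Hashes(). As a rough illustration of what such a parser has to do, here is a self-contained sketch; the helper name parseChecksums is hypothetical and not the actual rclone implementation.

package main

import (
	"fmt"
	"strings"
)

// parseChecksums splits a value like
// "SHA1:f572d396... MD5:b1946ac9... ADLER32:084b021f"
// into a map keyed by the hash name, e.g. {"SHA1": "f572d396...", ...}.
func parseChecksums(s string) map[string]string {
	out := map[string]string{}
	for _, field := range strings.Fields(s) {
		if kv := strings.SplitN(field, ":", 2); len(kv) == 2 {
			out[strings.ToUpper(kv[0])] = strings.ToLower(kv[1])
		}
	}
	return out
}

func main() {
	sums := parseChecksums("SHA1:f572d396fae9206628714fb2ce00f72e94f2258f MD5:b1946ac92492d2347c6235b4d2611184 ADLER32:084b021f")
	fmt.Println(sums["SHA1"], sums["MD5"])
}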
// list the objects into the function supplied
//
// If directories is set it only sends directories
@@ -437,6 +470,9 @@ func (f *Fs) listAll(dir string, directoriesOnly bool, filesOnly bool, depth str
"Depth": depth,
},
}
if f.hasChecksums {
opts.Body = bytes.NewBuffer(owncloudProps)
}
var result api.Multistatus
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
@@ -593,10 +629,9 @@ func (f *Fs) mkParentDir(dirPath string) error {
return f.mkdir(parent)
}

// mkdir makes the directory and parents using native paths
func (f *Fs) mkdir(dirPath string) error {
// defer log.Trace(dirPath, "")("")
// We assume the root is already ceated
// low level mkdir, only makes the directory, doesn't attempt to create parents
func (f *Fs) _mkdir(dirPath string) error {
// We assume the root is already created
if dirPath == "" {
return nil
}
@@ -609,20 +644,27 @@ func (f *Fs) mkdir(dirPath string) error {
Path: dirPath,
NoResponse: true,
}
err := f.pacer.Call(func() (bool, error) {
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(&opts)
return shouldRetry(resp, err)
})
}

// mkdir makes the directory and parents using native paths
func (f *Fs) mkdir(dirPath string) error {
// defer log.Trace(dirPath, "")("")
err := f._mkdir(dirPath)
if apiErr, ok := err.(*api.Error); ok {
// already exists
if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable {
// owncloud returns 423/StatusLocked if the create is already in progress
if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable || apiErr.StatusCode == http.StatusLocked {
return nil
}
// parent does not exists
// parent does not exist
if apiErr.StatusCode == http.StatusConflict {
err = f.mkParentDir(dirPath)
if err == nil {
err = f.mkdir(dirPath)
err = f._mkdir(dirPath)
}
}
}
@@ -834,9 +876,52 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
if f.hasChecksums {
return hash.NewHashSet(hash.MD5, hash.SHA1)
}
return hash.Set(hash.None)
}

// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
opts := rest.Opts{
Method: "PROPFIND",
Path: "",
ExtraHeaders: map[string]string{
"Depth": "0",
},
}
opts.Body = bytes.NewBuffer([]byte(`<?xml version="1.0" ?>
<D:propfind xmlns:D="DAV:">
<D:prop>
<D:quota-available-bytes/>
<D:quota-used-bytes/>
</D:prop>
</D:propfind>
`))
var q = api.Quota{
Available: -1,
Used: -1,
}
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(&opts, nil, &q)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "about call failed")
}
usage := &fs.Usage{}
if q.Available >= 0 && q.Used >= 0 {
usage.Total = fs.NewUsageValue(q.Available + q.Used)
}
if q.Used >= 0 {
usage.Used = fs.NewUsageValue(q.Used)
}
return usage, nil
}

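About above initialises Available and Used to -1 so that a server which omits quota-available-bytes or quota-used-bytes never produces a bogus total: Total is only filled in when both values are present. A small standalone sketch of that guard, with made-up numbers:

package main

import "fmt"

// quotaTotal mirrors the check in About above: only compute a total when the
// server reported both available and used bytes, otherwise leave it unknown.
func quotaTotal(available, used int64) (int64, bool) {
	if available >= 0 && used >= 0 {
		return available + used, true
	}
	return 0, false
}

func main() {
	// e.g. 5 GiB free and 1 GiB used -> 6 GiB total
	if total, ok := quotaTotal(5<<30, 1<<30); ok {
		fmt.Println("total bytes:", total)
	}
	// a server that omits quota-available-bytes (reported as -1) yields no total
	if _, ok := quotaTotal(-1, 1<<30); !ok {
		fmt.Println("total unknown")
	}
}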
// ------------------------------------------------------------

// Fs returns the parent Fs
@@ -857,12 +942,17 @@ func (o *Object) Remote() string {
return o.remote
}

// Hash returns the SHA-1 of an object returning a lowercase hex string
// Hash returns the SHA1 or MD5 of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.SHA1 {
return "", hash.ErrUnsupported
if o.fs.hasChecksums {
switch t {
case hash.SHA1:
return o.sha1, nil
case hash.MD5:
return o.md5, nil
}
}
return o.sha1, nil
return "", hash.ErrUnsupported
}

// Size returns the size of an object in bytes
@@ -880,6 +970,11 @@ func (o *Object) setMetaData(info *api.Prop) (err error) {
o.hasMetaData = true
o.size = info.Size
o.modTime = time.Time(info.Modified)
if o.fs.hasChecksums {
hashes := info.Hashes()
o.sha1 = hashes[hash.SHA1]
o.md5 = hashes[hash.MD5]
}
return nil
}

@@ -957,10 +1052,23 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
Body: in,
NoResponse: true,
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
ContentType: fs.MimeType(src),
}
if o.fs.useOCMtime {
opts.ExtraHeaders = map[string]string{
"X-OC-Mtime": fmt.Sprintf("%f", float64(src.ModTime().UnixNano())/1E9),
if o.fs.useOCMtime || o.fs.hasChecksums {
opts.ExtraHeaders = map[string]string{}
if o.fs.useOCMtime {
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime().UnixNano())/1E9)
}
if o.fs.hasChecksums {
// Set an upload checksum - prefer SHA1
//
// This is used as an upload integrity test. If we set
// only SHA1 here, owncloud will calculate the MD5 too.
if sha1, _ := src.Hash(hash.SHA1); sha1 != "" {
opts.ExtraHeaders["OC-Checksum"] = "SHA1:" + sha1
} else if md5, _ := src.Hash(hash.MD5); md5 != "" {
opts.ExtraHeaders["OC-Checksum"] = "MD5:" + md5
}
}
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
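For reference, the two upload headers set above end up looking like the values printed by this standalone sketch: X-OC-Mtime carries the modification time as fractional seconds since the Unix epoch, and OC-Checksum carries an algorithm-prefixed hex digest. The sample time and digest below are made up for illustration.

package main

import (
	"fmt"
	"time"
)

func main() {
	// X-OC-Mtime: same formatting as the Update code above, i.e. seconds
	// with a fractional part, e.g. "1546300800.000000".
	modTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
	fmt.Println("X-OC-Mtime:", fmt.Sprintf("%f", float64(modTime.UnixNano())/1e9))

	// OC-Checksum: algorithm prefix plus lowercase hex digest (sample value).
	sha1 := "f572d396fae9206628714fb2ce00f72e94f2258f"
	fmt.Println("OC-Checksum:", "SHA1:"+sha1)
}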
@@ -968,6 +1076,12 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
return shouldRetry(resp, err)
})
if err != nil {
// Give the WebDAV server a chance to get its internal state in order after the
// error. The error may have been local in which case we closed the connection.
// The server may still be dealing with it for a moment. A sleep isn't ideal but I
// haven't been able to think of a better method to find out if the server has
// finished - ncw
time.Sleep(1 * time.Second)
// Remove failed upload
_ = o.Remove()
return err
@@ -998,5 +1112,6 @@ var (
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)

@@ -1,34 +0,0 @@
package src

//from yadisk

import (
"io"
"net/http"
)

//RootAddr is the base URL for Yandex Disk API.
const RootAddr = "https://cloud-api.yandex.com" //also https://cloud-api.yandex.net and https://cloud-api.yandex.ru

func (c *Client) setRequestScope(req *http.Request) {
req.Header.Add("Accept", "application/json")
req.Header.Add("Content-Type", "application/json")
req.Header.Add("Authorization", "OAuth "+c.token)
}

func (c *Client) scopedRequest(method, urlPath string, body io.Reader) (*http.Request, error) {
fullURL := RootAddr
if urlPath[:1] != "/" {
fullURL += "/" + urlPath
} else {
fullURL += urlPath
}

req, err := http.NewRequest(method, fullURL, body)
if err != nil {
return req, err
}

c.setRequestScope(req)
return req, nil
}
Some files were not shown because too many files have changed in this diff