mirror of https://github.com/rclone/rclone.git (synced 2026-01-22 20:33:17 +00:00)

Compare commits: v1.43...adb-remote (503 commits)
(The per-commit table is omitted: its author and date columns were lost in extraction, leaving only bare SHA1 hashes.)
(AppVeyor CI configuration)

@@ -4,6 +4,9 @@ os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\ncw\rclone
cache:
- '%LocalAppData%\go-build'
environment:
GOPATH: C:\gopath
CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
@@ -43,4 +46,4 @@ artifacts:
- path: build/*-v*.zip

deploy_script:
- IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
- IF "%APPVEYOR_REPO_NAME%" == "ncw/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
(CircleCI configuration)

@@ -1,3 +1,4 @@
---
version: 2

jobs:
@@ -13,10 +14,10 @@ jobs:
- run:
name: Cross-compile rclone
command: |
docker pull billziss/xgo-cgofuse
docker pull rclone/xgo-cgofuse
go get -v github.com/karalabe/xgo
xgo \
--image=billziss/xgo-cgofuse \
--image=rclone/xgo-cgofuse \
--targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-tags cmount \
.
@@ -29,6 +30,21 @@ jobs:
command: |
mkdir -p /tmp/rclone.dist
cp -R rclone-* /tmp/rclone.dist
mkdir build
cp -R rclone-* build/

- run:
name: Build rclone
command: |
go version
go build

- run:
name: Upload artifacts
command: |
if [[ $CIRCLE_PULL_REQUEST != "" ]]; then
make circleci_upload
fi

- store_artifacts:
path: /tmp/rclone.dist
.github/ISSUE_TEMPLATE.md (31) vendored, new file

@@ -0,0 +1,31 @@
<!--

Welcome :-) We understand you are having a problem with rclone; we want to help you with that!

If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:

https://forum.rclone.org/

instead of filing an issue for a quick response.

If you are reporting a bug or asking for a new feature then please use one of the templates here:

https://github.com/ncw/rclone/issues/new

otherwise fill in the form below.

Thank you

The Rclone Developers

-->

#### Output of `rclone version`

#### Describe the issue
(bug report issue template)

@@ -1,14 +1,17 @@
---
name: Bug report
about: Report a problem with rclone
---

<!--

Hi!
Welcome :-) We understand you are having a problem with rclone; we want to help you with that!

We understand you are having a problem with rclone or have an idea for an improvement - we want to help you with that!

If you've just got a question or aren't sure if you've found a bug then please use the rclone forum
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:

https://forum.rclone.org/

instead of filing an issue. We'll reply quickly and it won't increase our massive issue backlog.
instead of filing an issue for a quick response.

If you think you might have found a bug, please can you try to replicate it with the latest beta?

@@ -16,9 +19,7 @@ If you think you might have found a bug, please can you try to replicate it with

If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)

If you have an idea for an improvement, then please search the old issues first and if you don't find your idea, make a new issue.

Thanks
Thank you

The Rclone Developers

@@ -27,17 +28,23 @@ The Rclone Developers

#### What is the problem you are having with rclone?

#### What is your rclone version (eg output from `rclone -V`)

#### What is your rclone version (output from `rclone version`)

#### Which OS you are using and how many bits (eg Windows 7, 64 bit)

#### Which cloud storage system are you using? (eg Google Drive)

#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)

#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
.github/ISSUE_TEMPLATE/Feature.md (36) vendored, new file

@@ -0,0 +1,36 @@
---
name: Feature request
about: Suggest a new feature or enhancement for rclone
---

<!--

Welcome :-)

So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.

Here is a checklist of things to do:

1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum first: https://forum.rclone.org/
3. Make a feature request issue (this is the right place!).
4. Be prepared to get involved making the feature :-)

Looking forward to your great idea!

The Rclone Developers

-->

#### What is your current rclone version (output from `rclone version`)?

#### What problem are you are trying to solve?

#### How do you think rclone should be changed to solve that?
.github/PULL_REQUEST_TEMPLATE.md (29) vendored, new file

@@ -0,0 +1,29 @@
<!--
Thank you very much for contributing code or documentation to rclone! Please
fill out the following questions to make it easier for us to review your
changes.

You do not need to check all the boxes below all at once, feel free to take
your time and add more commits. If you're done and ready for review, please
check the last box.
-->

#### What is the purpose of this change?

<!--
Describe the changes here
-->

#### Was the change discussed in an issue or in the forum before?

<!--
Link issues and relevant forum posts here.
-->

#### Checklist

- [ ] I have read the [contribution guidelines](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
- [ ] I have added tests for all changes in this PR if appropriate.
- [ ] I have added documentation for the changes if appropriate.
- [ ] All commit messages are in [house style](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#commit-messages).
- [ ] I'm done, this Pull Request is ready for review :-)
.golangci.yml (30) new file

@@ -0,0 +1,30 @@
# golangci-lint configuration options

run:
  build-tags:
    - cmount

linters:
  enable:
    - deadcode
    - errcheck
    - goimports
    - golint
    - ineffassign
    - structcheck
    - varcheck
    - govet
    - unconvert
    #- prealloc
    #- maligned
  disable-all: true

issues:
  # Enable some lints excluded by default
  exclude-use-default: false

  # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
  max-per-linter: 0

  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
  max-same-issues: 0
(gometalinter configuration, removed)

@@ -1,14 +0,0 @@
{
  "Enable": [
    "deadcode",
    "errcheck",
    "goimports",
    "golint",
    "ineffassign",
    "structcheck",
    "varcheck",
    "vet"
  ],
  "EnableGC": true,
  "Vendor": true
}
.travis.yml (15)

@@ -4,12 +4,13 @@ dist: trusty
os:
- linux
go:
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
- 1.12rc1
- tip
go_import_path: github.com/ncw/rclone
before_install:
- if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
- if [[ $TRAVIS_OS_NAME == osx ]]; then brew update && brew tap caskroom/cask && brew cask install osxfuse ; fi
@@ -24,6 +25,7 @@ script:
env:
global:
- GOTAGS=cmount
- GO111MODULE=off
- secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
- secure: AMjrMAksDy3QwqGqnvtUg8FL/GNVgNqTqhntLF9HSU0njHhX6YurGGnfKdD9vNHlajPQOewvmBjwNLcDWGn2WObdvmh9Ohep0EmOjZ63kliaRaSSQueSd8y0idfqMQAxep0SObOYbEDVmQh0RCAE9wOVKRaPgw98XvgqWGDq5Tw=
- secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
@@ -34,18 +36,25 @@ addons:
- libfuse-dev
- rpm
- pkg-config
cache:
directories:
- $HOME/.cache/go-build
matrix:
allow_failures:
- go: tip
include:
- os: osx
go: 1.11.x
go: 1.12rc1
env: GOTAGS=""
cache:
directories:
- $HOME/Library/Caches/go-build
deploy:
provider: script
script: make travis_beta
skip_cleanup: true
on:
repo: ncw/rclone
all_branches: true
go: 1.11.x
go: 1.12rc1
condition: $TRAVIS_PULL_REQUEST == false
(CONTRIBUTING.md)

@@ -21,14 +21,14 @@ with the [latest beta of rclone](https://beta.rclone.org/):
## Submitting a pull request ##

If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via Github.
like to implement then please submit a pull request via GitHub.

If it is a big feature then make an issue first so it can be discussed.

You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info.

First in your web browser press the fork button on [rclone's Github
First in your web browser press the fork button on [rclone's GitHub
page](https://github.com/ncw/rclone).

Now in your terminal
@@ -64,22 +64,31 @@ packages which you can install with

Make sure you

* Add documentation for a new feature (see below for where)
* Add unit tests for a new feature
* Add [documentation](#writing-documentation) for a new feature.
* Follow the [commit message guidelines](#commit-messages).
* Add [unit tests](#testing) for a new feature
* squash commits down to one per feature
* rebase to master `git rebase master`
* rebase to master with `git rebase master`

When you are done with that

git push origin my-new-feature
git push origin my-new-feature

Go to the Github website and click [Create pull
Go to the GitHub website and click [Create pull
request](https://help.github.com/articles/creating-a-pull-request/).

You patch will get reviewed and you might get asked to fix some stuff.

If so, then make the changes in the same branch, squash the commits,
rebase it to master then push it to Github with `--force`.
rebase it to master then push it to GitHub with `--force`.

## Enabling CI for your fork ##

The CI config files for rclone have taken care of forks of the project, so you can enable CI for your fork repo easily.

rclone currently uses [Travis CI](https://travis-ci.org/), [AppVeyor](https://ci.appveyor.com/), and
[Circle CI](https://circleci.com/) to build the project. To enable them for your fork, simply go into their
websites, find your fork of rclone, and enable building there.

## Testing ##

@@ -114,6 +123,13 @@ but they can be run against any of the remotes.

cd fs/operations
go test -v -remote TestDrive:

If you want to use the integration test framework to run these tests
all together with an HTML report and test retries then from the
project root:

go install github.com/ncw/rclone/fstest/test_all
test_all -backend drive

If you want to run all the integration tests against all the remotes,
then change into the project root and run

@@ -173,10 +189,14 @@ with modules beneath.

If you are adding a new feature then please update the documentation.

If you add a new flag, then if it is a general flag, document it in
If you add a new general flag (not for a backend), then document it in
`docs/content/docs.md` - the flags there are supposed to be in
alphabetical order. If it is a remote specific flag, then document it
in `docs/content/remote.md`.
alphabetical order.

If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field. The first line of this is used
for the flag help, the remainder is shown to the user in `rclone
config` and is added to the docs with `make backenddocs`.

The only documentation you need to edit are the `docs/content/*.md`
files. The MANUAL.*, rclone.1, web site etc are all auto generated
@@ -195,14 +215,20 @@ file.

## Commit messages ##

Please make the first line of your commit message a summary of the
change, and prefix it with the directory of the change followed by a
colon. The changelog gets made by looking at just these first lines
so make it good!
change that a user (not a developer) of rclone would like to read, and
prefix it with the directory of the change followed by a colon. The
changelog gets made by looking at just these first lines so make it
good!

If you have more to say about the commit, then enter a blank line and
carry on the description. Remember to say why the change was needed -
the commit itself shows what was changed.

Writing more is better than less. Comparing the behaviour before the
change to that after the change is very useful. Imagine you are
writing to yourself in 12 months time when you've forgotten everything
about what you just did and you need to get up to speed quickly.

If the change fixes an issue then write `Fixes #1234` in the commit
message. This can be on the subject line if it will fit. If you
don't want to close the associated issue just put `#1234` and the
@@ -250,9 +276,8 @@ To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency, add it to
`go.mod` and `go.sum` and vendor it for older go versions.

export GO111MODULE=on
go get github.com/ncw/new_dependency
go mod vendor
GO111MODULE=on go get github.com/ncw/new_dependency
GO111MODULE=on go mod vendor

You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.
@@ -267,9 +292,8 @@ in `vendor`.

If you need to update a dependency then run

export GO111MODULE=on
go get -u github.com/pkg/errors
go mod vendor
GO111MODULE=on go get -u github.com/pkg/errors
GO111MODULE=on go mod vendor

Check in in a single commit as above.

@@ -326,7 +350,13 @@ Unit tests

Integration tests

* Add your fs to `fstest/test_all/test_all.go`
* Add your backend to `fstest/test_all/config.yaml`
* Once you've done that then you can use the integration test framework from the project root:
* go install ./...
* test_all -backend remote

Or if you want to run the integration tests manually:

* Make sure integration tests pass with
* `cd fs/operations`
* `go test -v -remote TestRemote:`
@@ -341,11 +371,10 @@ See the [testing](#testing) section for more information on integration tests.

Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.

* `README.md` - main Github page
* `docs/content/remote.md` - main docs page
* `README.md` - main GitHub page
* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/about.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `bin/make_manual.py` - add the page to the `docs` constant
* `cmd/cmd.go` - the main help for rclone
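The checklist above asks for unit and integration tests for a new backend, but the diff itself does not show what the accompanying test file looks like. As a rough sketch only (the exact fstests API differs between rclone versions), a backend test file typically registers the backend with the shared integration test suite along these lines:

```go
// backend/adb/adb_test.go -- illustrative sketch, not part of this diff.
package adb_test

import (
	"testing"

	"github.com/ncw/rclone/backend/adb"
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs the shared backend test suite against a remote
// called TestADB:, which must exist in the rclone config used for testing.
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestADB:",
		NilObject:  (*adb.Object)(nil),
	})
}
```

Such a test would then be run with `go test -v -remote TestADB:` from the backend directory, matching the `-remote` flag usage shown in the testing section above.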
(MAINTAINERS.md)

@@ -1,12 +1,17 @@
# Maintainers guide for rclone #

Current active maintainers of rclone are
Current active maintainers of rclone are:

* Nick Craig-Wood @ncw
* Stefan Breunig @breunigs
* Ishuah Kariuki @ishuah
* Remus Bunduc @remusb - cache subsystem maintainer
* Fabian Möller @B4dM4n
| Name              | GitHub ID   | Specific Responsibilities    |
| :---------------- | :---------- | :--------------------------- |
| Nick Craig-Wood   | @ncw        | overall project health       |
| Stefan Breunig    | @breunigs   |                              |
| Ishuah Kariuki    | @ishuah     |                              |
| Remus Bunduc      | @remusb     | cache backend                |
| Fabian Möller     | @B4dM4n     |                              |
| Alex Chen         | @Cnly       | onedrive backend             |
| Sandeep Ummadi    | @sandeepkru | azureblob backend            |
| Sebastian Bünger  | @buengese   | jottacloud & yandex backends |

**This is a work in progress Draft**

@@ -56,7 +61,7 @@ Close tickets as soon as you can - make sure they are tagged with a release. Po

Try to process pull requests promptly!

Merging pull requests on Github itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
Merging pull requests on GitHub itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.

After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.
MANUAL.html (5346): file diff suppressed because it is too large
MANUAL.txt (5879): file diff suppressed because it is too large
Makefile (60)

@@ -11,7 +11,7 @@ ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
BRANCH_PATH :=
endif
TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
ifneq ($(TAG),$(LAST_TAG))
TAG := $(TAG)-beta
endif
@@ -50,10 +50,9 @@ version:

# Full suite of integration tests
test: rclone
go install github.com/ncw/rclone/fstest/test_all
-go test -v -count 1 $(BUILDTAGS) $(GO_FILES) 2>&1 | tee test.log
-test_all github.com/ncw/rclone/fs/operations github.com/ncw/rclone/fs/sync 2>&1 | tee fs/test_all.log
@echo "Written logs in test.log and fs/test_all.log"
go install --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/ncw/rclone/fstest/test_all
-test_all 2>&1 | tee test_all.log
@echo "Written logs in test_all.log"

# Quick test
quicktest:
@@ -65,30 +64,20 @@ endif
# Do source code quality checks
check: rclone
ifdef FULL_TESTS
go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
errcheck $(BUILDTAGS) ./...
find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
go list ./... | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
@# we still run go vet for -printfuncs which golangci-lint doesn't do yet
@# see: https://github.com/golangci/golangci-lint/issues/204
@echo "-- START CODE QUALITY REPORT -------------------------------"
@go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
@golangci-lint run ./...
@echo "-- END CODE QUALITY REPORT ---------------------------------"
else
@echo Skipping source quality tests as version of go too old
endif

gometalinter_install:
go get -u github.com/alecthomas/gometalinter
gometalinter --install --update

# We aren't using gometalinter as the default linter yet because
# 1. it doesn't support build tags: https://github.com/alecthomas/gometalinter/issues/275
# 2. can't get -printfuncs working with the vet linter
gometalinter:
gometalinter ./...

# Get the build dependencies
build_dep:
ifdef FULL_TESTS
go get -u github.com/kisielk/errcheck
go get -u golang.org/x/tools/cmd/goimports
go get -u github.com/golang/lint/golint
go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
endif

# Get the release dependencies
@@ -99,15 +88,15 @@ release_dep:
# Update dependencies
update:
GO111MODULE=on go get -u ./...
GO111MODULE=on go tidy
GO111MODULE=on go vendor
GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor

doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs

rclone.1: MANUAL.md
pandoc -s --from markdown --to man MANUAL.md -o rclone.1

MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs
./bin/make_manual.py

MANUAL.html: MANUAL.md
@@ -117,7 +106,10 @@ MANUAL.txt: MANUAL.md
pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt

commanddocs: rclone
rclone gendocs docs/content/commands/
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/commands/

backenddocs: rclone bin/make_backend_docs.py
./bin/make_backend_docs.py

rcdocs: rclone
bin/make_rc_docs.sh
@@ -152,8 +144,8 @@ check_sign:
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c

upload:
rclone -v copy --exclude '*current*' build/ memstore:downloads-rclone-org/$(TAG)
rclone -v copy --include '*current*' --include version.txt build/ memstore:downloads-rclone-org
rclone -P copy build/ memstore:downloads-rclone-org/$(TAG)
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"'

upload_github:
./bin/upload-github $(TAG)
@@ -183,6 +175,13 @@ ifndef BRANCH_PATH
endif
@echo Beta release ready at $(BETA_URL)

circleci_upload:
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifndef BRANCH_PATH
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
@echo Beta release ready at $(BETA_URL)/testbuilds

BUILD_FLAGS := -exclude "^(windows|darwin)/"
ifeq ($(TRAVIS_OS_NAME),osx)
BUILD_FLAGS := -include "^darwin/" -cgo
@@ -190,7 +189,7 @@ endif

travis_beta:
ifeq ($(TRAVIS_OS_NAME),linux)
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
endif
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)
@@ -202,7 +201,7 @@ endif

# Fetch the binary builds from travis and appveyor
fetch_binaries:
rclone -v sync $(BETA_UPLOAD) build/
rclone -P sync $(BETA_UPLOAD) build/

serve: website
cd docs && hugo server -v -w
@@ -229,4 +228,3 @@ startdev:

winzip:
zip -9 rclone-$(TAG).zip rclone.exe
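The NEW_TAG change above switches the computed next version from two components to three: the perl one-liner strips the leading v, adds 0.01 to the version number, and now formats the result with a trailing .0, so for example a LAST_TAG of v1.45 yields v1.46.0. A small Go equivalent of that arithmetic, for illustration only (the Makefile itself keeps using perl):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// nextTag mirrors the Makefile's NEW_TAG rule: strip the leading "v",
// add 0.01 to the major.minor number and append a ".0" patch component.
func nextTag(lastTag string) string {
	n, _ := strconv.ParseFloat(strings.TrimPrefix(lastTag, "v"), 64)
	return fmt.Sprintf("v%.2f.0", n+0.01)
}

func main() {
	fmt.Println(nextTag("v1.45")) // prints v1.46.0
}
```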
README.md (106)

@@ -2,10 +2,11 @@

[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
[Download](https://rclone.org/downloads/) |
[Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/)
[Forum](https://forum.rclone.org/) |
[G+](https://google.com/+RcloneOrg)

[](https://travis-ci.org/ncw/rclone)
@@ -13,50 +14,83 @@
[](https://circleci.com/gh/ncw/rclone/tree/master)
[](https://godoc.org/github.com/ncw/rclone)

Rclone is a command line program to sync files and directories to and from
# Rclone

* Amazon Drive ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 / Dreamhost / Ceph / Minio / Wasabi
* Backblaze B2
* Box
* Dropbox
* FTP
* Google Cloud Storage
* Google Drive
* HTTP
* Hubic
* Jottacloud
* Mega
* Microsoft Azure Blob Storage
* Microsoft OneDrive
* OpenDrive
* Openstack Swift / Rackspace cloud files / Memset Memstore / OVH / Oracle Cloud Storage
* pCloud
* QingStor
* SFTP
* Webdav / Owncloud / Nextcloud
* Yandex Disk
* The local filesystem
Rclone *("rsync for cloud storage")* is a command line program to sync files and directories to and from different cloud storage providers.

Features
## Storage providers

* MD5/SHA1 hashes checked at all times for file integrity
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* put.io [:page_facing_up:](https://rclone.org/webdav/#put-io)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)

Please see [the full list of all storage providers and their features](https://rclone.org/overview/)

## Features

* MD5/SHA-1 hashes checked at all times for file integrity
* Timestamps preserved on files
* Partial syncs supported on a whole file basis
* Copy mode to just copy new/changed files
* Sync (one way) mode to make a directory identical
* Check mode to check for file hash equality
* Can sync to and from network, eg two different cloud accounts
* Optional encryption (Crypt)
* Optional FUSE mount
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, e.g. two different cloud accounts
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))

See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.
## Installation & documentation

* https://rclone.org/
Please see the [rclone website](https://rclone.org/) for:

* [Installation](https://rclone.org/install/)
* [Documentation & configuration](https://rclone.org/docs/)
* [Changelog](https://rclone.org/changelog/)
* [FAQ](https://rclone.org/faq/)
* [Storage providers](https://rclone.org/overview/)
* [Forum](https://forum.rclone.org/)
* ...and more

## Downloads

* https://rclone.org/downloads/

License
-------

This is free software under the terms of MIT the license (check the
COPYING file included in this package).
[COPYING file](/COPYING) included in this package).
RELEASE.md (37)

@@ -31,3 +31,40 @@ Early in the next release cycle update the vendored dependencies
* git status
* git add new files
* git commit -a -v

If `make update` fails with errors like this:

```
# github.com/cpuguy83/go-md2man/md2man
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
```

Can be fixed with

* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
* GO111MODULE=on go mod tidy
* GO111MODULE=on go mod vendor


Making a point release. If rclone needs a point release due to some
horrendous bug, then
* git branch v1.XX v1.XX-fixes
* git cherry-pick any fixes
* Test (see above)
* make NEW_TAG=v1.XX.1 tag
* edit docs/content/changelog.md
* make TAG=v1.43.1 doc
* git commit -a -v -m "Version v1.XX.1"
* git tag -d -v1.XX.1
* git tag -s -m "Version v1.XX.1" v1.XX.1
* git push --tags -u origin v1.XX-fixes
* make BRANCH_PATH= TAG=v1.43.1 fetch_binaries
* make TAG=v1.43.1 tarball
* make TAG=v1.43.1 sign_upload
* make TAG=v1.43.1 check_sign
* make TAG=v1.43.1 upload
* make TAG=v1.43.1 upload_website
* make TAG=v1.43.1 upload_github
* NB this overwrites the current beta so after the release, rebuild the last travis build
* Announce!
backend/adb/adb.go (821) new file

@@ -0,0 +1,821 @@
package adb

import (
    "bytes"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "path"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config/configmap"
    "github.com/ncw/rclone/fs/config/configstruct"
    "github.com/ncw/rclone/fs/hash"
    "github.com/ncw/rclone/lib/readers"
    "github.com/pkg/errors"
    adb "github.com/thinkhy/go-adb"
    "github.com/thinkhy/go-adb/wire"
)

// Register with Fs
func init() {
    fs.Register(&fs.RegInfo{
        Name:        "adb",
        Description: "Android Debug Bridge",
        NewFs:       NewFs,
        Options: []fs.Option{{
            Name:     "serial",
            Help:     "The device serial to use. Leave empty for auto selection.",
            Advanced: true,
        }, {
            Name:     "host",
            Default:  "localhost",
            Help:     "The ADB server host.",
            Advanced: true,
        }, {
            Name:     "port",
            Default:  5037,
            Help:     "The ADB server port.",
            Advanced: true,
        }, {
            Name:     "executable",
            Help:     "The ADB executable path.",
            Advanced: true,
        }, {
            Name:     "copy_links",
            Help:     "Follow symlinks and copy the pointed to item.",
            Default:  false,
            Advanced: true,
        }},
    })
}

// Options defines the configuration for this backend
type Options struct {
    Serial         string
    Host           string
    Port           uint16
    Executable     string
    FollowSymlinks bool `config:"copy_links"`
}
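For reference, once this backend is registered a remote using it is just a section in the rclone config file. A hypothetical example (the remote name and serial below are invented; serial, host, port, executable and copy_links are the option names registered above):

```
[phone]
type = adb
serial = 0123456789ABCDEF
copy_links = true
```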
// Fs represents a adb device
type Fs struct {
    name        string       // name of this remote
    root        string       // the path we are working on
    opt         Options      // parsed options
    features    *fs.Features // optional features
    client      *adb.Adb
    device      *execDevice
    statFunc    statFunc
    statFuncMu  sync.Mutex
    touchFunc   touchFunc
    touchFuncMu sync.Mutex
}
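The statFunc and touchFunc types used by Fs are declared later in the file, outside the part of the diff shown here. Judging only from the call sites below (f.statFunc(&o) and o.fs.touchFunc(o, t)), they are presumably function types along these lines; treat this as an inference, not the branch's actual declarations:

```go
// Inferred from the call sites; the real declarations are not in this excerpt.
type statFunc func(o *Object) error
type touchFunc func(o *Object, t time.Time) error
```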
// Object describes a adb file
type Object struct {
    fs      *Fs    // what this object is part of
    remote  string // The remote path
    size    int64
    mode    os.FileMode
    modTime time.Time
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
    return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
    return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
    return fmt.Sprintf("ADB root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
    return f.features
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    }

    if root == "" {
        root = "/"
    }

    f := &Fs{
        name:      name,
        root:      root,
        opt:       *opt,
        statFunc:  (*Object).statTry,
        touchFunc: (*Object).touchTry,
    }
    f.features = (&fs.Features{
        CanHaveEmptyDirectories: true,
    }).Fill(f)

    f.client, err = adb.NewWithConfig(adb.ServerConfig{
        Host:      opt.Host,
        Port:      int(opt.Port),
        PathToAdb: opt.Executable,
    })
    if err != nil {
        return nil, errors.Wrapf(err, "Could not configure ADB server")
    }
    err = f.client.StartServer()
    if err != nil {
        return nil, errors.Wrapf(err, "Could not start ADB server")
    }

    serverVersion, err := f.client.ServerVersion()
    if err != nil {
        return nil, errors.Wrapf(err, "Could not get ADB server version")
    }
    fs.Debugf(f, "ADB server version: 0x%X", serverVersion)

    serials, err := f.client.ListDeviceSerials()
    if err != nil {
        return nil, errors.Wrapf(err, "Could not get ADB devices")
    }
    descriptor := adb.AnyDevice()
    if opt.Serial != "" {
        descriptor = adb.DeviceWithSerial(opt.Serial)
    }
    if len(serials) > 1 && opt.Serial == "" {
        return nil, errors.New("Multiple ADB devices found. Use the serial config to select a specific device")
    }
    f.device = &execDevice{f.client.Device(descriptor)}

    // follow symlinks for root pathes
    entry, err := f.newEntryFollowSymlinks("")
    switch err {
    case nil:
    case fs.ErrorObjectNotFound:
    default:
        return nil, err
    }
    switch entry.(type) {
    case fs.Object:
        f.root = path.Dir(f.root)
        return f, fs.ErrorIsFile
    case nil:
        return f, nil
    case fs.Directory:
        return f, nil
    default:
        return nil, errors.Errorf("Invalid root entry type %t", entry)
    }
}

// Precision of the object storage system
func (f *Fs) Precision() time.Duration {
    return 1 * time.Second
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
    return hash.Set(hash.None)
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
    p := path.Join(f.root, dir)
    dirEntries, err := f.device.ListDirEntries(p)
    if err != nil {
        return nil, errors.Wrap(err, "ListDirEntries")
    }

    defer fs.CheckClose(dirEntries, &err)

    found := false
    for dirEntries.Next() {
        found = true
        dirEntry := dirEntries.Entry()
        switch dirEntry.Name {
        case ".", "..":
            continue
        }
        fsEntry, err := f.entryForDirEntry(path.Join(dir, dirEntry.Name), dirEntry, f.opt.FollowSymlinks)
        if err != nil {
            fs.Errorf(p, "Listing error: %q: %v", dirEntry.Name, err)
            return nil, err
        } else if fsEntry != nil {
            entries = append(entries, fsEntry)
        } else {
            fs.Debugf(f, "Skipping DirEntry %#v", dirEntry)
        }
    }
    err = dirEntries.Err()
    if err != nil {
        return nil, errors.Wrap(err, "ListDirEntries")
    }
    if !found {
        return nil, fs.ErrorDirNotFound
    }
    return
}

func (f *Fs) entryForDirEntry(remote string, e *adb.DirEntry, followSymlinks bool) (fs.DirEntry, error) {
    o := f.newObjectWithInfo(remote, e)
    // Follow symlinks if required
    if followSymlinks && (e.Mode&os.ModeSymlink) != 0 {
        err := f.statFunc(&o)
        if err != nil {
            return nil, err
        }
    }
    if o.mode.IsDir() {
        return fs.NewDir(remote, o.modTime), nil
    }
    return &o, nil
}

func (f *Fs) newEntry(remote string) (fs.DirEntry, error) {
    return f.newEntryWithFollow(remote, f.opt.FollowSymlinks)
}
func (f *Fs) newEntryFollowSymlinks(remote string) (fs.DirEntry, error) {
    return f.newEntryWithFollow(remote, true)
}
func (f *Fs) newEntryWithFollow(remote string, followSymlinks bool) (fs.DirEntry, error) {
    entry, err := f.device.Stat(path.Join(f.root, remote))
    if err != nil {
        if adb.HasErrCode(err, adb.FileNoExistError) {
            return nil, fs.ErrorObjectNotFound
        }
        return nil, errors.Wrapf(err, "Stat failed")
    }
    return f.entryForDirEntry(remote, entry, followSymlinks)
}

func (f *Fs) newObjectWithInfo(remote string, e *adb.DirEntry) Object {
    return Object{
        fs:      f,
        remote:  remote,
        size:    int64(e.Size),
        mode:    e.Mode,
        modTime: e.ModifiedAt,
    }
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
    entry, err := f.newEntry(remote)
    if err != nil {
        return nil, err
    }
    obj, ok := entry.(fs.Object)
    if !ok {
        return nil, fs.ErrorObjectNotFound
    }
    return obj, nil
}

// Put in to the remote path with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    remote := src.Remote()
    // Temporary Object under construction - info filled in by Update()
    o := f.newObject(remote)
    err := o.Update(in, src, options...)
    if err != nil {
        return nil, err
    }
    return o, nil
}

// newObject makes a half completed Object
func (f *Fs) newObject(remote string) *Object {
    return &Object{
        fs:     f,
        remote: remote,
    }
}

// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(dir string) error {
    p := path.Join(f.root, dir)
    output, code, err := f.device.execCommandWithExitCode("mkdir -p", p)
    switch err := err.(type) {
    case nil:
        return nil
    case adb.ShellExitError:
        entry, _ := f.newEntry(p)
        if _, ok := entry.(fs.Directory); ok {
            return nil
        }
        return errors.Errorf("mkdir %q failed with %d: %q", dir, code, output)
    default:
        return errors.Wrap(err, "mkdir")
    }
}

// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(dir string) error {
    p := path.Join(f.root, dir)
    output, code, err := f.device.execCommandWithExitCode("rmdir", p)
    switch err := err.(type) {
    case nil:
        return nil
    case adb.ShellExitError:
        return errors.Errorf("rmdir %q failed with %d: %q", dir, code, output)
    default:
        return errors.Wrap(err, "rmdir")
    }
}
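Mkdir, Rmdir and Remove all go through an execDevice helper whose definition is outside the part of the file shown here. The following is only a sketch of what such a helper could look like, assuming go-adb's Device.RunCommand API and the usual trick of echoing $? to recover an exit status that the adb shell protocol does not transport; the real helper evidently returns an adb.ShellExitError for non-zero exit codes, which this sketch leaves to the caller. It assumes the imports already present at the top of the file.

```go
// Hypothetical sketch of the execDevice helper used above; not the branch's code.
type execDevice struct {
	*adb.Device
}

// quoteArg wraps an argument in single quotes for the device shell.
func quoteArg(arg string) string {
	return "'" + strings.Replace(arg, "'", `'\''`, -1) + "'"
}

// execCommandWithExitCode runs a shell command on the device and returns its
// output and exit code. The adb shell protocol does not carry exit codes, so
// "; echo :$?" is appended to the command line and parsed out of the output.
func (d *execDevice) execCommandWithExitCode(cmd string, args ...string) (string, int, error) {
	for _, arg := range args {
		cmd += " " + quoteArg(arg)
	}
	out, err := d.RunCommand(cmd + "; echo :$?")
	if err != nil {
		return "", -1, err
	}
	i := strings.LastIndex(out, ":")
	if i < 0 {
		return out, -1, errors.New("exit code not found in shell output")
	}
	code, convErr := strconv.Atoi(strings.TrimSpace(out[i+1:]))
	if convErr != nil {
		return out, -1, convErr
	}
	return out[:i], code, nil
}
```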
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// ModTime returns the modification date of the file
|
||||
// It should return a best guess if one isn't available
|
||||
func (o *Object) ModTime() time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// Size returns the size of the file
|
||||
func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// Hash returns the selected checksum of the file
|
||||
// If no checksum is available it returns ""
|
||||
func (o *Object) Hash(hash.Type) (string, error) {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// Storable says whether this object can be stored
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// SetModTime sets the metadata on the object to set the modification date
|
||||
func (o *Object) SetModTime(t time.Time) error {
|
||||
return o.fs.touchFunc(o, t)
|
||||
}
|
||||
|
||||
func (o *Object) stat() error {
|
||||
return o.statStatArg(statArgC, path.Join(o.fs.root, o.remote))
|
||||
}
|
||||
|
||||
func (o *Object) setMetadata(entry *adb.DirEntry) {
|
||||
// Don't overwrite the values if we don't need to
|
||||
// this avoids upsetting the race detector
|
||||
if o.size != int64(entry.Size) {
|
||||
o.size = int64(entry.Size)
|
||||
}
|
||||
if !o.modTime.Equal(entry.ModifiedAt) {
|
||||
o.modTime = entry.ModifiedAt
|
||||
}
|
||||
if o.mode != entry.Mode {
|
||||
o.mode = decodeEntryMode(uint32(entry.Mode))
|
||||
}
|
||||
}
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||
const blockSize = 1 << 12
|
||||
|
||||
var offset, count int64 = 0, -1
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.RangeOption:
|
||||
offset, count = x.Decode(o.size)
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
if offset > o.size {
|
||||
offset = o.size
|
||||
}
|
||||
if count < 0 {
|
||||
count = o.size - offset
|
||||
} else if count+offset > o.size {
|
||||
count = o.size - offset
|
||||
}
|
||||
fs.Debugf(o, "Open: remote: %q offset: %d count: %d", o.remote, offset, count)
|
||||
|
||||
if count == 0 {
|
||||
return ioutil.NopCloser(bytes.NewReader(nil)), nil
|
||||
}
|
||||
offsetBlocks, offsetRest := offset/blockSize, offset%blockSize
|
||||
countBlocks := (count-1)/blockSize + 1
|
||||
|
||||
conn, err := o.fs.device.execCommand(fmt.Sprintf("sh -c 'dd \"if=$0\" bs=%d skip=%d count=%d 2>/dev/null'", blockSize, offsetBlocks, countBlocks), path.Join(o.fs.root, o.remote))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &adbReader{
|
||||
ReadCloser: readers.NewLimitedReadCloser(conn, count+offsetRest),
|
||||
skip: offsetRest,
|
||||
expected: count,
|
||||
}, nil
|
||||
}
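// Worked example of the block arithmetic above (illustrative numbers): with
// blockSize = 4096, offset = 10000 and count = 2000,
//
//	offsetBlocks = 10000 / 4096 = 2    (whole blocks for dd to skip)
//	offsetRest   = 10000 % 4096 = 1808 (bytes to discard from the first block read)
//	countBlocks  = (2000-1)/4096 + 1 = 1
//
// dd streams block 2 (bytes 8192..12287); the reader is limited to
// count+offsetRest = 3808 bytes and adbReader drops the first 1808, so the
// caller sees exactly bytes 10000..11999 of the file.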
|
||||
|
||||
// Update in to the object with the modTime given of the given size
|
||||
//
|
||||
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
|
||||
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
|
||||
// return an error or update the object properly (rather than e.g. calling panic).
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
for _, option := range options {
|
||||
if option.Mandatory() {
|
||||
fs.Logf(option, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
written, err := o.writeToFile(path.Join(o.fs.root, o.remote), in, 0666, src.ModTime())
|
||||
if err != nil {
|
||||
if removeErr := o.Remove(); removeErr != nil {
|
||||
fs.Errorf(o, "Failed to remove partially written file: %v", removeErr)
|
||||
}
|
||||
return err
|
||||
}
|
||||
expected := src.Size()
|
||||
if expected == -1 {
|
||||
expected = written
|
||||
}
|
||||
for _, t := range []int64{100, 250, 500, 1000, 2500, 5000, 10000} {
|
||||
err = o.stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if o.size == expected {
|
||||
return nil
|
||||
}
|
||||
fs.Debugf(o, "Invalid size after update, expected: %d got: %d", expected, o.size)
|
||||
time.Sleep(time.Duration(t) * time.Millisecond)
|
||||
}
|
||||
return o.stat()
|
||||
}
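// Note on the retry loop above: the sleeps add up to roughly 19.35 seconds
// (100+250+500+1000+2500+5000+10000 ms), so a slow device gets several
// chances for the uploaded size to become visible before the final stat
// result is returned as-is.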
|
||||
|
||||
// Remove this object
|
||||
func (o *Object) Remove() error {
|
||||
p := path.Join(o.fs.root, o.remote)
|
||||
output, code, err := o.fs.device.execCommandWithExitCode("rm", p)
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
return nil
|
||||
case adb.ShellExitError:
|
||||
return errors.Errorf("rm %q failed with %d: %q", o.remote, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "rm")
|
||||
}
|
||||
}
|
||||
|
||||
func (o *Object) writeToFile(path string, rd io.Reader, perms os.FileMode, modeTime time.Time) (written int64, err error) {
|
||||
dst, err := o.fs.device.OpenWrite(path, perms, modeTime)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer fs.CheckClose(dst, &err)
|
||||
return io.Copy(dst, rd)
|
||||
}
|
||||
|
||||
type statFunc func(*Object) error
|
||||
|
||||
func (o *Object) statTry() error {
|
||||
o.fs.statFuncMu.Lock()
|
||||
defer o.fs.statFuncMu.Unlock()
|
||||
|
||||
for _, f := range []statFunc{
|
||||
(*Object).statStatL, (*Object).statRealPath, (*Object).statReadLink,
|
||||
} {
|
||||
err := f(o)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "%s", err)
|
||||
} else {
|
||||
o.fs.statFunc = f
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return errors.Errorf("unable to resolve link target")
|
||||
}
|
||||
|
||||
const (
|
||||
statArgLc = "-Lc"
|
||||
statArgC = "-c"
|
||||
)
|
||||
|
||||
func (o *Object) statStatL() error {
|
||||
return o.statStatArg(statArgLc, path.Join(o.fs.root, o.remote))
|
||||
}
|
||||
|
||||
func (o *Object) statStatArg(arg, path string) error {
|
||||
output, code, err := o.fs.device.execCommandWithExitCode(fmt.Sprintf("stat %s %s", arg, "%f,%s,%Y"), path)
|
||||
output = strings.TrimSpace(output)
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
case adb.ShellExitError:
|
||||
return errors.Errorf("stat %q failed with %d: %q", o.remote, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "stat")
|
||||
}
|
||||
|
||||
parts := strings.Split(output, ",")
|
||||
if len(parts) != 3 {
|
||||
return errors.Errorf("stat %q invalid output %q", o.remote, output)
|
||||
}
|
||||
|
||||
mode, err := strconv.ParseUint(parts[0], 16, 32)
|
||||
if err != nil {
|
||||
return errors.Errorf("stat %q invalid output %q", o.remote, output)
|
||||
}
|
||||
size, err := strconv.ParseUint(parts[1], 10, 64)
|
||||
if err != nil {
|
||||
return errors.Errorf("stat %q invalid output %q", o.remote, output)
|
||||
}
|
||||
modTime, err := strconv.ParseInt(parts[2], 10, 64)
|
||||
if err != nil {
|
||||
return errors.Errorf("stat %q invalid output %q", o.remote, output)
|
||||
}
|
||||
|
||||
o.size = int64(size)
|
||||
o.modTime = time.Unix(modTime, 0)
|
||||
o.mode = decodeEntryMode(uint32(mode))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Object) statReadLink() error {
|
||||
p := path.Join(o.fs.root, o.remote)
|
||||
output, code, err := o.fs.device.execCommandWithExitCode("readlink -f", p)
|
||||
output = strings.TrimSuffix(output, "\n")
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
case adb.ShellExitError:
|
||||
return errors.Errorf("readlink %q failed with %d: %q", o.remote, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "readlink")
|
||||
}
|
||||
return o.statStatArg(statArgC, output)
|
||||
}
|
||||
func (o *Object) statRealPath() error {
|
||||
p := path.Join(o.fs.root, o.remote)
|
||||
output, code, err := o.fs.device.execCommandWithExitCode("realpath", p)
|
||||
output = strings.TrimSuffix(output, "\n")
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
case adb.ShellExitError:
|
||||
return errors.Errorf("realpath %q failed with %d: %q", o.remote, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "realpath")
|
||||
}
|
||||
return o.statStatArg(statArgC, output)
|
||||
}
|
||||
|
||||
type touchFunc func(*Object, time.Time) error
|
||||
|
||||
func (o *Object) touchTry(t time.Time) error {
|
||||
o.fs.touchFuncMu.Lock()
|
||||
defer o.fs.touchFuncMu.Unlock()
|
||||
|
||||
for _, f := range []touchFunc{
|
||||
(*Object).touchCmd, (*Object).touchCd,
|
||||
} {
|
||||
err := f(o, t)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "%s", err)
|
||||
} else {
|
||||
o.fs.touchFunc = f
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return errors.Errorf("unable to resolve link target")
|
||||
}
|
||||
|
||||
const (
|
||||
touchArgCmd = "-cmd"
|
||||
touchArgCd = "-cd"
|
||||
)
|
||||
|
||||
func (o *Object) touchCmd(t time.Time) error {
|
||||
return o.touchStatArg(touchArgCmd, path.Join(o.fs.root, o.remote), t)
|
||||
}
|
||||
func (o *Object) touchCd(t time.Time) error {
|
||||
return o.touchStatArg(touchArgCd, path.Join(o.fs.root, o.remote), t)
|
||||
}
|
||||
|
||||
func (o *Object) touchStatArg(arg, path string, t time.Time) error {
|
||||
output, code, err := o.fs.device.execCommandWithExitCode(fmt.Sprintf("touch %s %s", arg, t.Format(time.RFC3339Nano)), path)
|
||||
output = strings.TrimSpace(output)
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
case adb.ShellExitError:
|
||||
return errors.Errorf("touch %q failed with %d: %q", o.remote, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "touch")
|
||||
}
|
||||
|
||||
err = o.stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if diff, ok := checkTimeEqualWithPrecision(t, o.modTime, o.fs.Precision()); !ok {
|
||||
return errors.Errorf("touch %q to %s was ineffective: %d", o.remote, t, diff)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
|
||||
dt := t0.Sub(t1)
|
||||
if dt >= precision || dt <= -precision {
|
||||
return dt, false
|
||||
}
|
||||
return dt, true
|
||||
}
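// For example (illustrative values): with a precision of one second,
// checkTimeEqualWithPrecision(t, t.Add(300*time.Millisecond), time.Second)
// returns (-300ms, true), while a difference of 1.5s returns (-1.5s, false),
// which is what makes touchStatArg report the touch as ineffective.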
|
||||
|
||||
func decodeEntryMode(entryMode uint32) os.FileMode {
|
||||
const (
|
||||
unixIFBLK = 0x6000
|
||||
unixIFMT = 0xf000
|
||||
unixIFCHR = 0x2000
|
||||
unixIFDIR = 0x4000
|
||||
unixIFIFO = 0x1000
|
||||
unixIFLNK = 0xa000
|
||||
unixIFREG = 0x8000
|
||||
unixIFSOCK = 0xc000
|
||||
unixISGID = 0x400
|
||||
unixISUID = 0x800
|
||||
unixISVTX = 0x200
|
||||
)
|
||||
|
||||
mode := os.FileMode(entryMode & 0777)
|
||||
switch entryMode & unixIFMT {
|
||||
case unixIFBLK:
|
||||
mode |= os.ModeDevice
|
||||
case unixIFCHR:
|
||||
mode |= os.ModeDevice | os.ModeCharDevice
|
||||
case unixIFDIR:
|
||||
mode |= os.ModeDir
|
||||
case unixIFIFO:
|
||||
mode |= os.ModeNamedPipe
|
||||
case unixIFLNK:
|
||||
mode |= os.ModeSymlink
|
||||
case unixIFREG:
|
||||
// nothing to do
|
||||
case unixIFSOCK:
|
||||
mode |= os.ModeSocket
|
||||
}
|
||||
if entryMode&unixISGID != 0 {
|
||||
mode |= os.ModeSetgid
|
||||
}
|
||||
if entryMode&unixISUID != 0 {
|
||||
mode |= os.ModeSetuid
|
||||
}
|
||||
if entryMode&unixISVTX != 0 {
|
||||
mode |= os.ModeSticky
|
||||
}
|
||||
return mode
|
||||
}
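// Example (illustrative): a directory with permissions 0755 is reported by
// stat as hex mode 41ed (octal 040755), and decodeEntryMode(0x41ed) returns
// os.ModeDir | 0755; a regular file with mode 0644 (hex 81a4) decodes to a
// plain 0644.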
|
||||
|
||||
type execDevice struct {
|
||||
*adb.Device
|
||||
}
|
||||
|
||||
func (d *execDevice) execCommandWithExitCode(cmd string, arg string) (string, int, error) {
|
||||
cmdLine := fmt.Sprintf("sh -c '%s \"$0\"; echo :$?' '%s'", cmd, strings.Replace(arg, "'", "'\\''", -1))
|
||||
fs.Debugf("adb", "exec: %s", cmdLine)
|
||||
conn, err := d.execCommand(cmdLine)
|
||||
if err != nil {
|
||||
return "", -1, err
|
||||
}
|
||||
|
||||
resp, err := conn.ReadUntilEof()
|
||||
if err != nil {
|
||||
return "", -1, errors.Wrap(err, "ExecCommand")
|
||||
}
|
||||
|
||||
outStr := string(resp)
|
||||
idx := strings.LastIndexByte(outStr, ':')
|
||||
if idx == -1 {
|
||||
return outStr, -1, fmt.Errorf("adb shell aborted, can not parse exit code")
|
||||
}
|
||||
exitCode, _ := strconv.Atoi(strings.TrimSpace(outStr[idx+1:]))
|
||||
if exitCode != 0 {
|
||||
err = adb.ShellExitError{Command: cmdLine, ExitCode: exitCode}
|
||||
}
|
||||
return outStr[:idx], exitCode, err
|
||||
}
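// Example of the convention used above (illustrative values): for cmd "rmdir"
// and arg "/sdcard/My Files" the wire command becomes
//
//	sh -c 'rmdir "$0"; echo :$?' '/sdcard/My Files'
//
// and a failing run might return output such as
// "rmdir: '/sdcard/My Files': Directory not empty\n:1" - everything after
// the last ':' is parsed as the exit code and the rest is returned as the
// command output.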
|
||||
|
||||
func (d *execDevice) execCommand(cmd string, args ...string) (*wire.Conn, error) {
|
||||
cmd = prepareCommandLineEscaped(cmd, args...)
|
||||
conn, err := d.Dial()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "ExecCommand")
|
||||
}
|
||||
defer func() {
|
||||
if err != nil && conn != nil {
|
||||
_ = conn.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
req := fmt.Sprintf("exec:%s", cmd)
|
||||
|
||||
if err = conn.SendMessage([]byte(req)); err != nil {
|
||||
return nil, errors.Wrap(err, "ExecCommand")
|
||||
}
|
||||
if _, err = conn.ReadStatus(req); err != nil {
|
||||
return nil, errors.Wrap(err, "ExecCommand")
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
func prepareCommandLineEscaped(cmd string, args ...string) string {
|
||||
for i, arg := range args {
|
||||
args[i] = fmt.Sprintf("'%s'", strings.Replace(arg, "'", "'\\''", -1))
|
||||
}
|
||||
|
||||
// Prepend the command to the args array.
|
||||
if len(args) > 0 {
|
||||
cmd = fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
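// Quoting example (illustrative): an argument containing a single quote,
// such as it's.txt, is rewritten to 'it'\''s.txt' so the adb shell sees the
// literal name again, and the quoted arguments are appended to cmd separated
// by spaces.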
|
||||
|
||||
type adbReader struct {
|
||||
io.ReadCloser
|
||||
skip int64
|
||||
read int64
|
||||
expected int64
|
||||
}
|
||||
|
||||
func (r *adbReader) Read(b []byte) (n int, err error) {
|
||||
n, err = r.ReadCloser.Read(b)
|
||||
if s := r.skip; n > 0 && s > 0 {
|
||||
_n := int64(n)
|
||||
if _n <= s {
|
||||
r.skip -= _n
|
||||
return r.Read(b)
|
||||
}
|
||||
r.skip = 0
|
||||
copy(b, b[s:n])
|
||||
n -= int(s)
|
||||
}
|
||||
r.read += int64(n)
|
||||
if err == io.EOF && r.read < r.expected {
|
||||
fs.Debugf("adb", "Read: read: %d expected: %d n: %d", r.read, r.expected, n)
|
||||
return n, io.ErrUnexpectedEOF
|
||||
}
|
||||
return n, err
|
||||
}
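// Example of the skip handling above (illustrative): with skip = 1808 and a
// first underlying read of 4096 bytes, the reader copies b[1808:4096] to the
// front of the buffer and returns n = 2288, so the block padding produced by
// dd never reaches the caller; reads that fall entirely inside the padding
// (n <= skip) are retried transparently.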
|
||||
20
backend/adb/adb_test.go
Normal file
@@ -0,0 +1,20 @@
|
||||
// Test ADB filesystem interface
|
||||
package adb_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/adb"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestAdb:/data/local/tmp",
|
||||
NilObject: (*adb.Object)(nil),
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: "TestAdb", Key: "copy_links", Value: "true"},
|
||||
},
|
||||
})
|
||||
}
|
||||
@@ -2,13 +2,12 @@ package alias
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fspath"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -31,7 +30,7 @@ type Options struct {
|
||||
Remote string `config:"remote"`
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path.
|
||||
// NewFs constructs an Fs from the path.
|
||||
//
|
||||
// The returned Fs is the actual Fs, referenced by remote in the config
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
@@ -47,13 +46,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if strings.HasPrefix(opt.Remote, name+":") {
|
||||
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
|
||||
}
|
||||
_, configName, fsPath, err := fs.ParseRemote(opt.Remote)
|
||||
fsInfo, configName, fsPath, config, err := fs.ConfigFs(opt.Remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
root = path.Join(fsPath, filepath.ToSlash(root))
|
||||
if configName == "local" {
|
||||
return fs.NewFs(root)
|
||||
}
|
||||
return fs.NewFs(configName + ":" + root)
|
||||
return fsInfo.NewFs(configName, fspath.JoinRootPath(fsPath, root), config)
|
||||
}
|
||||
|
||||
@@ -80,7 +80,7 @@ func TestNewFS(t *testing.T) {
|
||||
wantEntry := test.entries[i]
|
||||
|
||||
require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
|
||||
require.Equal(t, wantEntry.size, int64(gotEntry.Size()), what)
|
||||
require.Equal(t, wantEntry.size, gotEntry.Size(), what)
|
||||
_, isDir := gotEntry.(fs.Directory)
|
||||
require.Equal(t, wantEntry.isDir, isDir, what)
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package all
|
||||
|
||||
import (
|
||||
// Active file systems
|
||||
_ "github.com/ncw/rclone/backend/adb"
|
||||
_ "github.com/ncw/rclone/backend/alias"
|
||||
_ "github.com/ncw/rclone/backend/amazonclouddrive"
|
||||
_ "github.com/ncw/rclone/backend/azureblob"
|
||||
@@ -25,6 +26,7 @@ import (
|
||||
_ "github.com/ncw/rclone/backend/s3"
|
||||
_ "github.com/ncw/rclone/backend/sftp"
|
||||
_ "github.com/ncw/rclone/backend/swift"
|
||||
_ "github.com/ncw/rclone/backend/union"
|
||||
_ "github.com/ncw/rclone/backend/webdav"
|
||||
_ "github.com/ncw/rclone/backend/yandex"
|
||||
)
|
||||
|
||||
@@ -21,7 +21,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/go-acd"
|
||||
acd "github.com/ncw/go-acd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
@@ -97,13 +97,42 @@ func init() {
|
||||
Hide: fs.OptionHideBoth,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_wait_per_gb",
|
||||
Help: "Additional time per GB to wait after a failed complete upload to see if it appears.",
|
||||
Name: "upload_wait_per_gb",
|
||||
Help: `Additional time per GB to wait after a failed complete upload to see if it appears.
|
||||
|
||||
Sometimes Amazon Drive gives an error when a file has been fully
|
||||
uploaded but the file appears anyway after a little while. This
|
||||
happens sometimes for files over 1GB in size and nearly every time for
|
||||
files bigger than 10GB. This parameter controls the time rclone waits
|
||||
for the file to appear.
|
||||
|
||||
The default value for this parameter is 3 minutes per GB, so by
|
||||
default it will wait 3 minutes for every GB uploaded to see if the
|
||||
file appears.
|
||||
|
||||
You can disable this feature by setting it to 0. This may cause
|
||||
conflict errors as rclone retries the failed upload but the file will
|
||||
most likely appear correctly eventually.
|
||||
|
||||
These values were determined empirically by observing lots of uploads
|
||||
of big files for a range of file sizes.
|
||||
|
||||
Upload with the "-v" flag to see more info about what rclone is doing
|
||||
in this situation.`,
|
||||
Default: fs.Duration(180 * time.Second),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "templink_threshold",
|
||||
Help: "Files >= this size will be downloaded via their tempLink.",
|
||||
Name: "templink_threshold",
|
||||
Help: `Files >= this size will be downloaded via their tempLink.
|
||||
|
||||
Files this size or more will be downloaded via their "tempLink". This
|
||||
is to work around a problem with Amazon Drive which blocks downloads
|
||||
of files bigger than about 10GB. The default for this is 9GB which
|
||||
shouldn't need to be changed.
|
||||
|
||||
To download files above this threshold, rclone requests a "tempLink"
|
||||
which downloads the file through a temporary URL directly from the
|
||||
underlying S3 storage.`,
|
||||
Default: defaultTempLinkThreshold,
|
||||
Advanced: true,
|
||||
}},
|
||||
@@ -235,7 +264,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure Amazon Drive: %v", err)
|
||||
return nil, errors.Wrap(err, "failed to configure Amazon Drive")
|
||||
}
|
||||
|
||||
c := acd.NewClient(oAuthClient)
|
||||
@@ -283,16 +312,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
// Assume it is a file
|
||||
newRoot, remote := dircache.SplitPath(root)
|
||||
newF := *f
|
||||
newF.dirCache = dircache.New(newRoot, f.trueRootID, &newF)
|
||||
newF.root = newRoot
|
||||
tempF := *f
|
||||
tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
|
||||
tempF.root = newRoot
|
||||
// Make new Fs which is the parent
|
||||
err = newF.dirCache.FindRoot(false)
|
||||
err = tempF.dirCache.FindRoot(false)
|
||||
if err != nil {
|
||||
// No root so return old f
|
||||
return f, nil
|
||||
}
|
||||
_, err := newF.newObjectWithInfo(remote, nil)
|
||||
_, err := tempF.newObjectWithInfo(remote, nil)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// File doesn't exist so return old f
|
||||
@@ -300,8 +329,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// XXX: update the old f here instead of returning tempF, since
|
||||
// `features` were already filled with functions having *f as a receiver.
|
||||
// See https://github.com/ncw/rclone/issues/2182
|
||||
f.dirCache = tempF.dirCache
|
||||
f.root = tempF.root
|
||||
// return an error with an fs which points to the parent
|
||||
return &newF, fs.ErrorIsFile
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
@@ -1240,24 +1274,38 @@ func (o *Object) MimeType() string {
|
||||
// Automatically restarts itself in case of unexpected behaviour of the remote.
|
||||
//
|
||||
// Close the returned channel to stop being notified.
|
||||
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
|
||||
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
|
||||
checkpoint := f.opt.Checkpoint
|
||||
|
||||
quit := make(chan bool)
|
||||
go func() {
|
||||
var ticker *time.Ticker
|
||||
var tickerC <-chan time.Time
|
||||
for {
|
||||
checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
|
||||
if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
|
||||
fs.Debugf(f, "Unable to save checkpoint: %v", err)
|
||||
}
|
||||
select {
|
||||
case <-quit:
|
||||
return
|
||||
case <-time.After(pollInterval):
|
||||
case pollInterval, ok := <-pollIntervalChan:
|
||||
if !ok {
|
||||
if ticker != nil {
|
||||
ticker.Stop()
|
||||
}
|
||||
return
|
||||
}
|
||||
if pollInterval == 0 {
|
||||
if ticker != nil {
|
||||
ticker.Stop()
|
||||
ticker, tickerC = nil, nil
|
||||
}
|
||||
} else {
|
||||
ticker = time.NewTicker(pollInterval)
|
||||
tickerC = ticker.C
|
||||
}
|
||||
case <-tickerC:
|
||||
checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
|
||||
if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
|
||||
fs.Debugf(f, "Unable to save checkpoint: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
return quit
|
||||
}
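// With the reworked signature above, callers drive polling by sending
// intervals on pollIntervalChan: a zero duration pauses the ticker, a
// non-zero duration (re)starts it, and closing the channel stops the
// notifier goroutine.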
|
||||
|
||||
func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string {
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
|
||||
|
||||
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
|
||||
// +build !plan9,!solaris,go1.8
|
||||
|
||||
package azureblob
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
@@ -21,12 +22,14 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/ncw/rclone/lib/pacer"
|
||||
@@ -37,18 +40,19 @@ const (
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 10 * time.Second
|
||||
decayConstant = 1 // bigger for slower decay, exponential
|
||||
listChunkSize = 5000 // number of items to read at once
|
||||
maxListChunkSize = 5000 // number of items to read at once
|
||||
modTimeKey = "mtime"
|
||||
timeFormatIn = time.RFC3339
|
||||
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
|
||||
maxTotalParts = 50000 // in multipart upload
|
||||
storageDefaultBaseURL = "blob.core.windows.net"
|
||||
// maxUncommittedSize = 9 << 30 // can't upload bigger than this
|
||||
defaultChunkSize = 4 * 1024 * 1024
|
||||
maxChunkSize = 100 * 1024 * 1024
|
||||
defaultUploadCutoff = 256 * 1024 * 1024
|
||||
maxUploadCutoff = 256 * 1024 * 1024
|
||||
defaultChunkSize = 4 * fs.MebiByte
|
||||
maxChunkSize = 100 * fs.MebiByte
|
||||
defaultUploadCutoff = 256 * fs.MebiByte
|
||||
maxUploadCutoff = 256 * fs.MebiByte
|
||||
defaultAccessTier = azblob.AccessTierNone
|
||||
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -72,18 +76,44 @@ func init() {
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: "Cutoff for switching to chunked upload.",
|
||||
Default: fs.SizeSuffix(defaultUploadCutoff),
|
||||
Help: "Cutoff for switching to chunked upload (<= 256MB).",
|
||||
Default: defaultUploadCutoff,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "Upload chunk size. Must fit in memory.",
|
||||
Default: fs.SizeSuffix(defaultChunkSize),
|
||||
Name: "chunk_size",
|
||||
Help: `Upload chunk size (<= 100MB).
|
||||
|
||||
Note that this is stored in memory and there may be up to
|
||||
"--transfers" chunks stored at once in memory.`,
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "list_chunk",
|
||||
Help: `Size of blob list.
|
||||
|
||||
This sets the number of blobs requested in each listing chunk. Default
|
||||
is the maximum, 5000. "List blobs" requests are permitted 2 minutes
|
||||
per megabyte to complete. If an operation is taking longer than 2
|
||||
minutes per megabyte on average, it will time out (
|
||||
[source](https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-blob-service-operations#exceptions-to-default-timeout-interval)
|
||||
). This can be used to limit the number of blob items to return, to
|
||||
avoid the time out.`,
|
||||
Default: maxListChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "access_tier",
|
||||
Help: "Access tier of blob, supports hot, cool and archive tiers.\nArchived blobs can be restored by setting access tier to hot or cool." +
|
||||
" Leave blank if you intend to use default access tier, which is set at account level",
|
||||
Help: `Access tier of blob: hot, cool or archive.
|
||||
|
||||
Archived blobs can be restored by setting access tier to hot or
|
||||
cool. Leave blank if you intend to use default access tier, which is
|
||||
set at account level
|
||||
|
||||
If there is no "access tier" specified, rclone doesn't apply any tier.
|
||||
rclone performs a "Set Tier" operation on blobs while uploading. If objects
are not modified, specifying a new "access tier" will have no effect.
|
||||
If blobs are in "archive tier" at remote, trying to perform data transfer
|
||||
operations from remote will not be allowed. User should first restore by
|
||||
tiering blob to "Hot" or "Cool".`,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
@@ -91,13 +121,14 @@ func init() {
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Account string `config:"account"`
|
||||
Key string `config:"key"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
SASURL string `config:"sas_url"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
AccessTier string `config:"access_tier"`
|
||||
Account string `config:"account"`
|
||||
Key string `config:"key"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
SASURL string `config:"sas_url"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
ListChunkSize uint `config:"list_chunk"`
|
||||
AccessTier string `config:"access_tier"`
|
||||
}
|
||||
|
||||
// Fs represents a remote azure server
|
||||
@@ -106,6 +137,7 @@ type Fs struct {
|
||||
root string // the path we are working on if any
|
||||
opt Options // parsed config options
|
||||
features *fs.Features // optional features
|
||||
client *http.Client // http client we are using
|
||||
svcURL *azblob.ServiceURL // reference to serviceURL
|
||||
cntURL *azblob.ContainerURL // reference to containerURL
|
||||
container string // the container we are working on
|
||||
@@ -171,6 +203,19 @@ func parsePath(path string) (container, directory string, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// validateAccessTier checks if azureblob supports user supplied tier
|
||||
func validateAccessTier(tier string) bool {
|
||||
switch tier {
|
||||
case string(azblob.AccessTierHot),
|
||||
string(azblob.AccessTierCool),
|
||||
string(azblob.AccessTierArchive):
|
||||
// valid cases
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// retryErrorCodes is a slice of error codes that we will retry
|
||||
var retryErrorCodes = []int{
|
||||
401, // Unauthorized (eg "Token has expired")
|
||||
@@ -196,7 +241,73 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
|
||||
return fserrors.ShouldRetry(err), err
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||
const minChunkSize = fs.Byte
|
||||
if cs < minChunkSize {
|
||||
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
}
|
||||
if cs > maxChunkSize {
|
||||
return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadChunkSize(cs)
|
||||
if err == nil {
|
||||
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func checkUploadCutoff(cs fs.SizeSuffix) error {
|
||||
if cs > maxUploadCutoff {
|
||||
return errors.Errorf("%v must be less than or equal to %v", cs, maxUploadCutoff)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadCutoff(cs)
|
||||
if err == nil {
|
||||
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// httpClientFactory creates a Factory object that sends HTTP requests
|
||||
// to rclone's http.Client.
|
||||
//
|
||||
// copied from azblob.newDefaultHTTPClientFactory
|
||||
func httpClientFactory(client *http.Client) pipeline.Factory {
|
||||
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
|
||||
return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
||||
r, err := client.Do(request.WithContext(ctx))
|
||||
if err != nil {
|
||||
err = pipeline.NewError(err, "HTTP request failed")
|
||||
}
|
||||
return pipeline.NewHTTPResponse(r), err
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// newPipeline creates a Pipeline using the specified credentials and options.
|
||||
//
|
||||
// this code was copied from azblob.NewPipeline
|
||||
func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline.Pipeline {
|
||||
// Closest to API goes first; closest to the wire goes last
|
||||
factories := []pipeline.Factory{
|
||||
azblob.NewTelemetryPolicyFactory(o.Telemetry),
|
||||
azblob.NewUniqueRequestIDPolicyFactory(),
|
||||
azblob.NewRetryPolicyFactory(o.Retry),
|
||||
c,
|
||||
pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked
|
||||
azblob.NewRequestLogPolicyFactory(o.RequestLog),
|
||||
}
|
||||
return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log})
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
@@ -205,11 +316,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if opt.UploadCutoff > maxUploadCutoff {
|
||||
return nil, errors.Errorf("azure: upload cutoff (%v) must be less than or equal to %v", opt.UploadCutoff, maxUploadCutoff)
|
||||
err = checkUploadCutoff(opt.UploadCutoff)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "azure: upload cutoff")
|
||||
}
|
||||
if opt.ChunkSize > maxChunkSize {
|
||||
return nil, errors.Errorf("azure: chunk size can't be greater than %v - was %v", maxChunkSize, opt.ChunkSize)
|
||||
err = checkUploadChunkSize(opt.ChunkSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "azure: chunk size")
|
||||
}
|
||||
if opt.ListChunkSize > maxListChunkSize {
|
||||
return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
|
||||
}
|
||||
container, directory, err := parsePath(root)
|
||||
if err != nil {
|
||||
@@ -221,17 +337,28 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
|
||||
if opt.AccessTier == "" {
|
||||
opt.AccessTier = string(defaultAccessTier)
|
||||
} else {
|
||||
switch opt.AccessTier {
|
||||
case string(azblob.AccessTierHot):
|
||||
case string(azblob.AccessTierCool):
|
||||
case string(azblob.AccessTierArchive):
|
||||
// valid cases
|
||||
default:
|
||||
return nil, errors.Errorf("azure: Supported access tiers are %s, %s and %s", string(azblob.AccessTierHot), string(azblob.AccessTierCool), azblob.AccessTierArchive)
|
||||
}
|
||||
} else if !validateAccessTier(opt.AccessTier) {
|
||||
return nil, errors.Errorf("Azure Blob: Supported access tiers are %s, %s and %s",
|
||||
string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
container: container,
|
||||
root: directory,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant).SetPacer(pacer.S3Pacer),
|
||||
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
|
||||
client: fshttp.NewClient(fs.Config),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
BucketBased: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
}).Fill(f)
|
||||
|
||||
var (
|
||||
u *url.URL
|
||||
serviceURL azblob.ServiceURL
|
||||
@@ -239,12 +366,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
)
|
||||
switch {
|
||||
case opt.Account != "" && opt.Key != "":
|
||||
credential := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
|
||||
credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Failed to parse credentials")
|
||||
}
|
||||
|
||||
u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
|
||||
}
|
||||
pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
|
||||
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
|
||||
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
||||
containerURL = serviceURL.NewContainerURL(container)
|
||||
case opt.SASURL != "":
|
||||
@@ -253,7 +384,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
return nil, errors.Wrapf(err, "failed to parse SAS URL")
|
||||
}
|
||||
// use anonymous credentials in case of sas url
|
||||
pipeline := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
|
||||
pipeline := f.newPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
|
||||
// Check if we have container level SAS or account level sas
|
||||
parts := azblob.NewBlobURLParts(*u)
|
||||
if parts.ContainerName != "" {
|
||||
@@ -261,7 +392,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
return nil, errors.New("Container name in SAS URL and container provided in command do not match")
|
||||
}
|
||||
|
||||
container = parts.ContainerName
|
||||
f.container = parts.ContainerName
|
||||
containerURL = azblob.NewContainerURL(*u, pipeline)
|
||||
} else {
|
||||
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
||||
@@ -270,22 +401,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
default:
|
||||
return nil, errors.New("Need account+key or connectionString or sasURL")
|
||||
}
|
||||
f.svcURL = &serviceURL
|
||||
f.cntURL = &containerURL
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
container: container,
|
||||
root: directory,
|
||||
svcURL: &serviceURL,
|
||||
cntURL: &containerURL,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
BucketBased: true,
|
||||
}).Fill(f)
|
||||
if f.root != "" {
|
||||
f.root += "/"
|
||||
// Check to see if the (container,directory) is actually an existing file
|
||||
@@ -299,8 +417,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
_, err := f.NewObject(remote)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// File doesn't exist so return old f
|
||||
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
|
||||
// File doesn't exist or is a directory so return old f
|
||||
f.root = oldRoot
|
||||
return f, nil
|
||||
}
|
||||
@@ -356,6 +474,21 @@ func (o *Object) updateMetadataWithModTime(modTime time.Time) {
|
||||
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
|
||||
}
|
||||
|
||||
// Returns whether file is a directory marker or not
|
||||
func isDirectoryMarker(size int64, metadata azblob.Metadata, remote string) bool {
|
||||
// Directory markers are 0 length
|
||||
if size == 0 {
|
||||
// Note that metadata with hdi_isfolder = true seems to be a
|
||||
// de facto standard for marking blobs as directories.
|
||||
endsWithSlash := strings.HasSuffix(remote, "/")
|
||||
if endsWithSlash || remote == "" || metadata["hdi_isfolder"] == "true" {
|
||||
return true
|
||||
}
|
||||
|
||||
}
|
||||
return false
|
||||
}
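// For example (illustrative): a zero length blob named "photos/", or one
// carrying the metadata hdi_isfolder=true, is treated as a directory marker
// by the listing code below rather than being returned as a file.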
|
||||
|
||||
// listFn is called from list to handle an object
|
||||
type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
|
||||
|
||||
@@ -391,6 +524,7 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
||||
MaxResults: int32(maxResults),
|
||||
}
|
||||
ctx := context.Background()
|
||||
directoryMarkers := map[string]struct{}{}
|
||||
for marker := (azblob.Marker{}); marker.NotDone(); {
|
||||
var response *azblob.ListBlobsHierarchySegmentResponse
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
@@ -420,13 +554,23 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
||||
continue
|
||||
}
|
||||
remote := file.Name[len(f.root):]
|
||||
// Check for directory
|
||||
isDirectory := strings.HasSuffix(remote, "/")
|
||||
if isDirectory {
|
||||
remote = remote[:len(remote)-1]
|
||||
if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) {
|
||||
if strings.HasSuffix(remote, "/") {
|
||||
remote = remote[:len(remote)-1]
|
||||
}
|
||||
err = fn(remote, file, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Keep track of directory markers. If recursing then
|
||||
// there will be no Prefixes so no need to keep track
|
||||
if !recurse {
|
||||
directoryMarkers[remote] = struct{}{}
|
||||
}
|
||||
continue // skip directory marker
|
||||
}
|
||||
// Send object
|
||||
err = fn(remote, file, isDirectory)
|
||||
err = fn(remote, file, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -439,6 +583,10 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
||||
continue
|
||||
}
|
||||
remote = remote[len(f.root):]
|
||||
// Don't send if already sent as a directory marker
|
||||
if _, found := directoryMarkers[remote]; found {
|
||||
continue
|
||||
}
|
||||
// Send object
|
||||
err = fn(remote, nil, true)
|
||||
if err != nil {
|
||||
@@ -474,7 +622,7 @@ func (f *Fs) markContainerOK() {
|
||||
|
||||
// listDir lists a single directory
|
||||
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
|
||||
err = f.list(dir, false, listChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||
err = f.list(dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(remote, object, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -545,7 +693,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
||||
return fs.ErrorListBucketRequired
|
||||
}
|
||||
list := walk.NewListRHelper(callback)
|
||||
err = f.list(dir, true, listChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||
err = f.list(dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(remote, object, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -566,11 +714,11 @@ type listContainerFn func(*azblob.ContainerItem) error
|
||||
// listContainersToFn lists the containers to the function supplied
|
||||
func (f *Fs) listContainersToFn(fn listContainerFn) error {
|
||||
params := azblob.ListContainersSegmentOptions{
|
||||
MaxResults: int32(listChunkSize),
|
||||
MaxResults: int32(f.opt.ListChunkSize),
|
||||
}
|
||||
ctx := context.Background()
|
||||
for marker := (azblob.Marker{}); marker.NotDone(); {
|
||||
var response *azblob.ListContainersResponse
|
||||
var response *azblob.ListContainersSegmentResponse
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
response, err = f.svcURL.ListContainersSegment(ctx, marker, params)
|
||||
@@ -606,6 +754,35 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
|
||||
return fs, fs.Update(in, src, options...)
|
||||
}
|
||||
|
||||
// Check if the container exists
|
||||
//
|
||||
// NB this can return incorrect results if called immediately after container deletion
|
||||
func (f *Fs) dirExists() (bool, error) {
|
||||
options := azblob.ListBlobsSegmentOptions{
|
||||
Details: azblob.BlobListingDetails{
|
||||
Copy: false,
|
||||
Metadata: false,
|
||||
Snapshots: false,
|
||||
UncommittedBlobs: false,
|
||||
Deleted: false,
|
||||
},
|
||||
MaxResults: 1,
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
ctx := context.Background()
|
||||
_, err := f.cntURL.ListBlobsHierarchySegment(ctx, azblob.Marker{}, "", options)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
|
||||
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Mkdir creates the container if it doesn't exist
|
||||
func (f *Fs) Mkdir(dir string) error {
|
||||
f.containerOKMu.Lock()
|
||||
@@ -613,6 +790,15 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
if f.containerOK {
|
||||
return nil
|
||||
}
|
||||
if !f.containerDeleted {
|
||||
exists, err := f.dirExists()
|
||||
if err == nil {
|
||||
f.containerOK = exists
|
||||
}
|
||||
if err != nil || exists {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// now try to create the container
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
@@ -625,6 +811,11 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
f.containerOK = true
|
||||
return false, nil
|
||||
case azblob.ServiceCodeContainerBeingDeleted:
|
||||
// From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
|
||||
// When a container is deleted, a container with the same name cannot be created
|
||||
// for at least 30 seconds; the container may not be available for more than 30
|
||||
// seconds if the service is still processing the request.
|
||||
time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
|
||||
f.containerDeleted = true
|
||||
return true, err
|
||||
}
|
||||
@@ -642,7 +833,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
// isEmpty checks to see if a given directory is empty and returns an error if not
|
||||
func (f *Fs) isEmpty(dir string) (err error) {
|
||||
empty := true
|
||||
err = f.list("", true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||
err = f.list(dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||
empty = false
|
||||
return nil
|
||||
})
|
||||
@@ -752,7 +943,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
var startCopy *azblob.BlobStartCopyFromURLResponse
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
startCopy, err = dstBlobURL.StartCopyFromURL(ctx, *source, nil, options, options)
|
||||
startCopy, err = dstBlobURL.StartCopyFromURL(ctx, *source, nil, azblob.ModifiedAccessConditions{}, options)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -837,26 +1028,37 @@ func (o *Object) setMetadata(metadata azblob.Metadata) {
|
||||
// o.md5
|
||||
// o.meta
|
||||
func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetPropertiesResponse) (err error) {
|
||||
// NOTE - In BlobGetPropertiesResponse, Client library returns MD5 as base64 decoded string
|
||||
// unlike BlobProperties in BlobItem (used in decodeMetadataFromBlob) which returns base64
|
||||
// encoded bytes. Object needs to maintain this as base64 encoded string.
|
||||
metadata := info.NewMetadata()
|
||||
size := info.ContentLength()
|
||||
if isDirectoryMarker(size, metadata, o.remote) {
|
||||
return fs.ErrorNotAFile
|
||||
}
|
||||
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
|
||||
// this as base64 encoded string.
|
||||
o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
|
||||
o.mimeType = info.ContentType()
|
||||
o.size = info.ContentLength()
|
||||
o.modTime = time.Time(info.LastModified())
|
||||
o.size = size
|
||||
o.modTime = info.LastModified()
|
||||
o.accessTier = azblob.AccessTierType(info.AccessTier())
|
||||
o.setMetadata(info.NewMetadata())
|
||||
o.setMetadata(metadata)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
|
||||
o.md5 = string(info.Properties.ContentMD5)
|
||||
metadata := info.Metadata
|
||||
size := *info.Properties.ContentLength
|
||||
if isDirectoryMarker(size, metadata, o.remote) {
|
||||
return fs.ErrorNotAFile
|
||||
}
|
||||
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
|
||||
// this as base64 encoded string.
|
||||
o.md5 = base64.StdEncoding.EncodeToString(info.Properties.ContentMD5)
|
||||
o.mimeType = *info.Properties.ContentType
|
||||
o.size = *info.Properties.ContentLength
|
||||
o.size = size
|
||||
o.modTime = info.Properties.LastModified
|
||||
o.accessTier = info.Properties.AccessTier
|
||||
o.setMetadata(info.Metadata)
|
||||
o.setMetadata(metadata)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -902,12 +1104,6 @@ func (o *Object) readMetaData() (err error) {
|
||||
return o.decodeMetaDataFromPropertiesResponse(blobProperties)
|
||||
}
|
||||
|
||||
// timeString returns modTime as the number of milliseconds
|
||||
// elapsed since January 1, 1970 UTC as a decimal string.
|
||||
func timeString(modTime time.Time) string {
|
||||
return strconv.FormatInt(modTime.UnixNano()/1E6, 10)
|
||||
}
|
||||
|
||||
// parseTimeString converts a decimal string number of milliseconds
|
||||
// elapsed since January 1, 1970 UTC into a time.Time and stores it in
|
||||
// the modTime variable.
|
||||
@@ -1068,7 +1264,7 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, blob *azblob.BlobURL,
|
||||
var (
|
||||
rawID uint64
|
||||
blockID = "" // id in base64 encoded form
|
||||
blocks = make([]string, totalParts)
|
||||
blocks []string
|
||||
)
|
||||
|
||||
// increment the blockID
|
||||
@@ -1125,11 +1321,14 @@ outer:
|
||||
defer o.fs.uploadToken.Put()
|
||||
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, totalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
|
||||
|
||||
// Upload the block, with MD5 for check
|
||||
md5sum := md5.Sum(buf)
|
||||
transactionalMD5 := md5sum[:]
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
bufferReader := bytes.NewReader(buf)
|
||||
wrappedReader := wrap(bufferReader)
|
||||
rs := readSeeker{wrappedReader, bufferReader}
|
||||
_, err = blockBlobURL.StageBlock(ctx, blockID, rs, ac)
|
||||
_, err = blockBlobURL.StageBlock(ctx, blockID, &rs, ac, transactionalMD5)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
|
||||
@@ -1206,11 +1405,20 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
Metadata: o.meta,
|
||||
BlobHTTPHeaders: httpHeaders,
|
||||
}
|
||||
// FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
|
||||
// is merged the SDK can't upload a single blob of exactly the chunk
|
||||
// size, so upload with a multpart upload to work around.
|
||||
// See: https://github.com/ncw/rclone/issues/2653
|
||||
multipartUpload := size >= int64(o.fs.opt.UploadCutoff)
|
||||
if size == int64(o.fs.opt.ChunkSize) {
|
||||
multipartUpload = true
|
||||
fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
// Don't retry, return a retry error instead
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
if size >= int64(o.fs.opt.UploadCutoff) {
|
||||
if multipartUpload {
|
||||
// If a large file upload in chunks
|
||||
err = o.uploadMultipart(in, size, &blob, &httpHeaders)
|
||||
} else {
|
||||
@@ -1236,16 +1444,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
}
|
||||
|
||||
// Now, set blob tier based on configured access tier
|
||||
desiredAccessTier := azblob.AccessTierType(o.fs.opt.AccessTier)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err := blob.SetTier(ctx, desiredAccessTier)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed to set Blob Tier")
|
||||
}
|
||||
return nil
|
||||
return o.SetTier(o.fs.opt.AccessTier)
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
@@ -1270,6 +1469,41 @@ func (o *Object) AccessTier() azblob.AccessTierType {
|
||||
return o.accessTier
|
||||
}
|
||||
|
||||
// SetTier performs changing object tier
|
||||
func (o *Object) SetTier(tier string) error {
|
||||
if !validateAccessTier(tier) {
|
||||
return errors.Errorf("Tier %s not supported by Azure Blob Storage", tier)
|
||||
}
|
||||
|
||||
// Check if current tier already matches with desired tier
|
||||
if o.GetTier() == tier {
|
||||
return nil
|
||||
}
|
||||
desiredAccessTier := azblob.AccessTierType(tier)
|
||||
blob := o.getBlobReference()
|
||||
ctx := context.Background()
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err := blob.SetTier(ctx, desiredAccessTier, azblob.LeaseAccessConditions{})
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed to set Blob Tier")
|
||||
}
|
||||
|
||||
// Set access tier on local object also, this typically
|
||||
// gets updated on get blob properties
|
||||
o.accessTier = desiredAccessTier
|
||||
fs.Debugf(o, "Successfully changed object tier to %s", tier)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetTier returns object tier in azure as string
|
||||
func (o *Object) GetTier() string {
|
||||
return string(o.accessTier)
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
|
||||
18
backend/azureblob/azureblob_internal_test.go
Normal file
@@ -0,0 +1,18 @@
|
||||
// +build !plan9,!solaris,go1.8
|
||||
|
||||
package azureblob
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
// Check first feature flags are set on this
|
||||
// remote
|
||||
enabled := f.Features().SetTier
|
||||
assert.True(t, enabled)
|
||||
enabled = f.Features().GetTier
|
||||
assert.True(t, enabled)
|
||||
}
|
||||
@@ -1,20 +1,37 @@
|
||||
// Test AzureBlob filesystem interface
|
||||
|
||||
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
|
||||
// +build !plan9,!solaris,go1.8
|
||||
|
||||
package azureblob_test
|
||||
package azureblob
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/azureblob"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestAzureBlob:",
|
||||
NilObject: (*azureblob.Object)(nil),
|
||||
RemoteName: "TestAzureBlob:",
|
||||
NilObject: (*Object)(nil),
|
||||
TiersToTest: []string{"Hot", "Cool"},
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MaxChunkSize: maxChunkSize,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadCutoff(cs)
|
||||
}
|
||||
|
||||
var (
|
||||
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
_ fstests.SetUploadCutoffer = (*Fs)(nil)
|
||||
)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Build for azureblob for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
// +build freebsd netbsd openbsd plan9 solaris !go1.8
|
||||
// +build plan9 solaris !go1.8
|
||||
|
||||
package azureblob
|
||||
|
||||
@@ -17,12 +17,12 @@ type Error struct {
|
||||
Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
|
||||
}
|
||||
|
||||
// Error statisfies the error interface
|
||||
// Error satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
|
||||
}
|
||||
|
||||
// Fatal statisfies the Fatal interface
|
||||
// Fatal satisfies the Fatal interface
|
||||
//
|
||||
// It indicates which errors should be treated as fatal
|
||||
func (e *Error) Fatal() bool {
|
||||
@@ -100,7 +100,7 @@ func RemoveVersion(remote string) (t Timestamp, newRemote string) {
|
||||
return Timestamp(newT), base[:versionStart] + ext
|
||||
}
|
||||
|
||||
// IsZero returns true if the timestamp is unitialised
|
||||
// IsZero returns true if the timestamp is uninitialized
|
||||
func (t Timestamp) IsZero() bool {
|
||||
return time.Time(t).IsZero()
|
||||
}
|
||||
@@ -136,6 +136,7 @@ type AuthorizeAccountResponse struct {
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
|
||||
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
|
||||
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
|
||||
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
|
||||
NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
|
||||
} `json:"allowed"`
|
||||
|
||||
176
backend/b2/b2.go
@@ -48,9 +48,9 @@ const (
|
||||
decayConstant = 1 // bigger for slower decay, exponential
|
||||
maxParts = 10000
|
||||
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
|
||||
minChunkSize = 5E6
|
||||
defaultChunkSize = 96 * 1024 * 1024
|
||||
defaultUploadCutoff = 200E6
|
||||
minChunkSize = 5 * fs.MebiByte
|
||||
defaultChunkSize = 96 * fs.MebiByte
|
||||
defaultUploadCutoff = 200 * fs.MebiByte
|
||||
)
|
||||
|
||||
// Globals
|
||||
@@ -77,14 +77,24 @@ func init() {
|
||||
Help: "Endpoint for the service.\nLeave blank normally.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "test_mode",
|
||||
Help: "A flag string for X-Bz-Test-Mode header for debugging.",
|
||||
Name: "test_mode",
|
||||
Help: `A flag string for X-Bz-Test-Mode header for debugging.
|
||||
|
||||
This is for debugging purposes only. Setting it to one of the strings
|
||||
below will cause b2 to return specific errors:
|
||||
|
||||
* "fail_some_uploads"
|
||||
* "expire_some_account_authorization_tokens"
|
||||
* "force_cap_exceeded"
|
||||
|
||||
These will be set in the "X-Bz-Test-Mode" header which is documented
|
||||
in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`,
|
||||
Default: "",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "versions",
|
||||
Help: "Include old versions in directory listings.",
|
||||
Help: "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
@@ -92,14 +102,36 @@ func init() {
|
||||
Help: "Permanently delete files on remote removal, otherwise hide files.",
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: "Cutoff for switching to chunked upload.",
|
||||
Default: fs.SizeSuffix(defaultUploadCutoff),
|
||||
Name: "upload_cutoff",
|
||||
Help: `Cutoff for switching to chunked upload.
|
||||
|
||||
Files above this size will be uploaded in chunks of "--b2-chunk-size".
|
||||
|
||||
This value should be set no larger than 4.657GiB (== 5GB).`,
|
||||
Default: defaultUploadCutoff,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "Upload chunk size. Must fit in memory.",
|
||||
Default: fs.SizeSuffix(defaultChunkSize),
|
||||
Name: "chunk_size",
|
||||
Help: `Upload chunk size. Must fit in memory.
|
||||
|
||||
When uploading large files, chunk the file into this size. Note that
|
||||
these chunks are buffered in memory and there might a maximum of
|
||||
"--transfers" chunks in progress at once. 5,000,000 Bytes is the
|
||||
minimum size.`,
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "disable_checksum",
|
||||
Help: `Disable checksums for large (> upload cutoff) files`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "download_url",
|
||||
Help: `Custom endpoint for downloads.
|
||||
|
||||
This is usually set to a Cloudflare CDN URL as Backblaze offers
|
||||
free egress for data downloaded through the Cloudflare network.
|
||||
Leave blank if you want to use the endpoint provided by Backblaze.`,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
@@ -107,14 +139,16 @@ func init() {
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Account string `config:"account"`
|
||||
Key string `config:"key"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
TestMode string `config:"test_mode"`
|
||||
Versions bool `config:"versions"`
|
||||
HardDelete bool `config:"hard_delete"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
Account string `config:"account"`
|
||||
Key string `config:"key"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
TestMode string `config:"test_mode"`
|
||||
Versions bool `config:"versions"`
|
||||
HardDelete bool `config:"hard_delete"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
DisableCheckSum bool `config:"disable_checksum"`
|
||||
DownloadURL string `config:"download_url"`
|
||||
}
|
||||
|
||||
// Fs represents a remote b2 server
|
||||
@@ -263,7 +297,38 @@ func errorHandler(resp *http.Response) error {
|
||||
return errResponse
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, bucket:path
|
||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}

func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
f.fillBufferTokens() // reset the buffer tokens
}
return
}

func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
if cs < opt.ChunkSize {
return errors.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
}
return nil
}

func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(&f.opt, cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
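Taken together, the two checks above give the b2 backend the invariant chunk_size >= 5 MiB and upload_cutoff >= chunk_size, which NewFs enforces further down. The following is a minimal standalone sketch of that validation, not part of the diff: the helper names mirror the code above, but checkUploadCutoff is simplified to plain arguments and the values in main are illustrative only.

// Standalone sketch of the b2 size invariants (assumed example, not rclone code).
package main

import (
	"fmt"

	"github.com/ncw/rclone/fs"
)

const minChunkSize = 5 * fs.MebiByte

func checkUploadChunkSize(cs fs.SizeSuffix) error {
	if cs < minChunkSize {
		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
	}
	return nil
}

// simplified: the real helper reads the chunk size from *Options
func checkUploadCutoff(chunkSize, cutoff fs.SizeSuffix) error {
	if cutoff < chunkSize {
		return fmt.Errorf("%v is less than chunk size %v", cutoff, chunkSize)
	}
	return nil
}

func main() {
	chunk, cutoff := 96*fs.MebiByte, 200*fs.MebiByte
	fmt.Println(checkUploadChunkSize(chunk), checkUploadCutoff(chunk, cutoff)) // <nil> <nil>
	fmt.Println(checkUploadChunkSize(1 * fs.MebiByte))                         // error: below the 5 MiB minimum
}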
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
@@ -271,11 +336,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.UploadCutoff < opt.ChunkSize {
|
||||
return nil, errors.Errorf("b2: upload cutoff (%v) must be greater than or equal to chunk size (%v)", opt.UploadCutoff, opt.ChunkSize)
|
||||
err = checkUploadCutoff(opt, opt.UploadCutoff)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "b2: upload cutoff")
|
||||
}
|
||||
if opt.ChunkSize < minChunkSize {
|
||||
return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, opt.ChunkSize)
|
||||
err = checkUploadChunkSize(opt.ChunkSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "b2: chunk size")
|
||||
}
|
||||
bucket, directory, err := parsePath(root)
|
||||
if err != nil {
|
||||
@@ -291,13 +358,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
opt.Endpoint = defaultEndpoint
|
||||
}
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
bucket: bucket,
|
||||
root: directory,
|
||||
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
bufferTokens: make(chan []byte, fs.Config.Transfers),
|
||||
name: name,
|
||||
opt: *opt,
|
||||
bucket: bucket,
|
||||
root: directory,
|
||||
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
@@ -310,16 +376,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
f.srv.SetHeader(testModeHeader, testMode)
|
||||
fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
|
||||
}
|
||||
// Fill up the buffer tokens
|
||||
for i := 0; i < fs.Config.Transfers; i++ {
|
||||
f.bufferTokens <- nil
|
||||
}
|
||||
f.fillBufferTokens()
|
||||
err = f.authorizeAccount()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to authorize account")
|
||||
}
|
||||
// If this is a key limited to a single bucket, it must exist already
|
||||
if f.bucket != "" && f.info.Allowed.BucketID != "" {
|
||||
allowedBucket := f.info.Allowed.BucketName
|
||||
if allowedBucket == "" {
|
||||
return nil, errors.New("bucket that application key is restricted to no longer exists")
|
||||
}
|
||||
if allowedBucket != f.bucket {
|
||||
return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
|
||||
}
|
||||
f.markBucketOK()
|
||||
f.setBucketID(f.info.Allowed.BucketID)
|
||||
}
|
||||
@@ -421,6 +491,14 @@ func (f *Fs) clearUploadURL() {
|
||||
f.uploadMu.Unlock()
|
||||
}
|
||||
|
||||
// Fill up (or reset) the buffer tokens
func (f *Fs) fillBufferTokens() {
f.bufferTokens = make(chan []byte, fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
f.bufferTokens <- nil
}
}
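fillBufferTokens above and getUploadBlock just below implement a simple buffer pool: a buffered channel with one slot per transfer, where each slot may carry a reusable []byte. A self-contained sketch of the same pattern follows; the type, names and sizes are illustrative, not rclone's.

// Illustrative buffer-token pool mirroring the pattern above (assumed names).
package main

import "fmt"

type bufferPool struct {
	tokens chan []byte // one slot per concurrent transfer
}

func newBufferPool(slots int) *bufferPool {
	p := &bufferPool{tokens: make(chan []byte, slots)}
	for i := 0; i < slots; i++ {
		p.tokens <- nil // slot free, buffer allocated lazily on first use
	}
	return p
}

// get blocks until a slot is free and returns a buffer of chunkSize bytes
func (p *bufferPool) get(chunkSize int) []byte {
	buf := <-p.tokens
	if len(buf) < chunkSize {
		buf = make([]byte, chunkSize)
	}
	return buf
}

// put hands the buffer back so another upload can reuse it
func (p *bufferPool) put(buf []byte) {
	p.tokens <- buf
}

func main() {
	p := newBufferPool(2)
	buf := p.get(8)
	copy(buf, "chunk #0")
	fmt.Printf("%d slots, first chunk: %q\n", cap(p.tokens), buf)
	p.put(buf) // release the slot for the next transfer
}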
|
||||
// getUploadBlock gets a block from the pool of size chunkSize
|
||||
func (f *Fs) getUploadBlock() []byte {
|
||||
buf := <-f.bufferTokens
|
||||
@@ -924,6 +1002,12 @@ func (f *Fs) purge(oldOnly bool) error {
|
||||
errReturn = err
|
||||
}
|
||||
}
|
||||
var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
|
||||
if time.Since(time.Time(timestamp)).Hours() > 24 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Delete Config.Transfers in parallel
|
||||
toBeDeleted := make(chan *api.File, fs.Config.Transfers)
|
||||
@@ -947,6 +1031,9 @@ func (f *Fs) purge(oldOnly bool) error {
|
||||
if object.Action == "hide" {
|
||||
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
|
||||
toBeDeleted <- object
|
||||
} else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
|
||||
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
|
||||
toBeDeleted <- object
|
||||
} else {
|
||||
fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
|
||||
}
|
||||
@@ -1218,9 +1305,17 @@ var _ io.ReadCloser = &openFile{}
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: o.fs.info.DownloadURL,
|
||||
Options: options,
|
||||
}
|
||||
|
||||
// Use downloadUrl from backblaze if downloadUrl is not set
|
||||
// otherwise use the custom downloadUrl
|
||||
if o.fs.opt.DownloadURL == "" {
|
||||
opts.RootURL = o.fs.info.DownloadURL
|
||||
} else {
|
||||
opts.RootURL = o.fs.opt.DownloadURL
|
||||
}
|
||||
|
||||
// Download by id if set otherwise by name
|
||||
if o.id != "" {
|
||||
opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
|
||||
@@ -1381,7 +1476,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
// Content-Type b2/x-auto to automatically set the stored Content-Type
|
||||
// post upload. In the case where a file extension is absent or the
|
||||
// lookup fails, the Content-Type is set to application/octet-stream. The
|
||||
// Content-Type mappings can be purused here.
|
||||
// Content-Type mappings can be pursued here.
|
||||
//
|
||||
// X-Bz-Content-Sha1
|
||||
// required
|
||||
@@ -1428,11 +1523,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
},
|
||||
ContentLength: &size,
|
||||
}
|
||||
// for go1.8 (see release notes) we must nil the Body if we want a
|
||||
// "Content-Length: 0" header which b2 requires for all files.
|
||||
if size == 0 {
|
||||
opts.Body = nil
|
||||
}
|
||||
var response api.FileInfo
|
||||
// Don't retry, return a retry error instead
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
// Test B2 filesystem interface
|
||||
package b2_test
|
||||
package b2
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/b2"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
@@ -12,6 +12,23 @@ import (
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestB2:",
|
||||
NilObject: (*b2.Object)(nil),
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: minChunkSize,
|
||||
NeedMultipleChunks: true,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadCutoff(cs)
|
||||
}
|
||||
|
||||
var (
|
||||
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
_ fstests.SetUploadCutoffer = (*Fs)(nil)
|
||||
)
|
||||
|
||||
@@ -116,8 +116,10 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
|
||||
},
|
||||
}
|
||||
// Set the SHA1 if known
|
||||
if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
|
||||
request.Info[sha1Key] = calculatedSha1
|
||||
if !o.fs.opt.DisableCheckSum {
|
||||
if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
|
||||
request.Info[sha1Key] = calculatedSha1
|
||||
}
|
||||
}
|
||||
var response api.StartLargeFileResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
|
||||
@@ -45,7 +45,7 @@ type Error struct {
|
||||
RequestID string `json:"request_id"`
|
||||
}
|
||||
|
||||
// Error returns a string for the error and statistifes the error interface
|
||||
// Error returns a string for the error and satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
out := fmt.Sprintf("Error %q (%d)", e.Code, e.Status)
|
||||
if e.Message != "" {
|
||||
@@ -57,11 +57,11 @@ func (e *Error) Error() string {
|
||||
return out
|
||||
}
|
||||
|
||||
// Check Error statisfies the error interface
|
||||
// Check Error satisfies the error interface
|
||||
var _ error = (*Error)(nil)
|
||||
|
||||
// ItemFields are the fields needed for FileInfo
|
||||
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status"
|
||||
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"
|
||||
|
||||
// Types of things in Item
|
||||
const (
|
||||
@@ -86,6 +86,10 @@ type Item struct {
|
||||
ContentCreatedAt Time `json:"content_created_at"`
|
||||
ContentModifiedAt Time `json:"content_modified_at"`
|
||||
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
|
||||
SharedLink struct {
|
||||
URL string `json:"url,omitempty"`
|
||||
Access string `json:"access,omitempty"`
|
||||
} `json:"shared_link"`
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the item
|
||||
@@ -145,6 +149,14 @@ type CopyFile struct {
|
||||
Parent Parent `json:"parent"`
|
||||
}
|
||||
|
||||
// CreateSharedLink is the request for Public Link
|
||||
type CreateSharedLink struct {
|
||||
SharedLink struct {
|
||||
URL string `json:"url,omitempty"`
|
||||
Access string `json:"access,omitempty"`
|
||||
} `json:"shared_link"`
|
||||
}
|
||||
|
||||
// UploadSessionRequest is uses in Create Upload Session
|
||||
type UploadSessionRequest struct {
|
||||
FolderID string `json:"folder_id,omitempty"` // don't pass for update
|
||||
|
||||
@@ -85,7 +85,7 @@ func init() {
|
||||
Help: "Box App Client Secret\nLeave blank normally.",
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: "Cutoff for switching to multipart upload.",
|
||||
Help: "Cutoff for switching to multipart upload (>= 50MB).",
|
||||
Default: fs.SizeSuffix(defaultUploadCutoff),
|
||||
Advanced: true,
|
||||
}, {
|
||||
@@ -126,6 +126,7 @@ type Object struct {
|
||||
size int64 // size of the object
|
||||
modTime time.Time // modification time of the object
|
||||
id string // ID of the object
|
||||
publicLink string // Public Link for the object
|
||||
sha1 string // SHA-1 of the object content
|
||||
}
|
||||
|
||||
@@ -170,13 +171,13 @@ var retryErrorCodes = []int{
|
||||
// shouldRetry returns a boolean as to whether this resp and err
|
||||
// deserve to be retried. It returns the err as a convenience
|
||||
func shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
authRety := false
|
||||
authRetry := false
|
||||
|
||||
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
|
||||
authRety = true
|
||||
authRetry = true
|
||||
fs.Debugf(nil, "Should retry: %v", err)
|
||||
}
|
||||
return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
// substitute reserved characters for box
|
||||
@@ -251,7 +252,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
root = parsePath(root)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure Box: %v", err)
|
||||
return nil, errors.Wrap(err, "failed to configure Box")
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
@@ -282,16 +283,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
// Assume it is a file
|
||||
newRoot, remote := dircache.SplitPath(root)
|
||||
newF := *f
|
||||
newF.dirCache = dircache.New(newRoot, rootID, &newF)
|
||||
newF.root = newRoot
|
||||
tempF := *f
|
||||
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
|
||||
tempF.root = newRoot
|
||||
// Make new Fs which is the parent
|
||||
err = newF.dirCache.FindRoot(false)
|
||||
err = tempF.dirCache.FindRoot(false)
|
||||
if err != nil {
|
||||
// No root so return old f
|
||||
return f, nil
|
||||
}
|
||||
_, err := newF.newObjectWithInfo(remote, nil)
|
||||
_, err := tempF.newObjectWithInfo(remote, nil)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// File doesn't exist so return old f
|
||||
@@ -299,8 +300,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
f.features.Fill(&tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/ncw/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
return &newF, fs.ErrorIsFile
return f, fs.ErrorIsFile
}
return f, nil
}
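The XXX comment above points at a subtle Go detail: the feature functions were created with *f as their receiver, so returning a modified copy would leave those functions looking at the original, unmodified struct. A tiny standalone illustration of that behaviour is below; the type and field names are made up for the example and are not rclone code.

// Why NewFs mutates f rather than returning tempF: method values keep the
// receiver they were bound to. Standalone illustration, not rclone code.
package main

import "fmt"

type remote struct{ root string }

func (r *remote) Root() string { return r.root }

func main() {
	f := &remote{root: "parent/file.txt"}
	rootFn := f.Root // bound to f, like features filled from *f
	tempF := *f      // copy, analogous to the temporary Fs above
	tempF.root = "parent"
	// Callers holding rootFn still see the original value unless f itself is updated.
	fmt.Println(rootFn(), tempF.Root()) // parent/file.txt parent
	f.root = tempF.root                 // the fix: copy the fields back onto f
	fmt.Println(rootFn())               // parent
}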
@@ -523,10 +530,10 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
exisitingObj, err := f.newObjectWithInfo(src.Remote(), nil)
|
||||
existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
|
||||
switch err {
|
||||
case nil:
|
||||
return exisitingObj, exisitingObj.Update(in, src, options...)
|
||||
return existingObj, existingObj.Update(in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
// Not found so create it
|
||||
return f.PutUnchecked(in, src)
|
||||
@@ -844,6 +851,46 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
|
||||
func (f *Fs) PublicLink(remote string) (string, error) {
|
||||
id, err := f.dirCache.FindDir(remote, false)
|
||||
var opts rest.Opts
|
||||
if err == nil {
|
||||
fs.Debugf(f, "attempting to share directory '%s'", remote)
|
||||
|
||||
opts = rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "/folders/" + id,
|
||||
Parameters: fieldsValue(),
|
||||
}
|
||||
} else {
|
||||
fs.Debugf(f, "attempting to share single file '%s'", remote)
|
||||
o, err := f.NewObject(remote)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if o.(*Object).publicLink != "" {
|
||||
return o.(*Object).publicLink, nil
|
||||
}
|
||||
|
||||
opts = rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "/files/" + o.(*Object).id,
|
||||
Parameters: fieldsValue(),
|
||||
}
|
||||
}
|
||||
|
||||
shareLink := api.CreateSharedLink{}
|
||||
var info api.Item
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(&opts, &shareLink, &info)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
return info.SharedLink.URL, err
|
||||
}
|
||||
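With the PublicLink implementation above, box can back the generic link support in rclone. A usage sketch, assuming a remote configured under the name "box:" (the output URL shape is illustrative only):

rclone link box:path/to/file.txt
https://app.box.com/s/...

Because shared_link is now requested via ItemFields and stored on the Object, an existing shared link on a file is returned as-is rather than being re-created.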
|
||||
// DirCacheFlush resets the directory cache - used in testing as an
|
||||
// optional interface
|
||||
func (f *Fs) DirCacheFlush() {
|
||||
@@ -908,6 +955,7 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
|
||||
o.sha1 = info.SHA1
|
||||
o.modTime = info.ModTime()
|
||||
o.id = info.ID
|
||||
o.publicLink = info.SharedLink.URL
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1087,6 +1135,7 @@ var (
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
)
|
||||
|
||||
@@ -211,8 +211,8 @@ outer:
|
||||
}
|
||||
|
||||
reqSize := remaining
|
||||
if reqSize >= int64(chunkSize) {
|
||||
reqSize = int64(chunkSize)
|
||||
if reqSize >= chunkSize {
|
||||
reqSize = chunkSize
|
||||
}
|
||||
|
||||
// Make a block of memory
|
||||
|
||||
451
backend/cache/cache.go
vendored
@@ -6,10 +6,13 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
@@ -21,6 +24,7 @@ import (
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fspath"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/rc"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
@@ -80,8 +84,16 @@ func init() {
|
||||
Hide: fs.OptionHideBoth,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "The size of a chunk. Lower value good for slow connections but can affect seamless reading.",
|
||||
Name: "plex_insecure",
|
||||
Help: "Skip all certificate verifications when connecting to the Plex server",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: `The size of a chunk (partial file data).
|
||||
|
||||
Use lower numbers for slower connections. If the chunk size is
|
||||
changed, any downloaded chunks will be invalid and cache-chunk-path
|
||||
will need to be cleared or unexpected EOF errors will occur.`,
|
||||
Default: DefCacheChunkSize,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "1m",
|
||||
@@ -94,8 +106,10 @@ func init() {
|
||||
Help: "10 MB",
|
||||
}},
|
||||
}, {
|
||||
Name: "info_age",
|
||||
Help: "How much time should object info (file size, file hashes etc) be stored in cache.\nUse a very high value if you don't plan on changing the source FS from outside the cache.\nAccepted units are: \"s\", \"m\", \"h\".",
|
||||
Name: "info_age",
|
||||
Help: `How long to cache file structure information (directory listings, file size, times etc).
|
||||
If all write operations are done through the cache then you can safely make
|
||||
this value very large as the cache store will also be updated in real time.`,
|
||||
Default: DefCacheInfoAge,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "1h",
|
||||
@@ -108,8 +122,11 @@ func init() {
|
||||
Help: "48 hours",
|
||||
}},
|
||||
}, {
|
||||
Name: "chunk_total_size",
|
||||
Help: "The maximum size of stored chunks. When the storage grows beyond this size, the oldest chunks will be deleted.",
|
||||
Name: "chunk_total_size",
|
||||
Help: `The total size that the chunks can take up on the local disk.
|
||||
|
||||
If the cache exceeds this value then it will start to delete the
|
||||
oldest chunks until it goes under this value.`,
|
||||
Default: DefCacheTotalChunkSize,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "500M",
|
||||
@@ -124,63 +141,143 @@ func init() {
|
||||
}, {
|
||||
Name: "db_path",
|
||||
Default: filepath.Join(config.CacheDir, "cache-backend"),
|
||||
Help: "Directory to cache DB",
|
||||
Help: "Directory to store file structure metadata DB.\nThe remote name is used as the DB file name.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_path",
|
||||
Default: filepath.Join(config.CacheDir, "cache-backend"),
|
||||
Help: "Directory to cache chunk files",
|
||||
Name: "chunk_path",
|
||||
Default: filepath.Join(config.CacheDir, "cache-backend"),
|
||||
Help: `Directory to cache chunk files.
|
||||
|
||||
Path to where partial file data (chunks) are stored locally. The remote
|
||||
name is appended to the final path.
|
||||
|
||||
This config follows the "--cache-db-path". If you specify a custom
|
||||
location for "--cache-db-path" and don't specify one for "--cache-chunk-path"
|
||||
then "--cache-chunk-path" will use the same path as "--cache-db-path".`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "db_purge",
|
||||
Default: false,
|
||||
Help: "Purge the cache DB before",
|
||||
Help: "Clear all the cached data for this remote on start.",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_clean_interval",
|
||||
Default: DefCacheChunkCleanInterval,
|
||||
Help: "Interval at which chunk cleanup runs",
|
||||
Name: "chunk_clean_interval",
|
||||
Default: DefCacheChunkCleanInterval,
|
||||
Help: `How often should the cache perform cleanups of the chunk storage.
|
||||
The default value should be ok for most people. If you find that the
|
||||
cache goes over "cache-chunk-total-size" too often then try to lower
|
||||
this value to force it to perform cleanups more often.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "read_retries",
|
||||
Default: DefCacheReadRetries,
|
||||
Help: "How many times to retry a read from a cache storage",
|
||||
Name: "read_retries",
|
||||
Default: DefCacheReadRetries,
|
||||
Help: `How many times to retry a read from a cache storage.
|
||||
|
||||
Since reading from a cache stream is independent from downloading file
|
||||
data, readers can get to a point where there's no more data in the
|
||||
cache. Most of the times this can indicate a connectivity issue if
|
||||
cache isn't able to provide file data anymore.
|
||||
|
||||
For really slow connections, increase this to a point where the stream is
|
||||
able to provide data but your experience will be very stuttering.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "workers",
|
||||
Default: DefCacheTotalWorkers,
|
||||
Help: "How many workers should run in parallel to download chunks",
|
||||
Name: "workers",
|
||||
Default: DefCacheTotalWorkers,
|
||||
Help: `How many workers should run in parallel to download chunks.
|
||||
|
||||
Higher values will mean more parallel processing (better CPU needed)
|
||||
and more concurrent requests on the cloud provider. This impacts
|
||||
several aspects like the cloud provider API limits, more stress on the
|
||||
hardware that rclone runs on but it also means that streams will be
|
||||
more fluid and data will be available much more faster to readers.
|
||||
|
||||
**Note**: If the optional Plex integration is enabled then this
|
||||
setting will adapt to the type of reading performed and the value
|
||||
specified here will be used as a maximum number of workers to use.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_no_memory",
|
||||
Default: DefCacheChunkNoMemory,
|
||||
Help: "Disable the in-memory cache for storing chunks during streaming",
|
||||
Name: "chunk_no_memory",
|
||||
Default: DefCacheChunkNoMemory,
|
||||
Help: `Disable the in-memory cache for storing chunks during streaming.
|
||||
|
||||
By default, cache will keep file data during streaming in RAM as well
|
||||
to provide it to readers as fast as possible.
|
||||
|
||||
This transient data is evicted as soon as it is read and the number of
|
||||
chunks stored doesn't exceed the number of workers. However, depending
|
||||
on other settings like "cache-chunk-size" and "cache-workers" this footprint
|
||||
can increase if there are parallel streams too (multiple files being read
|
||||
at the same time).
|
||||
|
||||
If the hardware permits it, use this feature to provide an overall better
|
||||
performance during streaming but it can also be disabled if RAM is not
|
||||
available on the local machine.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "rps",
|
||||
Default: int(DefCacheRps),
|
||||
Help: "Limits the number of requests per second to the source FS. -1 disables the rate limiter",
|
||||
Name: "rps",
|
||||
Default: int(DefCacheRps),
|
||||
Help: `Limits the number of requests per second to the source FS (-1 to disable)
|
||||
|
||||
This setting places a hard limit on the number of requests per second
|
||||
that cache will be doing to the cloud provider remote and try to
|
||||
respect that value by setting waits between reads.
|
||||
|
||||
If you find that you're getting banned or limited on the cloud
|
||||
provider through cache and know that a smaller number of requests per
|
||||
second will allow you to work with it then you can use this setting
|
||||
for that.
|
||||
|
||||
A good balance of all the other settings should make this setting
|
||||
useless but it is available to set for more special cases.
|
||||
|
||||
**NOTE**: This will limit the number of requests during streams but
|
||||
other API calls to the cloud provider like directory listings will
|
||||
still pass.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "writes",
|
||||
Default: DefCacheWrites,
|
||||
Help: "Will cache file data on writes through the FS",
|
||||
Name: "writes",
|
||||
Default: DefCacheWrites,
|
||||
Help: `Cache file data on writes through the FS
|
||||
|
||||
If you need to read files immediately after you upload them through
|
||||
cache you can enable this flag to have their data stored in the
|
||||
cache store at the same time during upload.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "tmp_upload_path",
|
||||
Default: "",
|
||||
Help: "Directory to keep temporary files until they are uploaded to the cloud storage",
|
||||
Name: "tmp_upload_path",
|
||||
Default: "",
|
||||
Help: `Directory to keep temporary files until they are uploaded.
|
||||
|
||||
This is the path where cache will use as a temporary storage for new
|
||||
files that need to be uploaded to the cloud provider.
|
||||
|
||||
Specifying a value will enable this feature. Without it, it is
|
||||
completely disabled and files will be uploaded directly to the cloud
|
||||
provider`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "tmp_wait_time",
|
||||
Default: DefCacheTmpWaitTime,
|
||||
Help: "How long should files be stored in local cache before being uploaded",
|
||||
Name: "tmp_wait_time",
|
||||
Default: DefCacheTmpWaitTime,
|
||||
Help: `How long should files be stored in local cache before being uploaded
|
||||
|
||||
This is the duration that a file must wait in the temporary location
|
||||
_cache-tmp-upload-path_ before it is selected for upload.
|
||||
|
||||
Note that only one file is uploaded at a time and it can take longer
|
||||
to start the upload if a queue formed for this purpose.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "db_wait_time",
|
||||
Default: DefCacheDbWaitTime,
|
||||
Help: "How long to wait for the DB to be available - 0 is unlimited",
|
||||
Name: "db_wait_time",
|
||||
Default: DefCacheDbWaitTime,
|
||||
Help: `How long to wait for the DB to be available - 0 is unlimited
|
||||
|
||||
Only one process can have the DB open at any one time, so rclone waits
|
||||
for this duration for the DB to become available before it gives an
|
||||
error.
|
||||
|
||||
If you set it to 0 then it will wait forever.`,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
@@ -193,6 +290,7 @@ type Options struct {
|
||||
PlexUsername string `config:"plex_username"`
|
||||
PlexPassword string `config:"plex_password"`
|
||||
PlexToken string `config:"plex_token"`
|
||||
PlexInsecure bool `config:"plex_insecure"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
InfoAge fs.Duration `config:"info_age"`
|
||||
ChunkTotalSize fs.SizeSuffix `config:"chunk_total_size"`
|
||||
@@ -248,7 +346,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
return nil, err
|
||||
}
|
||||
if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
|
||||
return nil, errors.Errorf("don't set cache-total-chunk-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
|
||||
return nil, errors.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
|
||||
opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
|
||||
}
|
||||
|
||||
@@ -261,10 +359,15 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
|
||||
}
|
||||
|
||||
remotePath := path.Join(opt.Remote, rpath)
|
||||
wrappedFs, wrapErr := fs.NewFs(remotePath)
|
||||
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(opt.Remote)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", opt.Remote)
|
||||
}
|
||||
|
||||
remotePath := fspath.JoinRootPath(wPath, rootPath)
|
||||
wrappedFs, wrapErr := wInfo.NewFs(wName, remotePath, wConfig)
|
||||
if wrapErr != nil && wrapErr != fs.ErrorIsFile {
|
||||
return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
|
||||
return nil, errors.Wrapf(wrapErr, "failed to make remote %s:%s to wrap", wName, remotePath)
|
||||
}
|
||||
var fsErr error
|
||||
fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
|
||||
@@ -290,7 +393,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
f.plexConnector = &plexConnector{}
|
||||
if opt.PlexURL != "" {
|
||||
if opt.PlexToken != "" {
|
||||
f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken)
|
||||
f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken, opt.PlexInsecure)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
|
||||
}
|
||||
@@ -300,7 +403,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
decPass = opt.PlexPassword
|
||||
}
|
||||
f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, func(token string) {
|
||||
f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
|
||||
m.Set("plex_token", token)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -368,7 +471,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
fs.Infof(name, "Chunk Clean Interval: %v", f.opt.ChunkCleanInterval)
|
||||
fs.Infof(name, "Workers: %v", f.opt.TotalWorkers)
|
||||
fs.Infof(name, "File Age: %v", f.opt.InfoAge)
|
||||
if !f.opt.StoreWrites {
|
||||
if f.opt.StoreWrites {
|
||||
fs.Infof(name, "Cache Writes: enabled")
|
||||
}
|
||||
|
||||
@@ -403,7 +506,9 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}()
|
||||
|
||||
if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
|
||||
doChangeNotify(f.receiveChangeNotify, time.Duration(f.opt.ChunkCleanInterval))
|
||||
pollInterval := make(chan time.Duration, 1)
|
||||
pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
|
||||
doChangeNotify(f.receiveChangeNotify, pollInterval)
|
||||
}
|
||||
|
||||
f.features = (&fs.Features{
|
||||
@@ -455,6 +560,39 @@ Eg
|
||||
Title: "Get cache stats",
|
||||
Help: `
|
||||
Show statistics for the cache remote.
|
||||
`,
|
||||
})
|
||||
|
||||
rc.Add(rc.Call{
Path: "cache/fetch",
Fn: f.rcFetch,
Title: "Fetch file chunks",
Help: `
Ensure the specified file chunks are cached on disk.

The chunks= parameter specifies the file chunks to check.
It takes a comma separated list of array slice indices.
The slice indices are similar to Python slices: start[:end]

start is the 0 based chunk number from the beginning of the file
to fetch inclusive. end is 0 based chunk number from the beginning
of the file to fetch exclusive.
Both values can be negative, in which case they count from the back
of the file. The value "-5:" represents the last 5 chunks of a file.

Some valid examples are:
":5,-5:" -> the first and last five chunks
"0,-2" -> the first and the second last chunk
"0:10" -> the first ten chunks

Any parameter with a key that starts with "file" can be used to
specify files to fetch, eg

rclone rc cache/fetch chunks=0 file=hello file2=home/goodbye

File names will automatically be encrypted when the a crypt remote
is used on top of the cache.

`,
})
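The slice-style ranges described in that help are resolved to absolute chunk indices inside rcFetch (parseChunks and walkChunkRange, shown later in this diff). Below is a simplified, standalone sketch of how a negative range such as "-5:" maps onto a file's chunk count; the resolve function is a condensed approximation for illustration, not the exact rclone code.

// Simplified sketch of resolving a slice-style chunk range such as "-5:".
package main

import "fmt"

// resolve clamps a start:end chunk range (negative values count from the
// end, end==0 meaning "through the last chunk") for a file of the given size.
func resolve(start, end, size, chunkSize int64) (int64, int64) {
	chunks := (size-1)/chunkSize + 1
	if start < 0 {
		start += chunks
	}
	if end <= 0 {
		end += chunks
	}
	if start < 0 {
		start = 0
	}
	if end > chunks {
		end = chunks
	}
	return start, end
}

func main() {
	// A 100 MiB file with 10 MiB chunks has 10 chunks, so "-5:" is chunks 5..9.
	start, end := resolve(-5, 0, 100<<20, 10<<20)
	fmt.Printf("fetch chunks [%d, %d)\n", start, end) // fetch chunks [5, 10)
}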
|
||||
@@ -472,6 +610,22 @@ func (f *Fs) httpStats(in rc.Params) (out rc.Params, err error) {
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (f *Fs) unwrapRemote(remote string) string {
|
||||
remote = cleanPath(remote)
|
||||
if remote != "" {
|
||||
// if it's wrapped by crypt we need to check what format we got
|
||||
if cryptFs, yes := f.isWrappedByCrypt(); yes {
|
||||
_, err := cryptFs.DecryptFileName(remote)
|
||||
// if it failed to decrypt then it is a decrypted format and we need to encrypt it
|
||||
if err != nil {
|
||||
return cryptFs.EncryptFileName(remote)
|
||||
}
|
||||
// else it's an encrypted format and we can use it as it is
|
||||
}
|
||||
}
|
||||
return remote
|
||||
}
|
||||
|
||||
func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
|
||||
out = make(rc.Params)
|
||||
remoteInt, ok := in["remote"]
|
||||
@@ -485,20 +639,9 @@ func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
|
||||
withData = true
|
||||
}
|
||||
|
||||
if cleanPath(remote) != "" {
|
||||
// if it's wrapped by crypt we need to check what format we got
|
||||
if cryptFs, yes := f.isWrappedByCrypt(); yes {
|
||||
_, err := cryptFs.DecryptFileName(remote)
|
||||
// if it failed to decrypt then it is a decrypted format and we need to encrypt it
|
||||
if err != nil {
|
||||
remote = cryptFs.EncryptFileName(remote)
|
||||
}
|
||||
// else it's an encrypted format and we can use it as it is
|
||||
}
|
||||
|
||||
if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
|
||||
return out, errors.Errorf("%s doesn't exist in cache", remote)
|
||||
}
|
||||
remote = f.unwrapRemote(remote)
|
||||
if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
|
||||
return out, errors.Errorf("%s doesn't exist in cache", remote)
|
||||
}
|
||||
|
||||
co := NewObject(f, remote)
|
||||
@@ -528,6 +671,141 @@ func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (f *Fs) rcFetch(in rc.Params) (rc.Params, error) {
|
||||
type chunkRange struct {
|
||||
start, end int64
|
||||
}
|
||||
parseChunks := func(ranges string) (crs []chunkRange, err error) {
|
||||
for _, part := range strings.Split(ranges, ",") {
|
||||
var start, end int64 = 0, math.MaxInt64
|
||||
switch ints := strings.Split(part, ":"); len(ints) {
|
||||
case 1:
|
||||
start, err = strconv.ParseInt(ints[0], 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("invalid range: %q", part)
|
||||
}
|
||||
end = start + 1
|
||||
case 2:
|
||||
if ints[0] != "" {
|
||||
start, err = strconv.ParseInt(ints[0], 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("invalid range: %q", part)
|
||||
}
|
||||
}
|
||||
if ints[1] != "" {
|
||||
end, err = strconv.ParseInt(ints[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("invalid range: %q", part)
|
||||
}
|
||||
}
|
||||
default:
|
||||
return nil, errors.Errorf("invalid range: %q", part)
|
||||
}
|
||||
crs = append(crs, chunkRange{start: start, end: end})
|
||||
}
|
||||
return
|
||||
}
|
||||
walkChunkRange := func(cr chunkRange, size int64, cb func(chunk int64)) {
|
||||
if size <= 0 {
|
||||
return
|
||||
}
|
||||
chunks := (size-1)/f.ChunkSize() + 1
|
||||
|
||||
start, end := cr.start, cr.end
|
||||
if start < 0 {
|
||||
start += chunks
|
||||
}
|
||||
if end <= 0 {
|
||||
end += chunks
|
||||
}
|
||||
if end <= start {
|
||||
return
|
||||
}
|
||||
switch {
|
||||
case start < 0:
|
||||
start = 0
|
||||
case start >= chunks:
|
||||
return
|
||||
}
|
||||
switch {
|
||||
case end <= start:
|
||||
end = start + 1
|
||||
case end >= chunks:
|
||||
end = chunks
|
||||
}
|
||||
for i := start; i < end; i++ {
|
||||
cb(i)
|
||||
}
|
||||
}
|
||||
walkChunkRanges := func(crs []chunkRange, size int64, cb func(chunk int64)) {
|
||||
for _, cr := range crs {
|
||||
walkChunkRange(cr, size, cb)
|
||||
}
|
||||
}
|
||||
|
||||
v, ok := in["chunks"]
|
||||
if !ok {
|
||||
return nil, errors.New("missing chunks parameter")
|
||||
}
|
||||
s, ok := v.(string)
|
||||
if !ok {
|
||||
return nil, errors.New("invalid chunks parameter")
|
||||
}
|
||||
delete(in, "chunks")
|
||||
crs, err := parseChunks(s)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "invalid chunks parameter")
|
||||
}
|
||||
var files [][2]string
|
||||
for k, v := range in {
|
||||
if !strings.HasPrefix(k, "file") {
|
||||
return nil, errors.Errorf("invalid parameter %s=%s", k, v)
|
||||
}
|
||||
switch v := v.(type) {
|
||||
case string:
|
||||
files = append(files, [2]string{v, f.unwrapRemote(v)})
|
||||
default:
|
||||
return nil, errors.Errorf("invalid parameter %s=%s", k, v)
|
||||
}
|
||||
}
|
||||
type fileStatus struct {
|
||||
Error string
|
||||
FetchedChunks int
|
||||
}
|
||||
fetchedChunks := make(map[string]fileStatus, len(files))
|
||||
for _, pair := range files {
|
||||
file, remote := pair[0], pair[1]
|
||||
var status fileStatus
|
||||
o, err := f.NewObject(remote)
|
||||
if err != nil {
|
||||
fetchedChunks[file] = fileStatus{Error: err.Error()}
|
||||
continue
|
||||
}
|
||||
co := o.(*Object)
|
||||
err = co.refreshFromSource(true)
|
||||
if err != nil {
|
||||
fetchedChunks[file] = fileStatus{Error: err.Error()}
|
||||
continue
|
||||
}
|
||||
handle := NewObjectHandle(co, f)
|
||||
handle.UseMemory = false
|
||||
handle.scaleWorkers(1)
|
||||
walkChunkRanges(crs, co.Size(), func(chunk int64) {
|
||||
_, err := handle.getChunk(chunk * f.ChunkSize())
|
||||
if err != nil {
|
||||
if status.Error == "" {
|
||||
status.Error = err.Error()
|
||||
}
|
||||
} else {
|
||||
status.FetchedChunks++
|
||||
}
|
||||
})
|
||||
fetchedChunks[file] = status
|
||||
}
|
||||
|
||||
return rc.Params{"status": fetchedChunks}, nil
|
||||
}
|
||||
|
||||
// receiveChangeNotify is a wrapper to notifications sent from the wrapped FS about changed files
|
||||
func (f *Fs) receiveChangeNotify(forgetPath string, entryType fs.EntryType) {
|
||||
if crypt, yes := f.isWrappedByCrypt(); yes {
|
||||
@@ -592,15 +870,18 @@ func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) {
|
||||
}
|
||||
}
|
||||
|
||||
// ChangeNotify can subsribe multiple callers
// ChangeNotify can subscribe multiple callers
// this is coupled with the wrapped fs ChangeNotify (if it supports it)
// and also notifies other caches (i.e VFS) to clear out whenever something changes
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
f.parentsForgetMu.Lock()
defer f.parentsForgetMu.Unlock()
fs.Debugf(f, "subscribing to ChangeNotify")
f.parentsForgetFn = append(f.parentsForgetFn, notifyFunc)
return make(chan bool)
go func() {
for range pollInterval {
}
}()
}
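The signature change above replaces a fixed poll interval with a channel the caller feeds, which is also how the cache backend now drives the wrapped remote's ChangeNotify earlier in this diff. A minimal standalone sketch of that contract follows; the function and names are assumed for illustration, not rclone code.

// Minimal sketch of the channel-based ChangeNotify contract (assumed names).
package main

import (
	"fmt"
	"time"
)

// changeNotify registers a callback and listens for poll-interval updates
// until the caller closes the channel.
func changeNotify(notify func(path string), pollInterval <-chan time.Duration) {
	go func() {
		for d := range pollInterval { // runs until the caller closes the channel
			fmt.Println("poll interval set to", d)
			// a real backend would reschedule its polling here and call
			// notify(path) for each changed path it detects
		}
	}()
}

func main() {
	pi := make(chan time.Duration, 1)
	pi <- time.Minute // initial interval, as the cache wrapper sends above
	changeNotify(func(string) {}, pi)
	close(pi)
	time.Sleep(10 * time.Millisecond) // let the goroutine log before exiting
}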
|
||||
// Name of the remote (as passed into NewFs)
|
||||
@@ -701,7 +982,6 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
fs.Debugf(dir, "list: cached entries: %v", entries)
|
||||
return entries, nil
|
||||
}
|
||||
// FIXME need to clean existing cached listing
|
||||
|
||||
// we first search any temporary files stored locally
|
||||
var cachedEntries fs.DirEntries
|
||||
@@ -727,27 +1007,42 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
}
|
||||
|
||||
// search from the source
|
||||
entries, err = f.Fs.List(dir)
|
||||
sourceEntries, err := f.Fs.List(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fs.Debugf(dir, "list: read %v from source", len(entries))
|
||||
fs.Debugf(dir, "list: source entries: %v", entries)
|
||||
fs.Debugf(dir, "list: read %v from source", len(sourceEntries))
|
||||
fs.Debugf(dir, "list: source entries: %v", sourceEntries)
|
||||
|
||||
sort.Sort(sourceEntries)
|
||||
for _, entry := range entries {
|
||||
entryRemote := entry.Remote()
|
||||
i := sort.Search(len(sourceEntries), func(i int) bool { return sourceEntries[i].Remote() >= entryRemote })
|
||||
if i < len(sourceEntries) && sourceEntries[i].Remote() == entryRemote {
|
||||
continue
|
||||
}
|
||||
fp := path.Join(f.Root(), entryRemote)
|
||||
switch entry.(type) {
|
||||
case fs.Object:
|
||||
_ = f.cache.RemoveObject(fp)
|
||||
case fs.Directory:
|
||||
_ = f.cache.RemoveDir(fp)
|
||||
}
|
||||
fs.Debugf(dir, "list: remove entry: %v", entryRemote)
|
||||
}
|
||||
entries = nil
|
||||
|
||||
// and then iterate over the ones from source (temp Objects will override source ones)
|
||||
var batchDirectories []*Directory
|
||||
for _, entry := range entries {
|
||||
sort.Sort(cachedEntries)
|
||||
tmpCnt := len(cachedEntries)
|
||||
for _, entry := range sourceEntries {
|
||||
switch o := entry.(type) {
|
||||
case fs.Object:
|
||||
// skip over temporary objects (might be uploading)
|
||||
found := false
|
||||
for _, t := range cachedEntries {
|
||||
if t.Remote() == o.Remote() {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if found {
|
||||
oRemote := o.Remote()
|
||||
i := sort.Search(tmpCnt, func(i int) bool { return cachedEntries[i].Remote() >= oRemote })
|
||||
if i < tmpCnt && cachedEntries[i].Remote() == oRemote {
|
||||
continue
|
||||
}
|
||||
co := ObjectFromOriginal(f, o).persist()
|
||||
@@ -1254,7 +1549,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
}
|
||||
|
||||
if srcObj.isTempFile() {
|
||||
// we check if the feature is stil active
|
||||
// we check if the feature is still active
|
||||
if f.opt.TempWritePath == "" {
|
||||
fs.Errorf(srcObj, "can't copy - this is a local cached file but this feature is turned off this run")
|
||||
return nil, fs.ErrorCantCopy
|
||||
@@ -1330,7 +1625,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
|
||||
// if this is a temp object then we perform the changes locally
|
||||
if srcObj.isTempFile() {
|
||||
// we check if the feature is stil active
|
||||
// we check if the feature is still active
|
||||
if f.opt.TempWritePath == "" {
|
||||
fs.Errorf(srcObj, "can't move - this is a local cached file but this feature is turned off this run")
|
||||
return nil, fs.ErrorCantMove
|
||||
|
||||
66
backend/cache/cache_internal_test.go
vendored
@@ -4,6 +4,9 @@ package cache_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
goflag "flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
@@ -12,21 +15,12 @@ import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"encoding/base64"
|
||||
goflag "flag"
|
||||
"fmt"
|
||||
"runtime/debug"
|
||||
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
|
||||
"github.com/ncw/rclone/backend/cache"
|
||||
"github.com/ncw/rclone/backend/crypt"
|
||||
_ "github.com/ncw/rclone/backend/drive"
|
||||
@@ -36,10 +30,11 @@ import (
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/object"
|
||||
"github.com/ncw/rclone/fs/rc"
|
||||
"github.com/ncw/rclone/fs/rc/rcflags"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/ncw/rclone/vfs"
|
||||
"github.com/ncw/rclone/vfs/vfsflags"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -392,10 +387,10 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
|
||||
|
||||
// write the object
|
||||
o := runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
|
||||
require.Equal(t, o.Size(), int64(testSize))
|
||||
require.Equal(t, o.Size(), testSize)
|
||||
time.Sleep(time.Second * 3)
|
||||
|
||||
checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, int64(testSize), false)
|
||||
checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(len(checkSample)), o.Size())
|
||||
|
||||
@@ -695,8 +690,8 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestInternalChangeSeenAfterRc(t *testing.T) {
|
||||
rcflags.Opt.Enabled = true
|
||||
rc.Start(&rcflags.Opt)
|
||||
cacheExpire := rc.Calls.Get("cache/expire")
|
||||
assert.NotNil(t, cacheExpire)
|
||||
|
||||
id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
@@ -729,13 +724,9 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
|
||||
|
||||
m := make(map[string]string)
|
||||
res, err := http.Post(fmt.Sprintf("http://localhost:5572/cache/expire?remote=%s", "data.bin"), "application/json; charset=utf-8", strings.NewReader(""))
|
||||
// Call the rc function
|
||||
m, err := cacheExpire.Fn(rc.Params{"remote": "data.bin"})
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
_ = res.Body.Close()
|
||||
}()
|
||||
_ = json.NewDecoder(res.Body).Decode(&m)
|
||||
require.Contains(t, m, "status")
|
||||
require.Contains(t, m, "message")
|
||||
require.Equal(t, "ok", m["status"])
|
||||
@@ -745,23 +736,21 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
|
||||
co, err = rootFs.NewObject("data.bin")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
|
||||
li1, err := runInstance.list(t, rootFs, "")
|
||||
_, err = runInstance.list(t, rootFs, "")
|
||||
require.NoError(t, err)
|
||||
|
||||
// create some rand test data
|
||||
testData2 := randStringBytes(int(chunkSize))
|
||||
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)
|
||||
|
||||
// list should have 1 item only
|
||||
li1, err = runInstance.list(t, rootFs, "")
|
||||
li1, err := runInstance.list(t, rootFs, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, li1, 1)
|
||||
|
||||
m = make(map[string]string)
|
||||
res2, err := http.Post("http://localhost:5572/cache/expire?remote=/", "application/json; charset=utf-8", strings.NewReader(""))
|
||||
// Call the rc function
|
||||
m, err = cacheExpire.Fn(rc.Params{"remote": "/"})
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
_ = res2.Body.Close()
|
||||
}()
|
||||
_ = json.NewDecoder(res2.Body).Decode(&m)
|
||||
require.Contains(t, m, "status")
|
||||
require.Contains(t, m, "message")
|
||||
require.Equal(t, "ok", m["status"])
|
||||
@@ -769,6 +758,7 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
|
||||
|
||||
// list should have 2 items now
|
||||
li2, err := runInstance.list(t, rootFs, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, li2, 2)
|
||||
}
|
||||
|
||||
@@ -1505,7 +1495,8 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
|
||||
var err error
|
||||
|
||||
if r.useMount {
|
||||
f, err := os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
var f *os.File
|
||||
f, err = os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1515,7 +1506,8 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
|
||||
}()
|
||||
_, err = f.WriteString(data + append)
|
||||
} else {
|
||||
obj1, err := rootFs.NewObject(src)
|
||||
var obj1 fs.Object
|
||||
obj1, err = rootFs.NewObject(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1647,15 +1639,13 @@ func (r *run) getCacheFs(f fs.Fs) (*cache.Fs, error) {
|
||||
cfs, ok := f.(*cache.Fs)
|
||||
if ok {
|
||||
return cfs, nil
|
||||
} else {
|
||||
if f.Features().UnWrap != nil {
|
||||
cfs, ok := f.Features().UnWrap().(*cache.Fs)
|
||||
if ok {
|
||||
return cfs, nil
|
||||
}
|
||||
}
|
||||
if f.Features().UnWrap != nil {
|
||||
cfs, ok := f.Features().UnWrap().(*cache.Fs)
|
||||
if ok {
|
||||
return cfs, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("didn't found a cache fs")
|
||||
}
|
||||
|
||||
|
||||
3
backend/cache/cache_upload_test.go
vendored
@@ -3,6 +3,7 @@
|
||||
package cache_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path"
|
||||
@@ -10,8 +11,6 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"fmt"
|
||||
|
||||
"github.com/ncw/rclone/backend/cache"
|
||||
_ "github.com/ncw/rclone/backend/drive"
|
||||
"github.com/ncw/rclone/fs"
|
||||
|
||||
455
backend/cache/cache_upload_test.go.orig
vendored
@@ -1,455 +0,0 @@
|
||||
// +build !plan9
|
||||
|
||||
package cache_test
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"fmt"
|
||||
|
||||
"github.com/ncw/rclone/backend/cache"
|
||||
_ "github.com/ncw/rclone/backend/drive"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestInternalUploadTempDirCreated(t *testing.T) {
|
||||
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id)})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltDb *cache.Persistent) {
|
||||
// create some rand test data
|
||||
testSize := int64(524288000)
|
||||
testReader := runInstance.randomReader(t, testSize)
|
||||
bu := runInstance.listenForBackgroundUpload(t, rootFs, "one")
|
||||
runInstance.writeRemoteReader(t, rootFs, "one", testReader)
|
||||
// validate that it exists in temp fs
|
||||
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
if runInstance.rootIsCrypt {
|
||||
require.Equal(t, int64(524416032), ti.Size())
|
||||
} else {
|
||||
require.Equal(t, testSize, ti.Size())
|
||||
}
|
||||
de1, err := runInstance.list(t, rootFs, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
|
||||
runInstance.completeBackgroundUpload(t, "one", bu)
|
||||
// check if it was removed from temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
|
||||
// check if it can be read
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "one", 0, int64(1024), false)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, data2, 1024)
|
||||
}
|
||||
|
||||
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "0s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||
}
|
||||
|
||||
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1m"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||
}
|
||||
|
||||
func TestInternalUploadMoveExistingFile(t *testing.T) {
|
||||
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "3s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("one")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("one/test")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("second")
|
||||
require.NoError(t, err)
|
||||
|
||||
// create some rand test data
|
||||
testSize := int64(10485760)
|
||||
testReader := runInstance.randomReader(t, testSize)
|
||||
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
|
||||
|
||||
de1, err := runInstance.list(t, rootFs, "one/test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
|
||||
time.Sleep(time.Second * 5)
|
||||
//_ = os.Remove(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
|
||||
//require.NoError(t, err)
|
||||
|
||||
err = runInstance.dirMove(t, rootFs, "one/test", "second/test")
|
||||
require.NoError(t, err)
|
||||
|
||||
// check if it can be read
|
||||
de1, err = runInstance.list(t, rootFs, "second/test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
}
|
||||
|
||||
func TestInternalUploadTempPathCleaned(t *testing.T) {
|
||||
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("one")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("one/test")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("second")
|
||||
require.NoError(t, err)
|
||||
|
||||
// create some rand test data
|
||||
testSize := int64(1048576)
|
||||
testReader := runInstance.randomReader(t, testSize)
|
||||
testReader2 := runInstance.randomReader(t, testSize)
|
||||
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
|
||||
runInstance.writeObjectReader(t, rootFs, "second/data.bin", testReader2)
|
||||
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second")))
|
||||
require.False(t, os.IsNotExist(err))
|
||||
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, "second/data.bin")
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/data.bin")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
|
||||
de1, err := runInstance.list(t, rootFs, "one/test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
|
||||
// check if it can be read
|
||||
de1, err = runInstance.list(t, rootFs, "second")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
}
|
||||
|
||||
func TestInternalUploadQueueMoreFiles(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("test")
|
||||
require.NoError(t, err)
|
||||
minSize := 5242880
|
||||
maxSize := 10485760
|
||||
totalFiles := 10
|
||||
rand.Seed(time.Now().Unix())
|
||||
|
||||
lastFile := ""
|
||||
for i := 0; i < totalFiles; i++ {
|
||||
size := int64(rand.Intn(maxSize-minSize) + minSize)
|
||||
testReader := runInstance.randomReader(t, size)
|
||||
remote := "test/" + strconv.Itoa(i) + ".bin"
|
||||
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
|
||||
|
||||
// validate that it exists in temp fs
|
||||
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, remote)))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, size, runInstance.cleanSize(t, ti.Size()))
|
||||
|
||||
if runInstance.wrappedIsExternal && i < totalFiles-1 {
|
||||
time.Sleep(time.Second * 3)
|
||||
}
|
||||
lastFile = remote
|
||||
}
|
||||
|
||||
// check if cache lists all files, likely temp upload didn't finish yet
|
||||
de1, err := runInstance.list(t, rootFs, "test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, totalFiles)
|
||||
|
||||
// wait for background uploader to do its thing
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, lastFile)
|
||||
|
||||
// retry until we have no more temp files and fail if they don't go down to 0
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
|
||||
// check if cache lists all files
|
||||
de1, err = runInstance.list(t, rootFs, "test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, totalFiles)
|
||||
}
|
||||
|
||||
func TestInternalUploadTempFileOperations(t *testing.T) {
|
||||
id := "tiutfo"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
boltDb.PurgeTempUploads()
|
||||
|
||||
// create some rand test data
|
||||
runInstance.mkdir(t, rootFs, "test")
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// check if it can be read
|
||||
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data1)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
// test DirMove - allowed
|
||||
err = runInstance.dirMove(t, rootFs, "test", "second")
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("second/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.Error(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
|
||||
require.Error(t, err)
|
||||
var started bool
|
||||
started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one")))
|
||||
require.NoError(t, err)
|
||||
require.False(t, started)
|
||||
runInstance.mkdir(t, rootFs, "test")
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
}
|
||||
|
||||
// test Rmdir - allowed
|
||||
err = runInstance.rm(t, rootFs, "test")
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "directory not empty")
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
|
||||
require.False(t, started)
|
||||
require.NoError(t, err)
|
||||
|
||||
// test Move/Rename -- allowed
|
||||
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
// try to read from it
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/second")
|
||||
require.NoError(t, err)
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data2)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.Error(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
|
||||
require.NoError(t, err)
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
}
|
||||
|
||||
// test Copy -- allowed
|
||||
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/third")
|
||||
require.NoError(t, err)
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data2)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// test Remove -- allowed
|
||||
err = runInstance.rm(t, rootFs, "test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.Error(t, err)
|
||||
// validate that it doesn't exist in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.Error(t, err)
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// test Update -- allowed
|
||||
firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
require.NoError(t, err)
|
||||
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
|
||||
require.NoError(t, err)
|
||||
obj2, err := rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
|
||||
require.Equal(t, "one content updated", string(data2))
|
||||
tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
if runInstance.rootIsCrypt {
|
||||
require.Equal(t, int64(67), tmpInfo.Size())
|
||||
} else {
|
||||
require.Equal(t, int64(len(data2)), tmpInfo.Size())
|
||||
}
|
||||
|
||||
// test SetModTime -- allowed
|
||||
secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, secondModTime, firstModTime)
|
||||
require.NotEqual(t, time.Time{}, firstModTime)
|
||||
require.NotEqual(t, time.Time{}, secondModTime)
|
||||
}
|
||||
|
||||
func TestInternalUploadUploadingFileOperations(t *testing.T) {
|
||||
id := "tiuufo"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
boltDb.PurgeTempUploads()
|
||||
|
||||
// create some rand test data
|
||||
runInstance.mkdir(t, rootFs, "test")
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// check if it can be read
|
||||
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data1)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
err = boltDb.SetPendingUploadToStarted(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
// test DirMove
|
||||
err = runInstance.dirMove(t, rootFs, "test", "second")
|
||||
if err != errNotSupported {
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
// test Rmdir
|
||||
err = runInstance.rm(t, rootFs, "test")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it doesn't exist in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
// test Move/Rename
|
||||
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
|
||||
if err != errNotSupported {
|
||||
require.Error(t, err)
|
||||
// try to read from it
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/second")
|
||||
require.Error(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
// test Copy -- allowed
|
||||
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/third")
|
||||
require.NoError(t, err)
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data2)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// test Remove
|
||||
err = runInstance.rm(t, rootFs, "test/one")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it doesn't exist in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// test Update - this seems to work. Why? FIXME
|
||||
//firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
//require.NoError(t, err)
|
||||
//err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated", func() {
|
||||
// data2 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len("one content updated")), true)
|
||||
// require.Equal(t, "one content", string(data2))
|
||||
//
|
||||
// tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
// require.NoError(t, err)
|
||||
// if runInstance.rootIsCrypt {
|
||||
// require.Equal(t, int64(67), tmpInfo.Size())
|
||||
// } else {
|
||||
// require.Equal(t, int64(len(data2)), tmpInfo.Size())
|
||||
// }
|
||||
//})
|
||||
//require.Error(t, err)
|
||||
|
||||
// test SetModTime -- seems to work cause of previous
|
||||
//secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
//require.NoError(t, err)
|
||||
//require.Equal(t, secondModTime, firstModTime)
|
||||
//require.NotEqual(t, time.Time{}, firstModTime)
|
||||
//require.NotEqual(t, time.Time{}, secondModTime)
|
||||
}
|
||||
backend/cache/cache_upload_test.go.rej (vendored, 12 lines deleted)
@@ -1,12 +0,0 @@
|
||||
--- cache_upload_test.go
|
||||
+++ cache_upload_test.go
|
||||
@@ -1500,9 +1469,6 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
|
||||
}
|
||||
r.tempFiles = nil
|
||||
debug.FreeOSMemory()
|
||||
- for k, v := range r.runDefaultFlagMap {
|
||||
- _ = flag.Set(k, v)
|
||||
- }
|
||||
}
|
||||
|
||||
func (r *run) randomBytes(t *testing.T, size int64) []byte {
|
||||
backend/cache/directory.go (vendored, 3 changes)
@@ -3,9 +3,8 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
backend/cache/handle.go (vendored, 85 changes)
@@ -5,12 +5,11 @@ package cache
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/operations"
|
||||
@@ -49,12 +48,13 @@ type Handle struct {
|
||||
offset int64
|
||||
seenOffsets map[int64]bool
|
||||
mu sync.Mutex
|
||||
workersWg sync.WaitGroup
|
||||
confirmReading chan bool
|
||||
|
||||
UseMemory bool
|
||||
workers []*worker
|
||||
closed bool
|
||||
reading bool
|
||||
workers int
|
||||
maxWorkerID int
|
||||
UseMemory bool
|
||||
closed bool
|
||||
reading bool
|
||||
}
|
||||
|
||||
// NewObjectHandle returns a new Handle for an existing Object
|
||||
@@ -95,7 +95,7 @@ func (r *Handle) String() string {
|
||||
|
||||
// startReadWorkers will start the worker pool
|
||||
func (r *Handle) startReadWorkers() {
|
||||
if r.hasAtLeastOneWorker() {
|
||||
if r.workers > 0 {
|
||||
return
|
||||
}
|
||||
totalWorkers := r.cacheFs().opt.TotalWorkers
|
||||
@@ -117,26 +117,27 @@ func (r *Handle) startReadWorkers() {
|
||||
|
||||
// scaleOutWorkers will increase the worker pool count by the provided amount
|
||||
func (r *Handle) scaleWorkers(desired int) {
|
||||
current := len(r.workers)
|
||||
current := r.workers
|
||||
if current == desired {
|
||||
return
|
||||
}
|
||||
if current > desired {
|
||||
// scale in gracefully
|
||||
for i := 0; i < current-desired; i++ {
|
||||
for r.workers > desired {
|
||||
r.preloadQueue <- -1
|
||||
r.workers--
|
||||
}
|
||||
} else {
|
||||
// scale out
|
||||
for i := 0; i < desired-current; i++ {
|
||||
for r.workers < desired {
|
||||
w := &worker{
|
||||
r: r,
|
||||
ch: r.preloadQueue,
|
||||
id: current + i,
|
||||
id: r.maxWorkerID,
|
||||
}
|
||||
r.workersWg.Add(1)
|
||||
r.workers++
|
||||
r.maxWorkerID++
|
||||
go w.run()
|
||||
|
||||
r.workers = append(r.workers, w)
|
||||
}
|
||||
}
|
||||
// ignore first scale out from 0
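The hunk above retires surplus workers by pushing a negative offset onto the shared preload queue; any worker that reads the sentinel simply exits. A minimal, self-contained Go sketch of that pattern (illustrative only, not the rclone code; names are invented for the example) is:

package main

import (
	"fmt"
	"sync"
)

// Sketch only: a worker pool fed from one queue, scaled in by sending a
// negative sentinel that tells exactly one worker to stop.
func main() {
	queue := make(chan int64, 16)
	var wg sync.WaitGroup

	startWorker := func(id int) {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for offset := range queue {
				if offset < 0 {
					return // sentinel received: retire this worker
				}
				fmt.Printf("worker %d preloading offset %d\n", id, offset)
			}
		}()
	}

	for i := 0; i < 3; i++ { // scale out to three workers
		startWorker(i)
	}
	for offset := int64(0); offset < 5; offset++ { // enqueue some work
		queue <- offset
	}
	queue <- -1 // scale in: retire one worker
	queue <- -1 // retire another

	close(queue) // the remaining worker drains the queue and exits
	wg.Wait()
}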
|
||||
@@ -148,7 +149,7 @@ func (r *Handle) scaleWorkers(desired int) {
|
||||
func (r *Handle) confirmExternalReading() {
|
||||
// if we have a max value of workers
|
||||
// then we skip this step
|
||||
if len(r.workers) > 1 ||
|
||||
if r.workers > 1 ||
|
||||
!r.cacheFs().plexConnector.isConfigured() {
|
||||
return
|
||||
}
|
||||
@@ -178,7 +179,7 @@ func (r *Handle) queueOffset(offset int64) {
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < len(r.workers); i++ {
|
||||
for i := 0; i < r.workers; i++ {
|
||||
o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
|
||||
if o < 0 || o >= r.cachedObject.Size() {
|
||||
continue
|
||||
@@ -193,16 +194,6 @@ func (r *Handle) queueOffset(offset int64) {
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Handle) hasAtLeastOneWorker() bool {
|
||||
oneWorker := false
|
||||
for i := 0; i < len(r.workers); i++ {
|
||||
if r.workers[i].isRunning() {
|
||||
oneWorker = true
|
||||
}
|
||||
}
|
||||
return oneWorker
|
||||
}
|
||||
|
||||
// getChunk is called by the FS to retrieve a specific chunk of known start and size from where it can find it
|
||||
// it can be from transient or persistent cache
|
||||
// it will also build the chunk from the cache's specific chunk boundaries and build the final desired chunk in a buffer
|
||||
@@ -243,7 +234,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
|
||||
// not found in ram or
|
||||
// the worker didn't managed to download the chunk in time so we abort and close the stream
|
||||
if err != nil || len(data) == 0 || !found {
|
||||
if !r.hasAtLeastOneWorker() {
|
||||
if r.workers == 0 {
|
||||
fs.Errorf(r, "out of workers")
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
@@ -304,14 +295,7 @@ func (r *Handle) Close() error {
|
||||
close(r.preloadQueue)
|
||||
r.closed = true
|
||||
// wait for workers to complete their jobs before returning
|
||||
waitCount := 3
|
||||
for i := 0; i < len(r.workers); i++ {
|
||||
waitIdx := 0
|
||||
for r.workers[i].isRunning() && waitIdx < waitCount {
|
||||
time.Sleep(time.Second)
|
||||
waitIdx++
|
||||
}
|
||||
}
|
||||
r.workersWg.Wait()
|
||||
r.memory.db.Flush()
|
||||
|
||||
fs.Debugf(r, "cache reader closed %v", r.offset)
|
||||
@@ -348,12 +332,9 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
|
||||
}
|
||||
|
||||
type worker struct {
|
||||
r *Handle
|
||||
ch <-chan int64
|
||||
rc io.ReadCloser
|
||||
id int
|
||||
running bool
|
||||
mu sync.Mutex
|
||||
r *Handle
|
||||
rc io.ReadCloser
|
||||
id int
|
||||
}
|
||||
|
||||
// String is a representation of this worker
|
||||
@@ -398,33 +379,19 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
|
||||
})
|
||||
}
|
||||
|
||||
func (w *worker) isRunning() bool {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
return w.running
|
||||
}
|
||||
|
||||
func (w *worker) setRunning(f bool) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
w.running = f
|
||||
}
|
||||
|
||||
// run is the main loop for the worker which receives offsets to preload
|
||||
func (w *worker) run() {
|
||||
var err error
|
||||
var data []byte
|
||||
defer w.setRunning(false)
|
||||
defer func() {
|
||||
if w.rc != nil {
|
||||
_ = w.rc.Close()
|
||||
w.setRunning(false)
|
||||
}
|
||||
w.r.workersWg.Done()
|
||||
}()
|
||||
|
||||
for {
|
||||
chunkStart, open := <-w.ch
|
||||
w.setRunning(true)
|
||||
chunkStart, open := <-w.r.preloadQueue
|
||||
if chunkStart < 0 || !open {
|
||||
break
|
||||
}
|
||||
|
||||
backend/cache/object.go (vendored, 10 changes)
@@ -208,11 +208,17 @@ func (o *Object) SetModTime(t time.Time) error {
|
||||
|
||||
// Open is used to request a specific part of the file using fs.RangeOption
|
||||
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||
if err := o.refreshFromSource(true); err != nil {
|
||||
var err error
|
||||
|
||||
if o.Object == nil {
|
||||
err = o.refreshFromSource(true)
|
||||
} else {
|
||||
err = o.refresh()
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var err error
|
||||
cacheReader := NewObjectHandle(o, o.CacheFs)
|
||||
var offset, limit int64 = 0, -1
|
||||
for _, option := range options {
|
||||
|
||||
backend/cache/plex.go (vendored, 38 changes)
@@ -3,20 +3,19 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"sync"
|
||||
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/patrickmn/go-cache"
|
||||
cache "github.com/patrickmn/go-cache"
|
||||
"golang.org/x/net/websocket"
|
||||
)
|
||||
|
||||
@@ -54,6 +53,7 @@ type plexConnector struct {
|
||||
username string
|
||||
password string
|
||||
token string
|
||||
insecure bool
|
||||
f *Fs
|
||||
mu sync.Mutex
|
||||
running bool
|
||||
@@ -63,7 +63,7 @@ type plexConnector struct {
|
||||
}
|
||||
|
||||
// newPlexConnector connects to a Plex server and generates a token
|
||||
func newPlexConnector(f *Fs, plexURL, username, password string, saveToken func(string)) (*plexConnector, error) {
|
||||
func newPlexConnector(f *Fs, plexURL, username, password string, insecure bool, saveToken func(string)) (*plexConnector, error) {
|
||||
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -75,6 +75,7 @@ func newPlexConnector(f *Fs, plexURL, username, password string, saveToken func(
|
||||
username: username,
|
||||
password: password,
|
||||
token: "",
|
||||
insecure: insecure,
|
||||
stateCache: cache.New(time.Hour, time.Minute),
|
||||
saveToken: saveToken,
|
||||
}
|
||||
@@ -83,7 +84,7 @@ func newPlexConnector(f *Fs, plexURL, username, password string, saveToken func(
|
||||
}
|
||||
|
||||
// newPlexConnector connects to a Plex server and generates a token
|
||||
func newPlexConnectorWithToken(f *Fs, plexURL, token string) (*plexConnector, error) {
|
||||
func newPlexConnectorWithToken(f *Fs, plexURL, token string, insecure bool) (*plexConnector, error) {
|
||||
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -93,6 +94,7 @@ func newPlexConnectorWithToken(f *Fs, plexURL, token string) (*plexConnector, er
|
||||
f: f,
|
||||
url: u,
|
||||
token: token,
|
||||
insecure: insecure,
|
||||
stateCache: cache.New(time.Hour, time.Minute),
|
||||
}
|
||||
pc.listenWebsocket()
|
||||
@@ -107,14 +109,26 @@ func (p *plexConnector) closeWebsocket() {
|
||||
p.running = false
|
||||
}
|
||||
|
||||
func (p *plexConnector) websocketDial() (*websocket.Conn, error) {
|
||||
u := strings.TrimRight(strings.Replace(strings.Replace(
|
||||
p.url.String(), "http://", "ws://", 1), "https://", "wss://", 1), "/")
|
||||
url := fmt.Sprintf(defPlexNotificationURL, u, p.token)
|
||||
|
||||
config, err := websocket.NewConfig(url, "http://localhost")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if p.insecure {
|
||||
config.TlsConfig = &tls.Config{InsecureSkipVerify: true}
|
||||
}
|
||||
return websocket.DialConfig(config)
|
||||
}
|
||||
|
||||
func (p *plexConnector) listenWebsocket() {
|
||||
p.runningMu.Lock()
|
||||
defer p.runningMu.Unlock()
|
||||
|
||||
u := strings.Replace(p.url.String(), "http://", "ws://", 1)
|
||||
u = strings.Replace(u, "https://", "wss://", 1)
|
||||
conn, err := websocket.Dial(fmt.Sprintf(defPlexNotificationURL, strings.TrimRight(u, "/"), p.token),
|
||||
"", "http://localhost")
|
||||
conn, err := p.websocketDial()
|
||||
if err != nil {
|
||||
fs.Errorf("plex", "%v", err)
|
||||
return
|
||||
|
||||
backend/cache/storage_memory.go (vendored, 2 changes)
@@ -8,7 +8,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/patrickmn/go-cache"
|
||||
cache "github.com/patrickmn/go-cache"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
|
||||
backend/cache/storage_persistent.go (vendored, 15 changes)
@@ -3,20 +3,17 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"io/ioutil"
|
||||
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
bolt "github.com/coreos/bbolt"
|
||||
"github.com/ncw/rclone/fs"
|
||||
@@ -401,7 +398,7 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
|
||||
}
|
||||
err = bucket.Put([]byte(cachedObject.Name), []byte(encoded))
|
||||
err = bucket.Put([]byte(cachedObject.Name), encoded)
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
|
||||
}
|
||||
@@ -812,7 +809,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
|
||||
}
|
||||
err = bucket.Put([]byte(destPath), []byte(encoded))
|
||||
err = bucket.Put([]byte(destPath), encoded)
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
|
||||
}
|
||||
@@ -1052,7 +1049,7 @@ func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
|
||||
}
|
||||
err = bucket.Put([]byte(destPath), []byte(encoded))
|
||||
err = bucket.Put([]byte(destPath), encoded)
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
|
||||
}
|
||||
|
||||
@@ -17,11 +17,9 @@ import (
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/rfjakob/eme"
|
||||
"golang.org/x/crypto/nacl/secretbox"
|
||||
"golang.org/x/crypto/scrypt"
|
||||
|
||||
"github.com/rfjakob/eme"
|
||||
)
|
||||
|
||||
// Constants
|
||||
@@ -43,6 +41,7 @@ var (
|
||||
ErrorBadDecryptControlChar = errors.New("bad decryption - contains control chars")
|
||||
ErrorNotAMultipleOfBlocksize = errors.New("not a multiple of blocksize")
|
||||
ErrorTooShortAfterDecode = errors.New("too short after base32 decode")
|
||||
ErrorTooLongAfterDecode = errors.New("too long after base32 decode")
|
||||
ErrorEncryptedFileTooShort = errors.New("file is too short to be encrypted")
|
||||
ErrorEncryptedFileBadHeader = errors.New("file has truncated block header")
|
||||
ErrorEncryptedBadMagic = errors.New("not an encrypted file - bad magic string")
|
||||
@@ -286,6 +285,9 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
|
||||
// not possible if decodeFilename() working correctly
|
||||
return "", ErrorTooShortAfterDecode
|
||||
}
|
||||
if len(rawCiphertext) > 2048 {
|
||||
return "", ErrorTooLongAfterDecode
|
||||
}
|
||||
paddedPlaintext := eme.Transform(c.block, c.nameTweak[:], rawCiphertext, eme.DirectionDecrypt)
|
||||
plaintext, err := pkcs7.Unpad(nameCipherBlockSize, paddedPlaintext)
|
||||
if err != nil {
|
||||
@@ -461,7 +463,7 @@ func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
|
||||
if int(newRune) < base {
|
||||
newRune += 256
|
||||
}
|
||||
_, _ = result.WriteRune(rune(newRune))
|
||||
_, _ = result.WriteRune(newRune)
|
||||
|
||||
default:
|
||||
_, _ = result.WriteRune(runeValue)
|
||||
@@ -746,7 +748,7 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
|
||||
if !bytes.Equal(readBuf[:fileMagicSize], fileMagicBytes) {
|
||||
return nil, fh.finishAndClose(ErrorEncryptedBadMagic)
|
||||
}
|
||||
// retreive the nonce
|
||||
// retrieve the nonce
|
||||
fh.nonce.fromBuf(readBuf[fileMagicSize:])
|
||||
fh.initialNonce = fh.nonce
|
||||
return fh, nil
|
||||
|
||||
@@ -194,6 +194,10 @@ func TestEncryptSegment(t *testing.T) {
|
||||
|
||||
func TestDecryptSegment(t *testing.T) {
|
||||
// We've tested the forwards above, now concentrate on the errors
|
||||
longName := make([]byte, 3328)
|
||||
for i := range longName {
|
||||
longName[i] = 'a'
|
||||
}
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true)
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
@@ -201,6 +205,7 @@ func TestDecryptSegment(t *testing.T) {
|
||||
}{
|
||||
{"64=", ErrorBadBase32Encoding},
|
||||
{"!", base32.CorruptInputError(0)},
|
||||
{string(longName), ErrorTooLongAfterDecode},
|
||||
{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
|
||||
{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
|
||||
{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
|
||||
|
||||
@@ -4,7 +4,6 @@ package crypt
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -13,6 +12,7 @@ import (
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fspath"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -67,8 +67,16 @@ func init() {
|
||||
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "show_mapping",
|
||||
Help: "For all files listed show how the names encrypt.",
|
||||
Name: "show_mapping",
|
||||
Help: `For all files listed show how the names encrypt.
|
||||
|
||||
If this flag is set then for each file that the remote is asked to
|
||||
list, it will log (at level INFO) a line stating the decrypted file
|
||||
name and the encrypted file name.
|
||||
|
||||
This is so you can work out which encrypted names are which decrypted
|
||||
names just in case you need to do something with the encrypted file
|
||||
names, or for debugging purposes.`,
|
||||
Default: false,
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
@@ -114,7 +122,7 @@ func NewCipher(m configmap.Mapper) (Cipher, error) {
|
||||
return newCipherForConfig(opt)
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
@@ -130,16 +138,20 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if strings.HasPrefix(remote, name+":") {
|
||||
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
|
||||
}
|
||||
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
|
||||
}
|
||||
// Look for a file first
|
||||
remotePath := path.Join(remote, cipher.EncryptFileName(rpath))
|
||||
wrappedFs, err := fs.NewFs(remotePath)
|
||||
remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath))
|
||||
wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
|
||||
// if that didn't produce a file, look for a directory
|
||||
if err != fs.ErrorIsFile {
|
||||
remotePath = path.Join(remote, cipher.EncryptDirName(rpath))
|
||||
wrappedFs, err = fs.NewFs(remotePath)
|
||||
remotePath = fspath.JoinRootPath(wPath, cipher.EncryptDirName(rpath))
|
||||
wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig)
|
||||
}
|
||||
if err != fs.ErrorIsFile && err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remotePath)
|
||||
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
|
||||
}
|
||||
f := &Fs{
|
||||
Fs: wrappedFs,
|
||||
@@ -161,7 +173,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
|
||||
doChangeNotify := wrappedFs.Features().ChangeNotify
|
||||
if doChangeNotify != nil {
|
||||
f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
|
||||
f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
|
||||
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
|
||||
decrypted, err := f.DecryptFileName(path)
|
||||
if err != nil {
|
||||
@@ -170,7 +182,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
notifyFunc(decrypted, entryType)
|
||||
}
|
||||
return doChangeNotify(wrappedNotifyFunc, pollInterval)
|
||||
doChangeNotify(wrappedNotifyFunc, pollInterval)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -543,7 +555,7 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
|
||||
}
|
||||
|
||||
// ComputeHash takes the nonce from o, and encrypts the contents of
|
||||
// src with it, and calcuates the hash given by HashType on the fly
|
||||
// src with it, and calculates the hash given by HashType on the fly
|
||||
//
|
||||
// Note that we break lots of encapsulation in this function.
|
||||
func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
|
||||
|
||||
@@ -7,13 +7,30 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/crypt"
|
||||
_ "github.com/ncw/rclone/backend/drive" // for integration tests
|
||||
_ "github.com/ncw/rclone/backend/local"
|
||||
_ "github.com/ncw/rclone/backend/swift" // for integration tests
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
if *fstest.RemoteName == "" {
|
||||
t.Skip("Skipping as -remote not set")
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: *fstest.RemoteName,
|
||||
NilObject: (*crypt.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
// TestStandard runs integration tests against the remote
|
||||
func TestStandard(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
|
||||
name := "TestCrypt"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
@@ -30,6 +47,9 @@ func TestStandard(t *testing.T) {
|
||||
|
||||
// TestOff runs integration tests against the remote
|
||||
func TestOff(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off")
|
||||
name := "TestCrypt2"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
@@ -46,6 +66,9 @@ func TestOff(t *testing.T) {
|
||||
|
||||
// TestObfuscate runs integration tests against the remote
|
||||
func TestObfuscate(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
|
||||
name := "TestCrypt3"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,62 +1,82 @@
// +build go1.9

package drive

import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"mime"
"path/filepath"
"strings"
"testing"

"google.golang.org/api/drive/v3"

_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fstest/fstests"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/api/drive/v3"
)

const exampleExportFormats = `{
|
||||
"application/vnd.google-apps.document": [
|
||||
"application/rtf",
|
||||
"application/vnd.oasis.opendocument.text",
|
||||
"text/html",
|
||||
"application/pdf",
|
||||
"application/epub+zip",
|
||||
"application/zip",
|
||||
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
|
||||
"text/plain"
|
||||
],
|
||||
"application/vnd.google-apps.spreadsheet": [
|
||||
"application/x-vnd.oasis.opendocument.spreadsheet",
|
||||
"text/tab-separated-values",
|
||||
"application/pdf",
|
||||
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
|
||||
"text/csv",
|
||||
"application/zip",
|
||||
"application/vnd.oasis.opendocument.spreadsheet"
|
||||
],
|
||||
"application/vnd.google-apps.jam": [
|
||||
"application/pdf"
|
||||
],
|
||||
"application/vnd.google-apps.script": [
|
||||
"application/vnd.google-apps.script+json"
|
||||
],
|
||||
"application/vnd.google-apps.presentation": [
|
||||
"application/vnd.oasis.opendocument.presentation",
|
||||
"application/pdf",
|
||||
"application/vnd.openxmlformats-officedocument.presentationml.presentation",
|
||||
"text/plain"
|
||||
],
|
||||
"application/vnd.google-apps.form": [
|
||||
"application/zip"
|
||||
],
|
||||
"application/vnd.google-apps.drawing": [
|
||||
"image/svg+xml",
|
||||
"image/png",
|
||||
"application/pdf",
|
||||
"image/jpeg"
|
||||
]
|
||||
}`
|
||||
func TestDriveScopes(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
want []string
|
||||
wantFlag bool
|
||||
}{
|
||||
{"", []string{
|
||||
"https://www.googleapis.com/auth/drive",
|
||||
}, false},
|
||||
{" drive.file , drive.readonly", []string{
|
||||
"https://www.googleapis.com/auth/drive.file",
|
||||
"https://www.googleapis.com/auth/drive.readonly",
|
||||
}, false},
|
||||
{" drive.file , drive.appfolder", []string{
|
||||
"https://www.googleapis.com/auth/drive.file",
|
||||
"https://www.googleapis.com/auth/drive.appfolder",
|
||||
}, true},
|
||||
} {
|
||||
got := driveScopes(test.in)
|
||||
assert.Equal(t, test.want, got, test.in)
|
||||
gotFlag := driveScopesContainsAppFolder(got)
|
||||
assert.Equal(t, test.wantFlag, gotFlag, test.in)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
var additionalMimeTypes = map[string]string{
|
||||
"application/vnd.ms-excel.sheet.macroenabled.12": ".xlsm",
|
||||
"application/vnd.ms-excel.template.macroenabled.12": ".xltm",
|
||||
"application/vnd.ms-powerpoint.presentation.macroenabled.12": ".pptm",
|
||||
"application/vnd.ms-powerpoint.slideshow.macroenabled.12": ".ppsm",
|
||||
"application/vnd.ms-powerpoint.template.macroenabled.12": ".potm",
|
||||
"application/vnd.ms-powerpoint": ".ppt",
|
||||
"application/vnd.ms-word.document.macroenabled.12": ".docm",
|
||||
"application/vnd.ms-word.template.macroenabled.12": ".dotm",
|
||||
"application/vnd.openxmlformats-officedocument.presentationml.template": ".potx",
|
||||
"application/vnd.openxmlformats-officedocument.spreadsheetml.template": ".xltx",
|
||||
"application/vnd.openxmlformats-officedocument.wordprocessingml.template": ".dotx",
|
||||
"application/vnd.sun.xml.writer": ".sxw",
|
||||
"text/richtext": ".rtf",
|
||||
}
|
||||
*/
|
||||
|
||||
// Load the example export formats into exportFormats for testing
|
||||
func TestInternalLoadExampleExportFormats(t *testing.T) {
|
||||
exportFormatsOnce.Do(func() {})
|
||||
assert.NoError(t, json.Unmarshal([]byte(exampleExportFormats), &_exportFormats))
|
||||
func TestInternalLoadExampleFormats(t *testing.T) {
|
||||
fetchFormatsOnce.Do(func() {})
|
||||
buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
|
||||
var about struct {
|
||||
ExportFormats map[string][]string `json:"exportFormats,omitempty"`
|
||||
ImportFormats map[string][]string `json:"importFormats,omitempty"`
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, json.Unmarshal(buf, &about))
|
||||
_exportFormats = fixMimeTypeMap(about.ExportFormats)
|
||||
_importFormats = fixMimeTypeMap(about.ImportFormats)
|
||||
}
|
||||
|
||||
func TestInternalParseExtensions(t *testing.T) {
|
||||
@@ -65,27 +85,24 @@ func TestInternalParseExtensions(t *testing.T) {
|
||||
want []string
|
||||
wantErr error
|
||||
}{
|
||||
{"doc", []string{"doc"}, nil},
|
||||
{" docx ,XLSX, pptx,svg", []string{"docx", "xlsx", "pptx", "svg"}, nil},
|
||||
{"docx,svg,Docx", []string{"docx", "svg"}, nil},
|
||||
{"docx,potato,docx", []string{"docx"}, errors.New(`couldn't find mime type for extension "potato"`)},
|
||||
{"doc", []string{".doc"}, nil},
|
||||
{" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
|
||||
{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
|
||||
{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
|
||||
} {
|
||||
f := new(Fs)
|
||||
gotErr := f.parseExtensions(test.in)
|
||||
extensions, _, gotErr := parseExtensions(test.in)
|
||||
if test.wantErr == nil {
|
||||
assert.NoError(t, gotErr)
|
||||
} else {
|
||||
assert.EqualError(t, gotErr, test.wantErr.Error())
|
||||
}
|
||||
assert.Equal(t, test.want, f.extensions)
|
||||
assert.Equal(t, test.want, extensions)
|
||||
}
|
||||
|
||||
// Test it is appending
|
||||
f := new(Fs)
|
||||
assert.Nil(t, f.parseExtensions("docx,svg"))
|
||||
assert.Nil(t, f.parseExtensions("docx,svg,xlsx"))
|
||||
assert.Equal(t, []string{"docx", "svg", "xlsx"}, f.extensions)
|
||||
|
||||
extensions, _, gotErr := parseExtensions("docx,svg", "docx,svg,xlsx")
|
||||
assert.NoError(t, gotErr)
|
||||
assert.Equal(t, []string{".docx", ".svg", ".xlsx"}, extensions)
|
||||
}
|
||||
|
||||
func TestInternalFindExportFormat(t *testing.T) {
|
||||
@@ -99,17 +116,17 @@ func TestInternalFindExportFormat(t *testing.T) {
|
||||
wantMimeType string
|
||||
}{
|
||||
{[]string{}, "", ""},
|
||||
{[]string{"pdf"}, "pdf", "application/pdf"},
|
||||
{[]string{"pdf", "rtf", "xls"}, "pdf", "application/pdf"},
|
||||
{[]string{"xls", "rtf", "pdf"}, "rtf", "application/rtf"},
|
||||
{[]string{"xls", "csv", "svg"}, "", ""},
|
||||
{[]string{".pdf"}, ".pdf", "application/pdf"},
|
||||
{[]string{".pdf", ".rtf", ".xls"}, ".pdf", "application/pdf"},
|
||||
{[]string{".xls", ".rtf", ".pdf"}, ".rtf", "application/rtf"},
|
||||
{[]string{".xls", ".csv", ".svg"}, "", ""},
|
||||
} {
|
||||
f := new(Fs)
|
||||
f.extensions = test.extensions
|
||||
f.exportExtensions = test.extensions
|
||||
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
|
||||
assert.Equal(t, test.wantExtension, gotExtension)
|
||||
if test.wantExtension != "" {
|
||||
assert.Equal(t, item.Name+"."+gotExtension, gotFilename)
|
||||
assert.Equal(t, item.Name+gotExtension, gotFilename)
|
||||
} else {
|
||||
assert.Equal(t, "", gotFilename)
|
||||
}
|
||||
@@ -117,3 +134,155 @@ func TestInternalFindExportFormat(t *testing.T) {
|
||||
assert.Equal(t, true, gotIsDocument)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMimeTypesToExtension(t *testing.T) {
|
||||
for mimeType, extension := range _mimeTypeToExtension {
|
||||
extensions, err := mime.ExtensionsByType(mimeType)
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, extensions, extension)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtensionToMimeType(t *testing.T) {
|
||||
for mimeType, extension := range _mimeTypeToExtension {
|
||||
gotMimeType := mime.TypeByExtension(extension)
|
||||
mediatype, _, err := mime.ParseMediaType(gotMimeType)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, mimeType, mediatype)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtensionsForExportFormats(t *testing.T) {
|
||||
if _exportFormats == nil {
|
||||
t.Error("exportFormats == nil")
|
||||
}
|
||||
for fromMT, toMTs := range _exportFormats {
|
||||
for _, toMT := range toMTs {
|
||||
if !isInternalMimeType(toMT) {
|
||||
extensions, err := mime.ExtensionsByType(toMT)
|
||||
assert.NoError(t, err, "invalid MIME type %q", toMT)
|
||||
assert.NotEmpty(t, extensions, "No extension found for %q (from: %q)", fromMT, toMT)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtensionsForImportFormats(t *testing.T) {
|
||||
t.Skip()
|
||||
if _importFormats == nil {
|
||||
t.Error("_importFormats == nil")
|
||||
}
|
||||
for fromMT := range _importFormats {
|
||||
if !isInternalMimeType(fromMT) {
|
||||
extensions, err := mime.ExtensionsByType(fromMT)
|
||||
assert.NoError(t, err, "invalid MIME type %q", fromMT)
|
||||
assert.NotEmpty(t, extensions, "No extension found for %q", fromMT)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTestDocumentImport(t *testing.T) {
|
||||
oldAllow := f.opt.AllowImportNameChange
|
||||
f.opt.AllowImportNameChange = true
|
||||
defer func() {
|
||||
f.opt.AllowImportNameChange = oldAllow
|
||||
}()
|
||||
|
||||
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
|
||||
require.NoError(t, err)
|
||||
|
||||
testFilesFs, err := fs.NewFs(testFilesPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = operations.CopyFile(f, testFilesFs, "example2.doc", "example2.doc")
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
|
||||
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
|
||||
require.NoError(t, err)
|
||||
|
||||
testFilesFs, err := fs.NewFs(testFilesPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = operations.CopyFile(f, testFilesFs, "example2.xlsx", "example1.ods")
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTestDocumentExport(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
var err error
|
||||
|
||||
f.exportExtensions, _, err = parseExtensions("txt")
|
||||
require.NoError(t, err)
|
||||
|
||||
obj, err := f.NewObject("example2.txt")
|
||||
require.NoError(t, err)
|
||||
|
||||
rc, err := obj.Open()
|
||||
require.NoError(t, err)
|
||||
defer func() { require.NoError(t, rc.Close()) }()
|
||||
|
||||
_, err = io.Copy(&buf, rc)
|
||||
require.NoError(t, err)
|
||||
text := buf.String()
|
||||
|
||||
for _, excerpt := range []string{
|
||||
"Lorem ipsum dolor sit amet, consectetur",
|
||||
"porta at ultrices in, consectetur at augue.",
|
||||
} {
|
||||
require.Contains(t, text, excerpt)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTestDocumentLink(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
var err error
|
||||
|
||||
f.exportExtensions, _, err = parseExtensions("link.html")
|
||||
require.NoError(t, err)
|
||||
|
||||
obj, err := f.NewObject("example2.link.html")
|
||||
require.NoError(t, err)
|
||||
|
||||
rc, err := obj.Open()
|
||||
require.NoError(t, err)
|
||||
defer func() { require.NoError(t, rc.Close()) }()
|
||||
|
||||
_, err = io.Copy(&buf, rc)
|
||||
require.NoError(t, err)
|
||||
text := buf.String()
|
||||
|
||||
require.True(t, strings.HasPrefix(text, "<html>"))
|
||||
require.True(t, strings.HasSuffix(text, "</html>\n"))
|
||||
for _, excerpt := range []string{
|
||||
`<meta http-equiv="refresh"`,
|
||||
`Loading <a href="`,
|
||||
} {
|
||||
require.Contains(t, text, excerpt)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
// These tests all depend on each other so run them as nested tests
|
||||
t.Run("DocumentImport", func(t *testing.T) {
|
||||
f.InternalTestDocumentImport(t)
|
||||
t.Run("DocumentUpdate", func(t *testing.T) {
|
||||
f.InternalTestDocumentUpdate(t)
|
||||
t.Run("DocumentExport", func(t *testing.T) {
|
||||
f.InternalTestDocumentExport(t)
|
||||
t.Run("DocumentLink", func(t *testing.T) {
|
||||
f.InternalTestDocumentLink(t)
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
}
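The comment above notes that these internal tests depend on each other, which is why they are chained as nested subtests. A small generic sketch of that pattern (not tied to the drive backend; names are illustrative) is:

package example

import "testing"

// Sketch of nested subtests: each step runs inside the one it depends on,
// so ordering is preserved without package-level state.
func TestNestedSteps(t *testing.T) {
	t.Run("Import", func(t *testing.T) {
		// import a fixture document here
		t.Run("Update", func(t *testing.T) {
			// update the document created by Import
			t.Run("Export", func(t *testing.T) {
				// export it and assert on the contents
			})
		})
	})
}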
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
|
||||
|
||||
@@ -1,10 +1,13 @@
|
||||
// Test Drive filesystem interface
|
||||
package drive_test
|
||||
|
||||
// +build go1.9
|
||||
|
||||
package drive
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/drive"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
@@ -12,6 +15,23 @@ import (
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestDrive:",
|
||||
NilObject: (*drive.Object)(nil),
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: minChunkSize,
|
||||
CeilChunkSize: fstests.NextPowerOfTwo,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadCutoff(cs)
|
||||
}
|
||||
|
||||
var (
|
||||
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
_ fstests.SetUploadCutoffer = (*Fs)(nil)
|
||||
)
|
||||
|
||||
backend/drive/drive_unsupported.go (new file, 6 lines)
@@ -0,0 +1,6 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build !go1.9

package drive
backend/drive/test/about.json (new file, 178 lines)
@@ -0,0 +1,178 @@
|
||||
{
|
||||
"importFormats": {
|
||||
"text/tab-separated-values": [
|
||||
"application/vnd.google-apps.spreadsheet"
|
||||
],
|
||||
"application/x-vnd.oasis.opendocument.presentation": [
|
||||
"application/vnd.google-apps.presentation"
|
||||
],
|
||||
"image/jpeg": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"image/bmp": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"image/gif": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/vnd.ms-excel.sheet.macroenabled.12": [
|
||||
"application/vnd.google-apps.spreadsheet"
|
||||
],
|
||||
"application/vnd.openxmlformats-officedocument.wordprocessingml.template": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/vnd.ms-powerpoint.presentation.macroenabled.12": [
|
||||
"application/vnd.google-apps.presentation"
|
||||
],
|
||||
"application/vnd.ms-word.template.macroenabled.12": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"image/pjpeg": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/vnd.google-apps.script+text/plain": [
|
||||
"application/vnd.google-apps.script"
|
||||
],
|
||||
"application/vnd.ms-excel": [
|
||||
"application/vnd.google-apps.spreadsheet"
|
||||
],
|
||||
"application/vnd.sun.xml.writer": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/vnd.ms-word.document.macroenabled.12": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/vnd.ms-powerpoint.slideshow.macroenabled.12": [
|
||||
"application/vnd.google-apps.presentation"
|
||||
],
|
||||
"text/rtf": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"text/plain": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/vnd.oasis.opendocument.spreadsheet": [
|
||||
"application/vnd.google-apps.spreadsheet"
|
||||
],
|
||||
"application/x-vnd.oasis.opendocument.spreadsheet": [
|
||||
"application/vnd.google-apps.spreadsheet"
|
||||
],
|
||||
"image/png": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/x-vnd.oasis.opendocument.text": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/msword": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/pdf": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/json": [
|
||||
"application/vnd.google-apps.script"
|
||||
],
|
||||
"application/x-msmetafile": [
|
||||
"application/vnd.google-apps.drawing"
|
||||
],
|
||||
"application/vnd.openxmlformats-officedocument.spreadsheetml.template": [
|
||||
"application/vnd.google-apps.spreadsheet"
|
||||
],
|
||||
"application/vnd.ms-powerpoint": [
|
||||
"application/vnd.google-apps.presentation"
|
||||
],
|
||||
"application/vnd.ms-excel.template.macroenabled.12": [
|
||||
"application/vnd.google-apps.spreadsheet"
|
||||
],
|
||||
"image/x-bmp": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/rtf": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/vnd.openxmlformats-officedocument.presentationml.template": [
|
||||
"application/vnd.google-apps.presentation"
|
||||
],
|
||||
"image/x-png": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"text/html": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/vnd.oasis.opendocument.text": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"application/vnd.openxmlformats-officedocument.presentationml.presentation": [
|
||||
"application/vnd.google-apps.presentation"
|
||||
],
|
||||
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": [
|
||||
"application/vnd.google-apps.spreadsheet"
|
||||
],
|
||||
"application/vnd.google-apps.script+json": [
|
||||
"application/vnd.google-apps.script"
|
||||
],
|
||||
"application/vnd.openxmlformats-officedocument.presentationml.slideshow": [
|
||||
"application/vnd.google-apps.presentation"
|
||||
],
|
||||
"application/vnd.ms-powerpoint.template.macroenabled.12": [
|
||||
"application/vnd.google-apps.presentation"
|
||||
],
|
||||
"text/csv": [
|
||||
"application/vnd.google-apps.spreadsheet"
|
||||
],
|
||||
"application/vnd.oasis.opendocument.presentation": [
|
||||
"application/vnd.google-apps.presentation"
|
||||
],
|
||||
"image/jpg": [
|
||||
"application/vnd.google-apps.document"
|
||||
],
|
||||
"text/richtext": [
|
||||
"application/vnd.google-apps.document"
|
||||
]
|
||||
},
|
||||
"exportFormats": {
|
||||
"application/vnd.google-apps.document": [
|
||||
"application/rtf",
|
||||
"application/vnd.oasis.opendocument.text",
|
||||
"text/html",
|
||||
"application/pdf",
|
||||
"application/epub+zip",
|
||||
"application/zip",
|
||||
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
|
||||
"text/plain"
|
||||
],
|
||||
"application/vnd.google-apps.spreadsheet": [
|
||||
"application/x-vnd.oasis.opendocument.spreadsheet",
|
||||
"text/tab-separated-values",
|
||||
"application/pdf",
|
||||
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
|
||||
"text/csv",
|
||||
"application/zip",
|
||||
"application/vnd.oasis.opendocument.spreadsheet"
|
||||
],
|
||||
"application/vnd.google-apps.jam": [
|
||||
"application/pdf"
|
||||
],
|
||||
"application/vnd.google-apps.script": [
|
||||
"application/vnd.google-apps.script+json"
|
||||
],
|
||||
"application/vnd.google-apps.presentation": [
|
||||
"application/vnd.oasis.opendocument.presentation",
|
||||
"application/pdf",
|
||||
"application/vnd.openxmlformats-officedocument.presentationml.presentation",
|
||||
"text/plain"
|
||||
],
|
||||
"application/vnd.google-apps.form": [
|
||||
"application/zip"
|
||||
],
|
||||
"application/vnd.google-apps.drawing": [
|
||||
"image/svg+xml",
|
||||
"image/png",
|
||||
"application/pdf",
|
||||
"image/jpeg"
|
||||
]
|
||||
}
|
||||
}
|
||||
BIN
backend/drive/test/files/example1.ods
Normal file
Binary file not shown.
BIN
backend/drive/test/files/example2.doc
Normal file
Binary file not shown.
BIN
backend/drive/test/files/example3.odt
Normal file
Binary file not shown.
@@ -8,6 +8,8 @@
|
||||
//
|
||||
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS
|
||||
|
||||
// +build go1.9
|
||||
|
||||
package drive
|
||||
|
||||
import (
|
||||
@@ -50,11 +52,12 @@ type resumableUpload struct {
|
||||
}
|
||||
|
||||
// Upload the io.Reader in of size bytes with contentType and info
|
||||
func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string, info *drive.File, remote string) (*drive.File, error) {
|
||||
params := make(url.Values)
|
||||
params.Set("alt", "json")
|
||||
params.Set("uploadType", "resumable")
|
||||
params.Set("fields", partialFields)
|
||||
func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
|
||||
params := url.Values{
|
||||
"alt": {"json"},
|
||||
"uploadType": {"resumable"},
|
||||
"fields": {partialFields},
|
||||
}
|
||||
if f.isTeamDrive {
|
||||
params.Set("supportsTeamDrives", "true")
|
||||
}
|
||||
@@ -182,7 +185,7 @@ func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunk
|
||||
// been 200 OK.
|
||||
//
|
||||
// So parse the response out of the body. We aren't expecting
|
||||
// any other 2xx codes, so we parse it unconditionaly on
|
||||
// any other 2xx codes, so we parse it unconditionally on
|
||||
// StatusCode
|
||||
if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
|
||||
return 598, err
|
||||
|
||||
@@ -31,9 +31,11 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
@@ -79,8 +81,8 @@ const (
|
||||
// Choose 48MB which is 91% of Maximum speed. rclone by
|
||||
// default does 4 transfers so this should use 4*48MB = 192MB
|
||||
// by default.
|
||||
defaultChunkSize = 48 * 1024 * 1024
|
||||
maxChunkSize = 150 * 1024 * 1024
|
||||
defaultChunkSize = 48 * fs.MebiByte
|
||||
maxChunkSize = 150 * fs.MebiByte
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -120,9 +122,21 @@ func init() {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Dropbox App Client Secret\nLeave blank normally.",
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: fmt.Sprintf("Upload chunk size. Max %v.", fs.SizeSuffix(maxChunkSize)),
|
||||
Default: fs.SizeSuffix(defaultChunkSize),
|
||||
Name: "chunk_size",
|
||||
Help: fmt.Sprintf(`Upload chunk size. (< %v).
|
||||
|
||||
Any files larger than this will be uploaded in chunks of this size.
|
||||
|
||||
Note that chunks are buffered in memory (one at a time) so rclone can
|
||||
deal with retries. Setting this larger will increase the speed
|
||||
slightly (at most 10%% for 128MB in tests) at the cost of using more
|
||||
memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "impersonate",
|
||||
Help: "Impersonate this user when using a business account.",
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
}},
|
||||
})
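The chunk_size help text above says chunks are buffered in memory one at a time per transfer. A minimal standalone sketch (not part of the diff) of the worst-case arithmetic, assuming rclone's default of 4 parallel transfers:

package main

import "fmt"

func main() {
	const MiB = 1024 * 1024
	chunkSize := int64(48 * MiB) // the dropbox default above
	transfers := int64(4)        // rclone's default --transfers
	// one chunk is buffered per transfer, so the worst case is roughly:
	fmt.Printf("~%d MiB of buffers\n", chunkSize*transfers/MiB) // ~192 MiB
}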
|
||||
@@ -130,7 +144,8 @@ func init() {
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
Impersonate string `config:"impersonate"`
|
||||
}
|
||||
|
||||
// Fs represents a remote dropbox server
|
||||
@@ -142,6 +157,7 @@ type Fs struct {
|
||||
srv files.Client // the connection to the dropbox server
|
||||
sharing sharing.Client // as above, but for generating sharing links
|
||||
users users.Client // as above, but for accessing user information
|
||||
team team.Client // for the Teams API
|
||||
slashRoot string // root with "/" prefix, lowercase
|
||||
slashRootSlash string // root with "/" prefix and postfix, lowercase
|
||||
pacer *pacer.Pacer // To pace the API calls
|
||||
@@ -188,14 +204,42 @@ func shouldRetry(err error) (bool, error) {
|
||||
return false, err
|
||||
}
|
||||
baseErrString := errors.Cause(err).Error()
|
||||
// FIXME there is probably a better way of doing this!
|
||||
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
|
||||
// handle any official Retry-After header from Dropbox's SDK first
|
||||
switch e := err.(type) {
|
||||
case auth.RateLimitAPIError:
|
||||
if e.RateLimitError.RetryAfter > 0 {
|
||||
fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
|
||||
time.Sleep(time.Duration(e.RateLimitError.RetryAfter) * time.Second)
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
// Keep old behavior for backward compatibility
|
||||
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
|
||||
return true, err
|
||||
}
|
||||
return fserrors.ShouldRetry(err), err
|
||||
}
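The new branch above prefers the SDK's RateLimitAPIError and its RetryAfter value over string matching on the error text. A stdlib-only sketch of the same idea; the helper itself is hypothetical, only the RetryAfter handling is taken from the diff:

package main

import (
	"fmt"
	"time"
)

// sleepRetryAfter honours a server-supplied back-off before the caller retries.
func sleepRetryAfter(retryAfterSeconds int) {
	if retryAfterSeconds > 0 {
		fmt.Printf("rate limited, sleeping %ds before retrying\n", retryAfterSeconds)
		time.Sleep(time.Duration(retryAfterSeconds) * time.Second)
	}
}

func main() {
	sleepRetryAfter(0) // no Retry-After - retry straight away
	sleepRetryAfter(1) // back off for a second first
}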
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||
const minChunkSize = fs.Byte
|
||||
if cs < minChunkSize {
|
||||
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
}
|
||||
if cs > maxChunkSize {
|
||||
return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadChunkSize(cs)
|
||||
if err == nil {
|
||||
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
@@ -203,8 +247,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.ChunkSize > maxChunkSize {
|
||||
return nil, errors.Errorf("chunk size too big, must be < %v", maxChunkSize)
|
||||
err = checkUploadChunkSize(opt.ChunkSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "dropbox: chunk size")
|
||||
}
|
||||
|
||||
// Convert the old token if it exists. The old token was just
|
||||
@@ -235,6 +280,29 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
Client: oAuthClient, // maybe???
|
||||
HeaderGenerator: f.headerGenerator,
|
||||
}
|
||||
|
||||
// NOTE: needs to be created pre-impersonation so we can look up the impersonated user
|
||||
f.team = team.New(config)
|
||||
|
||||
if opt.Impersonate != "" {
|
||||
|
||||
user := team.UserSelectorArg{
|
||||
Email: opt.Impersonate,
|
||||
}
|
||||
user.Tag = "email"
|
||||
|
||||
members := []*team.UserSelectorArg{&user}
|
||||
args := team.NewMembersGetInfoArgs(members)
|
||||
|
||||
memberIds, err := f.team.MembersGetInfo(args)
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
|
||||
}
|
||||
|
||||
config.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
|
||||
}
|
||||
|
||||
f.srv = files.New(config)
|
||||
f.sharing = sharing.New(config)
|
||||
f.users = users.New(config)
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
// Test Dropbox filesystem interface
|
||||
package dropbox_test
|
||||
package dropbox
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/dropbox"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
@@ -12,6 +12,15 @@ import (
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestDropbox:",
|
||||
NilObject: (*dropbox.Object)(nil),
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MaxChunkSize: maxChunkSize,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
|
||||
@@ -166,7 +166,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
|
||||
f.poolMu.Unlock()
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
||||
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
|
||||
// Parse config into Options struct
|
||||
@@ -646,7 +646,21 @@ func (f *ftpReadCloser) Read(p []byte) (n int, err error) {
|
||||
|
||||
// Close the FTP reader and return the connection to the pool
|
||||
func (f *ftpReadCloser) Close() error {
|
||||
err := f.rc.Close()
|
||||
var err error
|
||||
errchan := make(chan error, 1)
|
||||
go func() {
|
||||
errchan <- f.rc.Close()
|
||||
}()
|
||||
// Wait for Close for up to 60 seconds
|
||||
timer := time.NewTimer(60 * time.Second)
|
||||
select {
|
||||
case err = <-errchan:
|
||||
timer.Stop()
|
||||
case <-timer.C:
|
||||
// if timer fired assume no error but connection dead
|
||||
fs.Errorf(f.f, "Timeout when waiting for connection Close")
|
||||
return nil
|
||||
}
|
||||
// if errors while reading or closing, dump the connection
|
||||
if err != nil || f.err != nil {
|
||||
_ = f.c.Quit()
|
||||
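The Close change above waits for the FTP library's Close in a goroutine and gives up after 60 seconds rather than hanging forever, treating the connection as dead on timeout. The same pattern in isolation, as a hypothetical stdlib-only helper:

package main

import (
	"errors"
	"fmt"
	"time"
)

// closeWithTimeout runs closeFn in a goroutine and stops waiting after d;
// on timeout the caller should treat the connection as dead.
func closeWithTimeout(closeFn func() error, d time.Duration) error {
	errc := make(chan error, 1)
	go func() { errc <- closeFn() }()
	select {
	case err := <-errc:
		return err
	case <-time.After(d):
		return errors.New("timeout waiting for Close")
	}
}

func main() {
	slow := func() error { time.Sleep(2 * time.Second); return nil }
	fmt.Println(closeWithTimeout(slow, 100*time.Millisecond)) // timeout waiting for Close
}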
@@ -704,6 +718,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
path := path.Join(o.fs.root, o.remote)
|
||||
// remove the file if upload failed
|
||||
remove := func() {
|
||||
// Give the FTP server a chance to get its internal state in order after the error.
|
||||
// The error may have been local in which case we closed the connection. The server
|
||||
// may still be dealing with it for a moment. A sleep isn't ideal but I haven't been
|
||||
// able to think of a better method to find out if the server has finished - ncw
|
||||
time.Sleep(1 * time.Second)
|
||||
removeErr := o.Remove()
|
||||
if removeErr != nil {
|
||||
fs.Debugf(o, "Failed to remove: %v", removeErr)
|
||||
@@ -717,7 +736,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
}
|
||||
err = c.Stor(path, in)
|
||||
if err != nil {
|
||||
_ = c.Quit()
|
||||
_ = c.Quit() // toss this connection to avoid sync errors
|
||||
remove()
|
||||
return errors.Wrap(err, "update stor")
|
||||
}
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
// Package googlecloudstorage provides an interface to Google Cloud Storage
|
||||
|
||||
// +build go1.9
|
||||
|
||||
package googlecloudstorage
|
||||
|
||||
/*
|
||||
@@ -159,21 +162,36 @@ func init() {
|
||||
}, {
|
||||
Value: "asia-east1",
|
||||
Help: "Taiwan.",
|
||||
}, {
|
||||
Value: "asia-east2",
|
||||
Help: "Hong Kong.",
|
||||
}, {
|
||||
Value: "asia-northeast1",
|
||||
Help: "Tokyo.",
|
||||
}, {
|
||||
Value: "asia-south1",
|
||||
Help: "Mumbai.",
|
||||
}, {
|
||||
Value: "asia-southeast1",
|
||||
Help: "Singapore.",
|
||||
}, {
|
||||
Value: "australia-southeast1",
|
||||
Help: "Sydney.",
|
||||
}, {
|
||||
Value: "europe-north1",
|
||||
Help: "Finland.",
|
||||
}, {
|
||||
Value: "europe-west1",
|
||||
Help: "Belgium.",
|
||||
}, {
|
||||
Value: "europe-west2",
|
||||
Help: "London.",
|
||||
}, {
|
||||
Value: "europe-west3",
|
||||
Help: "Frankfurt.",
|
||||
}, {
|
||||
Value: "europe-west4",
|
||||
Help: "Netherlands.",
|
||||
}, {
|
||||
Value: "us-central1",
|
||||
Help: "Iowa.",
|
||||
@@ -186,6 +204,9 @@ func init() {
|
||||
}, {
|
||||
Value: "us-west1",
|
||||
Help: "Oregon.",
|
||||
}, {
|
||||
Value: "us-west2",
|
||||
Help: "California.",
|
||||
}},
|
||||
}, {
|
||||
Name: "storage_class",
|
||||
@@ -279,7 +300,7 @@ func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// shouldRetry determines whehter a given err rates being retried
|
||||
// shouldRetry determines whether a given err rates being retried
|
||||
func shouldRetry(err error) (again bool, errOut error) {
|
||||
again = false
|
||||
if err != nil {
|
||||
@@ -327,7 +348,7 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
|
||||
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, bucket:path
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
var oAuthClient *http.Client
|
||||
|
||||
@@ -345,7 +366,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
|
||||
// try loading service account credentials from env variable, then from a file
|
||||
if opt.ServiceAccountCredentials != "" && opt.ServiceAccountFile != "" {
|
||||
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
|
||||
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error opening service account credentials file")
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
// Test GoogleCloudStorage filesystem interface
|
||||
|
||||
// +build go1.9
|
||||
|
||||
package googlecloudstorage_test
|
||||
|
||||
import (
|
||||
|
||||
@@ -0,0 +1,6 @@
|
||||
// Build for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
// +build !go1.9
|
||||
|
||||
package googlecloudstorage
|
||||
@@ -40,6 +40,9 @@ func init() {
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "https://example.com",
|
||||
Help: "Connect to example.com",
|
||||
}, {
|
||||
Value: "https://user:pass@example.com",
|
||||
Help: "Connect to example.com using a username and password",
|
||||
}},
|
||||
}},
|
||||
}
|
||||
@@ -193,7 +196,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
}
|
||||
err := o.stat()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Stat failed")
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
@@ -248,7 +251,7 @@ func parseName(base *url.URL, name string) (string, error) {
|
||||
}
|
||||
// calculate the name relative to the base
|
||||
name = u.Path[len(base.Path):]
|
||||
// musn't be empty
|
||||
// mustn't be empty
|
||||
if name == "" {
|
||||
return "", errNameIsEmpty
|
||||
}
|
||||
@@ -416,6 +419,9 @@ func (o *Object) url() string {
|
||||
func (o *Object) stat() error {
|
||||
url := o.url()
|
||||
res, err := o.fs.httpClient.Head(url)
|
||||
if err == nil && res.StatusCode == http.StatusNotFound {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
err = statusError(res, err)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to stat")
|
||||
|
||||
@@ -144,6 +144,11 @@ func TestNewObject(t *testing.T) {
|
||||
|
||||
dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
|
||||
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
|
||||
|
||||
// check object not found
|
||||
o, err = f.NewObject("not found.txt")
|
||||
assert.Nil(t, o)
|
||||
assert.Equal(t, fs.ErrorObjectNotFound, err)
|
||||
}
|
||||
|
||||
func TestOpen(t *testing.T) {
|
||||
|
||||
@@ -2,7 +2,9 @@ package hubic
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/swift"
|
||||
)
|
||||
|
||||
@@ -21,12 +23,17 @@ func newAuth(f *Fs) *auth {
|
||||
// Request constructs a http.Request for authentication
|
||||
//
|
||||
// returns nil for not needed
|
||||
func (a *auth) Request(*swift.Connection) (*http.Request, error) {
|
||||
err := a.f.getCredentials()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
|
||||
const retries = 10
|
||||
for try := 1; try <= retries; try++ {
|
||||
err = a.f.getCredentials()
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
|
||||
}
|
||||
return nil, nil
|
||||
return nil, err
|
||||
}
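The new Request body above retries getCredentials up to 10 times with a short pause instead of failing on the first error. A generic, hypothetical version of that loop for illustration:

package main

import (
	"errors"
	"fmt"
	"time"
)

// retry calls fn up to n times, pausing between attempts, and returns the last error.
func retry(n int, pause time.Duration, fn func() error) (err error) {
	for try := 1; try <= n; try++ {
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(pause)
	}
	return err
}

func main() {
	attempts := 0
	err := retry(10, 10*time.Millisecond, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("not ready yet")
		}
		return nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}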
|
||||
|
||||
// Response parses the result of an http request
|
||||
|
||||
@@ -9,8 +9,10 @@ package hubic
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/backend/swift"
|
||||
@@ -60,13 +62,13 @@ func init() {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Options: append([]fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Hubic Client Id\nLeave blank normally.",
|
||||
}, {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Hubic Client Secret\nLeave blank normally.",
|
||||
}},
|
||||
}}, swift.SharedOptions...),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -124,7 +126,9 @@ func (f *Fs) getCredentials() (err error) {
|
||||
}
|
||||
defer fs.CheckClose(resp.Body, &err)
|
||||
if resp.StatusCode < 200 || resp.StatusCode > 299 {
|
||||
return errors.Errorf("failed to get credentials: %s", resp.Status)
|
||||
body, _ := ioutil.ReadAll(resp.Body)
|
||||
bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
|
||||
return errors.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
|
||||
}
|
||||
decoder := json.NewDecoder(resp.Body)
|
||||
var result credentials
|
||||
|
||||
@@ -9,7 +9,10 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
// default time format for almost all request and responses
|
||||
timeFormat = "2006-01-02-T15:04:05Z0700"
|
||||
// the API server seems to use a different format
|
||||
apiTimeFormat = "2006-01-02T15:04:05Z07:00"
|
||||
)
|
||||
|
||||
// Time represents time values in the Jottacloud API. It uses a custom RFC3339 like format.
|
||||
@@ -40,6 +43,9 @@ func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
// Return Time string in Jottacloud format
|
||||
func (t Time) String() string { return time.Time(t).Format(timeFormat) }
|
||||
|
||||
// APIString returns Time string in Jottacloud API format
|
||||
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
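The two layouts above differ only in the extra dash before the T and the colon in the zone offset. A quick standalone sketch of what they produce (the timestamp is made up):

package main

import (
	"fmt"
	"time"
)

func main() {
	const timeFormat = "2006-01-02-T15:04:05Z0700"    // JFS XML format, as defined above
	const apiTimeFormat = "2006-01-02T15:04:05Z07:00" // newer API format (RFC 3339)

	t := time.Date(2018, 8, 7, 18, 51, 24, 0, time.UTC)
	fmt.Println(t.Format(timeFormat))    // 2018-08-07-T18:51:24Z
	fmt.Println(t.Format(apiTimeFormat)) // 2018-08-07T18:51:24Z
}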
|
||||
|
||||
// Flag is a hacky type for checking if an attribute is present
|
||||
type Flag bool
|
||||
|
||||
@@ -58,6 +64,15 @@ func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
|
||||
return attr, errors.New("unimplemented")
|
||||
}
|
||||
|
||||
// TokenJSON is the struct representing the HTTP response from OAuth2
|
||||
// providers returning a token in JSON form.
|
||||
type TokenJSON struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
TokenType string `json:"token_type"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
|
||||
}
|
||||
|
||||
/*
|
||||
GET http://www.jottacloud.com/JFS/<account>
|
||||
|
||||
@@ -233,16 +248,17 @@ GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/.../<file>
|
||||
|
||||
// JottaFile represents a Jottacloud file
|
||||
type JottaFile struct {
|
||||
XMLName xml.Name
|
||||
Name string `xml:"name,attr"`
|
||||
Deleted Flag `xml:"deleted,attr"`
|
||||
State string `xml:"currentRevision>state"`
|
||||
CreatedAt Time `xml:"currentRevision>created"`
|
||||
ModifiedAt Time `xml:"currentRevision>modified"`
|
||||
Updated Time `xml:"currentRevision>updated"`
|
||||
Size int64 `xml:"currentRevision>size"`
|
||||
MimeType string `xml:"currentRevision>mime"`
|
||||
MD5 string `xml:"currentRevision>md5"`
|
||||
XMLName xml.Name
|
||||
Name string `xml:"name,attr"`
|
||||
Deleted Flag `xml:"deleted,attr"`
|
||||
PublicSharePath string `xml:"publicSharePath"`
|
||||
State string `xml:"currentRevision>state"`
|
||||
CreatedAt Time `xml:"currentRevision>created"`
|
||||
ModifiedAt Time `xml:"currentRevision>modified"`
|
||||
Updated Time `xml:"currentRevision>updated"`
|
||||
Size int64 `xml:"currentRevision>size"`
|
||||
MimeType string `xml:"currentRevision>mime"`
|
||||
MD5 string `xml:"currentRevision>md5"`
|
||||
}
|
||||
|
||||
// Error is a custom Error for wrapping Jottacloud error responses
|
||||
@@ -264,3 +280,37 @@ func (e *Error) Error() string {
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// AllocateFileRequest to prepare an upload to Jottacloud
|
||||
type AllocateFileRequest struct {
|
||||
Bytes int64 `json:"bytes"`
|
||||
Created string `json:"created"`
|
||||
Md5 string `json:"md5"`
|
||||
Modified string `json:"modified"`
|
||||
Path string `json:"path"`
|
||||
}
|
||||
|
||||
// AllocateFileResponse for upload requests
|
||||
type AllocateFileResponse struct {
|
||||
Name string `json:"name"`
|
||||
Path string `json:"path"`
|
||||
State string `json:"state"`
|
||||
UploadID string `json:"upload_id"`
|
||||
UploadURL string `json:"upload_url"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
ResumePos int64 `json:"resume_pos"`
|
||||
}
|
||||
|
||||
// UploadResponse after an upload
|
||||
type UploadResponse struct {
|
||||
Name string `json:"name"`
|
||||
Path string `json:"path"`
|
||||
Kind string `json:"kind"`
|
||||
ContentID string `json:"content_id"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
Md5 string `json:"md5"`
|
||||
Created int64 `json:"created"`
|
||||
Modified int64 `json:"modified"`
|
||||
Deleted interface{} `json:"deleted"`
|
||||
Mime string `json:"mime"`
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
@@ -25,21 +26,42 @@ import (
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/ncw/rclone/lib/oauthutil"
|
||||
"github.com/ncw/rclone/lib/pacer"
|
||||
"github.com/ncw/rclone/lib/rest"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
// Globals
|
||||
const (
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
defaultDevice = "Jotta"
|
||||
defaultMountpoint = "Sync"
|
||||
rootURL = "https://www.jottacloud.com/jfs/"
|
||||
apiURL = "https://api.jottacloud.com"
|
||||
cachePrefix = "rclone-jcmd5-"
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
defaultDevice = "Jotta"
|
||||
defaultMountpoint = "Sync" // nolint
|
||||
rootURL = "https://www.jottacloud.com/jfs/"
|
||||
apiURL = "https://api.jottacloud.com/files/v1/"
|
||||
baseURL = "https://www.jottacloud.com/"
|
||||
tokenURL = "https://api.jottacloud.com/auth/v1/token"
|
||||
cachePrefix = "rclone-jcmd5-"
|
||||
rcloneClientID = "nibfk8biu12ju7hpqomr8b1e40"
|
||||
rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
|
||||
configUsername = "user"
|
||||
)
|
||||
|
||||
var (
|
||||
// Description of how to auth for this app for a personal account
|
||||
oauthConfig = &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: tokenURL,
|
||||
TokenURL: tokenURL,
|
||||
},
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
}
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -48,13 +70,71 @@ func init() {
|
||||
Name: "jottacloud",
|
||||
Description: "JottaCloud",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string, m configmap.Mapper) {
|
||||
tokenString, ok := m.Get("token")
|
||||
if ok && tokenString != "" {
|
||||
fmt.Printf("Already have a token - refresh?\n")
|
||||
if !config.Confirm() {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
username, ok := m.Get(configUsername)
|
||||
if !ok {
|
||||
log.Fatalf("No username defined")
|
||||
}
|
||||
password := config.GetPassword("Your Jottacloud password is only required during config and will not be stored.")
|
||||
|
||||
// prepare our token request with username and password
|
||||
srv := rest.NewClient(fshttp.NewClient(fs.Config))
|
||||
values := url.Values{}
|
||||
values.Set("grant_type", "PASSWORD")
|
||||
values.Set("password", password)
|
||||
values.Set("username", username)
|
||||
values.Set("client_id", oauthConfig.ClientID)
|
||||
values.Set("client_secret", oauthConfig.ClientSecret)
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
RootURL: oauthConfig.Endpoint.AuthURL,
|
||||
ContentType: "application/x-www-form-urlencoded",
|
||||
Parameters: values,
|
||||
}
|
||||
|
||||
var jsonToken api.TokenJSON
|
||||
resp, err := srv.CallJSON(&opts, nil, &jsonToken)
|
||||
if err != nil {
|
||||
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
|
||||
if resp != nil {
|
||||
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
|
||||
fmt.Printf("This account has 2 factor authentication enabled you will receive a verification code via SMS.\n")
|
||||
fmt.Printf("Enter verification code> ")
|
||||
authCode := config.ReadLine()
|
||||
authCode = strings.Replace(authCode, "-", "", -1) // the SMS received contains a pair of 3 digit numbers separated by '-' but wants a single 6 digit number
|
||||
opts.ExtraHeaders = make(map[string]string)
|
||||
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
|
||||
resp, err = srv.CallJSON(&opts, nil, &jsonToken)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to get resource token: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
var token oauth2.Token
|
||||
token.AccessToken = jsonToken.AccessToken
|
||||
token.RefreshToken = jsonToken.RefreshToken
|
||||
token.TokenType = jsonToken.TokenType
|
||||
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
|
||||
|
||||
// finally save them in the config
|
||||
err = oauthutil.PutToken(name, m, &token, true)
|
||||
if err != nil {
|
||||
log.Fatalf("Error while setting token: %s", err)
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: "user",
|
||||
Help: "User Name",
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "Password.",
|
||||
IsPassword: true,
|
||||
Name: configUsername,
|
||||
Help: "User Name:",
|
||||
}, {
|
||||
Name: "mountpoint",
|
||||
Help: "The mountpoint to use.",
|
||||
@@ -71,6 +151,21 @@ func init() {
|
||||
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
|
||||
Default: fs.SizeSuffix(10 * 1024 * 1024),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "hard_delete",
|
||||
Help: "Delete files permanently rather than putting them into the trash.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "unlink",
|
||||
Help: "Remove existing public link to file/folder with link command rather than creating.\nDefault is false, meaning link command will create or retrieve public link.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_resume_limit",
|
||||
Help: "Files bigger than this can be resumed if the upload fail's.",
|
||||
Default: fs.SizeSuffix(10 * 1024 * 1024),
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -78,21 +173,25 @@ func init() {
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
User string `config:"user"`
|
||||
Pass string `config:"pass"`
|
||||
Mountpoint string `config:"mountpoint"`
|
||||
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
|
||||
HardDelete bool `config:"hard_delete"`
|
||||
Unlink bool `config:"unlink"`
|
||||
UploadThreshold fs.SizeSuffix `config:"upload_resume_limit"`
|
||||
}
|
||||
|
||||
// Fs represents a remote jottacloud
|
||||
type Fs struct {
|
||||
name string
|
||||
root string
|
||||
user string
|
||||
opt Options
|
||||
features *fs.Features
|
||||
endpointURL string
|
||||
srv *rest.Client
|
||||
pacer *pacer.Pacer
|
||||
name string
|
||||
root string
|
||||
user string
|
||||
opt Options
|
||||
features *fs.Features
|
||||
endpointURL string
|
||||
srv *rest.Client
|
||||
apiSrv *rest.Client
|
||||
pacer *pacer.Pacer
|
||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||
}
|
||||
|
||||
// Object describes a jottacloud object
|
||||
@@ -185,7 +284,7 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
|
||||
func (f *Fs) getAccountInfo() (info *api.AccountInfo, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: rest.URLPathEscape(f.user),
|
||||
Path: urlPathEscape(f.user),
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
@@ -206,7 +305,7 @@ func (f *Fs) setEndpointURL(mountpoint string) (err error) {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get endpoint url")
|
||||
}
|
||||
f.endpointURL = rest.URLPathEscape(path.Join(info.Username, defaultDevice, mountpoint))
|
||||
f.endpointURL = urlPathEscape(path.Join(info.Username, defaultDevice, mountpoint))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -227,9 +326,19 @@ func errorHandler(resp *http.Response) error {
|
||||
return errResponse
|
||||
}
|
||||
|
||||
// Jottacloud wants '+' to be URL encoded even though the RFC states it's not reserved
|
||||
func urlPathEscape(in string) string {
|
||||
return strings.Replace(rest.URLPathEscape(in), "+", "%2B", -1)
|
||||
}
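urlPathEscape above post-processes rclone's rest.URLPathEscape; a stdlib-only approximation showing why the extra step is needed (the filename is illustrative):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	name := "a+b c.txt"
	std := url.PathEscape(name)                   // the stdlib leaves '+' alone in path segments
	jotta := strings.Replace(std, "+", "%2B", -1) // Jottacloud wants it encoded anyway
	fmt.Println(std)   // a+b%20c.txt
	fmt.Println(jotta) // a%2Bb%20c.txt
}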
|
||||
|
||||
// filePathRaw returns an unescaped file path (f.root, file)
|
||||
func (f *Fs) filePathRaw(file string) string {
|
||||
return path.Join(f.endpointURL, replaceReservedChars(path.Join(f.root, file)))
|
||||
}
|
||||
|
||||
// filePath returns a escaped file path (f.root, file)
|
||||
func (f *Fs) filePath(file string) string {
|
||||
return rest.URLPathEscape(path.Join(f.endpointURL, replaceReservedChars(path.Join(f.root, file))))
|
||||
return urlPathEscape(f.filePathRaw(file))
|
||||
}
|
||||
|
||||
// filePath returns a escaped file path (f.root, remote)
|
||||
@@ -237,6 +346,29 @@ func (o *Object) filePath() string {
|
||||
return o.fs.filePath(o.remote)
|
||||
}
|
||||
|
||||
// Jottacloud requires the grant_type 'refresh_token' string
|
||||
// to be uppercase and throws a 400 Bad Request if we use the
|
||||
// lower case used by the oauth2 module
|
||||
//
|
||||
// This filter catches all refresh requests, reads the body,
|
||||
// changes the case and then sends it on
|
||||
func grantTypeFilter(req *http.Request) {
|
||||
if tokenURL == req.URL.String() {
|
||||
// read the entire body
|
||||
refreshBody, err := ioutil.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_ = req.Body.Close()
|
||||
|
||||
// make the refresh token upper case
|
||||
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
|
||||
|
||||
// set the new ReadCloser (with a dummy Close())
|
||||
req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
|
||||
}
|
||||
}
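A standalone restatement of the filter above, runnable on its own, showing the body rewrite on a sample refresh request (the form body is made up):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

const tokenURL = "https://api.jottacloud.com/auth/v1/token"

// upcaseGrantType mirrors the filter above: rewrite the oauth2 refresh body
// so grant_type is upper case before the request goes out.
func upcaseGrantType(req *http.Request) {
	if req.URL.String() != tokenURL {
		return
	}
	body, err := ioutil.ReadAll(req.Body)
	if err != nil {
		return
	}
	_ = req.Body.Close()
	body = bytes.Replace(body, []byte("grant_type=refresh_token"), []byte("grant_type=REFRESH_TOKEN"), 1)
	req.Body = ioutil.NopCloser(bytes.NewReader(body))
}

func main() {
	form := "grant_type=refresh_token&refresh_token=abc"
	req, _ := http.NewRequest("POST", tokenURL, strings.NewReader(form))
	upcaseGrantType(req)
	out, _ := ioutil.ReadAll(req.Body)
	fmt.Println(string(out)) // grant_type=REFRESH_TOKEN&refresh_token=abc
}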
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
@@ -249,25 +381,29 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
rootIsDir := strings.HasSuffix(root, "/")
|
||||
root = parsePath(root)
|
||||
|
||||
user := config.FileGet(name, "user")
|
||||
pass := config.FileGet(name, "pass")
|
||||
|
||||
if opt.Pass != "" {
|
||||
var err error
|
||||
opt.Pass, err = obscure.Reveal(opt.Pass)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't decrypt password")
|
||||
}
|
||||
// the oauth client for the api servers needs
|
||||
// a filter to fix the grant_type issues (see above)
|
||||
baseClient := fshttp.NewClient(fs.Config)
|
||||
if do, ok := baseClient.Transport.(interface {
|
||||
SetRequestFilter(f func(req *http.Request))
|
||||
}); ok {
|
||||
do.SetRequestFilter(grantTypeFilter)
|
||||
} else {
|
||||
fs.Debugf(name+":", "Couldn't add request filter - uploads will fail")
|
||||
}
|
||||
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
user: opt.User,
|
||||
opt: *opt,
|
||||
//endpointURL: rest.URLPathEscape(path.Join(user, defaultDevice, opt.Mountpoint)),
|
||||
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(rootURL),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
name: name,
|
||||
root: root,
|
||||
user: opt.User,
|
||||
opt: *opt,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
@@ -275,14 +411,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
}).Fill(f)
|
||||
|
||||
if user == "" || pass == "" {
|
||||
return nil, errors.New("jottacloud needs user and password")
|
||||
}
|
||||
|
||||
f.srv.SetUserPass(opt.User, opt.Pass)
|
||||
f.srv.SetErrorHandler(errorHandler)
|
||||
|
||||
// Renew the token in the background
|
||||
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
||||
_, err := f.readMetaDataForPath("")
|
||||
return err
|
||||
})
|
||||
|
||||
err = f.setEndpointURL(opt.Mountpoint)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't get account info")
|
||||
@@ -307,7 +443,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
@@ -324,7 +459,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, e
|
||||
// Set info
|
||||
err = o.setMetaData(info)
|
||||
} else {
|
||||
err = o.readMetaData() // reads info and meta, returning an error
|
||||
err = o.readMetaData(false) // reads info and meta, returning an error
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -372,7 +507,7 @@ func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
//fmt.Printf("List: %s\n", dir)
|
||||
//fmt.Printf("List: %s\n", f.filePath(dir))
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: f.filePath(dir),
|
||||
@@ -425,6 +560,102 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// listFileDirFn is called from listFileDir to handle an object.
|
||||
type listFileDirFn func(fs.DirEntry) error
|
||||
|
||||
// List the objects and directories into entries, from a
|
||||
// special kind of JottaFolder representing a FileDirList
|
||||
func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, fn listFileDirFn) error {
|
||||
pathPrefix := "/" + f.filePathRaw("") // Non-escaped prefix of API paths to be cut off, to be left with the remote path including the remoteStartPath
|
||||
pathPrefixLength := len(pathPrefix)
|
||||
startPath := path.Join(pathPrefix, remoteStartPath) // Non-escaped API path up to and including remoteStartPath, to decide if it should be created as a new dir object
|
||||
startPathLength := len(startPath)
|
||||
for i := range startFolder.Folders {
|
||||
folder := &startFolder.Folders[i]
|
||||
if folder.Deleted {
|
||||
return nil
|
||||
}
|
||||
folderPath := restoreReservedChars(path.Join(folder.Path, folder.Name))
|
||||
folderPathLength := len(folderPath)
|
||||
var remoteDir string
|
||||
if folderPathLength > pathPrefixLength {
|
||||
remoteDir = folderPath[pathPrefixLength+1:]
|
||||
if folderPathLength > startPathLength {
|
||||
d := fs.NewDir(remoteDir, time.Time(folder.ModifiedAt))
|
||||
err := fn(d)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
for i := range folder.Files {
|
||||
file := &folder.Files[i]
|
||||
if file.Deleted || file.State != "COMPLETED" {
|
||||
continue
|
||||
}
|
||||
remoteFile := path.Join(remoteDir, restoreReservedChars(file.Name))
|
||||
o, err := f.newObjectWithInfo(remoteFile, file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = fn(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListR lists the objects and directories of the Fs starting
|
||||
// from dir recursively into out.
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
//
|
||||
// It should call callback for each tranche of entries read.
|
||||
// These need not be returned in any particular order. If
|
||||
// callback returns an error then the listing will stop
|
||||
// immediately.
|
||||
//
|
||||
// Don't implement this unless you have a more efficient way
|
||||
// of listing recursively than doing a directory traversal.
|
||||
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: f.filePath(dir),
|
||||
Parameters: url.Values{},
|
||||
}
|
||||
opts.Parameters.Set("mode", "list")
|
||||
|
||||
var resp *http.Response
|
||||
var result api.JottaFolder // Could be JottaFileDirList, but JottaFolder is close enough
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallXML(&opts, nil, &result)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
// does not exist
|
||||
if apiErr.StatusCode == http.StatusNotFound {
|
||||
return fs.ErrorDirNotFound
|
||||
}
|
||||
}
|
||||
return errors.Wrap(err, "couldn't list files")
|
||||
}
|
||||
list := walk.NewListRHelper(callback)
|
||||
err = f.listFileDir(dir, &result, func(entry fs.DirEntry) error {
|
||||
return list.Add(entry)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return list.Flush()
|
||||
}
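A hypothetical caller of the new ListR, assuming an already constructed backend; this is roughly the shape of call the fstests integration suite makes:

package listexample // hypothetical helper package, not part of the diff

import (
	"github.com/ncw/rclone/fs"
)

// CountEntries drives ListR with a simple callback. f is any backend
// implementing fs.ListRer, which the jottacloud Fs now does.
func CountEntries(f fs.ListRer) (files, dirs int, err error) {
	err = f.ListR("", func(entries fs.DirEntries) error {
		for _, entry := range entries {
			switch entry.(type) {
			case fs.Object:
				files++
			case fs.Directory:
				dirs++
			}
		}
		return nil
	})
	return files, dirs, err
}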
|
||||
|
||||
// Creates from the parameters passed in a half finished Object which
|
||||
// must have setMetaData called on it
|
||||
//
|
||||
@@ -498,7 +729,11 @@ func (f *Fs) purgeCheck(dir string, check bool) (err error) {
|
||||
NoResponse: true,
|
||||
}
|
||||
|
||||
opts.Parameters.Set("dlDir", "true")
|
||||
if f.opt.HardDelete {
|
||||
opts.Parameters.Set("rmDir", "true")
|
||||
} else {
|
||||
opts.Parameters.Set("dlDir", "true")
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
@@ -506,7 +741,7 @@ func (f *Fs) purgeCheck(dir string, check bool) (err error) {
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "rmdir failed")
|
||||
return errors.Wrap(err, "couldn't purge directory")
|
||||
}
|
||||
|
||||
// TODO: Parse response?
|
||||
@@ -534,7 +769,7 @@ func (f *Fs) Purge() error {
|
||||
return f.purgeCheck("", false)
|
||||
}
|
||||
|
||||
// copyOrMoves copys or moves directories or files depending on the mthod parameter
|
||||
// copyOrMoves copies or moves directories or files depending on the method parameter
|
||||
func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
@@ -552,7 +787,6 @@ func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err erro
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
@@ -579,7 +813,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
info, err := f.copyOrMove("cp", srcObj.filePath(), remote)
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "copy failed")
|
||||
return nil, errors.Wrap(err, "couldn't copy file")
|
||||
}
|
||||
|
||||
return f.newObjectWithInfo(remote, info)
|
||||
@@ -609,7 +843,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
info, err := f.copyOrMove("mv", srcObj.filePath(), remote)
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "move failed")
|
||||
return nil, errors.Wrap(err, "couldn't move file")
|
||||
}
|
||||
|
||||
return f.newObjectWithInfo(remote, info)
|
||||
@@ -653,11 +887,57 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
||||
_, err = f.copyOrMove("mvDir", path.Join(f.endpointURL, replaceReservedChars(srcPath))+"/", dstRemote)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "moveDir failed")
|
||||
return errors.Wrap(err, "couldn't move directory")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PublicLink generates a public link to the remote path (usually readable by anyone)
|
||||
func (f *Fs) PublicLink(remote string) (link string, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: f.filePath(remote),
|
||||
Parameters: url.Values{},
|
||||
}
|
||||
|
||||
if f.opt.Unlink {
|
||||
opts.Parameters.Set("mode", "disableShare")
|
||||
} else {
|
||||
opts.Parameters.Set("mode", "enableShare")
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
var result api.JottaFile
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallXML(&opts, nil, &result)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
// does not exist
|
||||
if apiErr.StatusCode == http.StatusNotFound {
|
||||
return "", fs.ErrorObjectNotFound
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
if f.opt.Unlink {
|
||||
return "", errors.Wrap(err, "couldn't remove public link")
|
||||
}
|
||||
return "", errors.Wrap(err, "couldn't create public link")
|
||||
}
|
||||
if f.opt.Unlink {
|
||||
if result.PublicSharePath != "" {
|
||||
return "", errors.Errorf("couldn't remove public link - %q", result.PublicSharePath)
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
if result.PublicSharePath == "" {
|
||||
return "", errors.New("couldn't create public link - no link path received")
|
||||
}
|
||||
link = path.Join(baseURL, result.PublicSharePath)
|
||||
return link, nil
|
||||
}
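A minimal sketch of driving the new link support through the generic fs.PublicLinker interface; the helper and the remote name are illustrative only:

package jottacloudexample // hypothetical helper package, not part of the diff

import (
	"fmt"
	"log"

	"github.com/ncw/rclone/fs"
)

// ShareFile prints a public link for remote, for any backend that implements
// fs.PublicLinker (as jottacloud now does).
func ShareFile(f fs.PublicLinker, remote string) {
	link, err := f.PublicLink(remote)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("shared:", link)
}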
|
||||
|
||||
// About gets quota information
|
||||
func (f *Fs) About() (*fs.Usage, error) {
|
||||
info, err := f.getAccountInfo()
|
||||
@@ -666,8 +946,11 @@ func (f *Fs) About() (*fs.Usage, error) {
|
||||
}
|
||||
|
||||
usage := &fs.Usage{
|
||||
Total: fs.NewUsageValue(info.Capacity),
|
||||
Used: fs.NewUsageValue(info.Usage),
|
||||
Used: fs.NewUsageValue(info.Usage),
|
||||
}
|
||||
if info.Capacity > 0 {
|
||||
usage.Total = fs.NewUsageValue(info.Capacity)
|
||||
usage.Free = fs.NewUsageValue(info.Capacity - info.Usage)
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
@@ -707,7 +990,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *Object) Size() int64 {
|
||||
err := o.readMetaData()
|
||||
err := o.readMetaData(false)
|
||||
if err != nil {
|
||||
fs.Logf(o, "Failed to read metadata: %v", err)
|
||||
return 0
|
||||
@@ -723,21 +1006,24 @@ func (o *Object) MimeType() string {
|
||||
// setMetaData sets the metadata from info
|
||||
func (o *Object) setMetaData(info *api.JottaFile) (err error) {
|
||||
o.hasMetaData = true
|
||||
o.size = int64(info.Size)
|
||||
o.size = info.Size
|
||||
o.md5 = info.MD5
|
||||
o.mimeType = info.MimeType
|
||||
o.modTime = time.Time(info.ModifiedAt)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Object) readMetaData() (err error) {
|
||||
if o.hasMetaData {
|
||||
func (o *Object) readMetaData(force bool) (err error) {
|
||||
if o.hasMetaData && !force {
|
||||
return nil
|
||||
}
|
||||
info, err := o.fs.readMetaDataForPath(o.remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info.Deleted {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
return o.setMetaData(info)
|
||||
}
|
||||
|
||||
@@ -746,7 +1032,7 @@ func (o *Object) readMetaData() (err error) {
|
||||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime() time.Time {
|
||||
err := o.readMetaData()
|
||||
err := o.readMetaData(false)
|
||||
if err != nil {
|
||||
fs.Logf(o, "Failed to read metadata: %v", err)
|
||||
return time.Now()
|
||||
@@ -794,7 +1080,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader, cleanup func(), err error) {
|
||||
// we need a MD5
|
||||
md5Hasher := md5.New()
|
||||
// use the teeReader to write to the local file AND caclulate the MD5 while doing so
|
||||
// use the teeReader to write to the local file AND calculate the MD5 while doing so
|
||||
teeReader := io.TeeReader(in, md5Hasher)
|
||||
|
||||
// nothing to clean up by default
|
||||
@@ -867,43 +1153,74 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
in = wrap(in)
|
||||
}
|
||||
|
||||
// use the api to allocate the file first and get resume / deduplication info
|
||||
var resp *http.Response
|
||||
var result api.JottaFile
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: o.filePath(),
|
||||
Body: in,
|
||||
ContentType: fs.MimeType(src),
|
||||
ContentLength: &size,
|
||||
ExtraHeaders: make(map[string]string),
|
||||
Parameters: url.Values{},
|
||||
Method: "POST",
|
||||
Path: "allocate",
|
||||
ExtraHeaders: make(map[string]string),
|
||||
}
|
||||
fileDate := api.Time(src.ModTime()).APIString()
|
||||
|
||||
// the allocate request
|
||||
var request = api.AllocateFileRequest{
|
||||
Bytes: size,
|
||||
Created: fileDate,
|
||||
Modified: fileDate,
|
||||
Md5: md5String,
|
||||
Path: path.Join(o.fs.opt.Mountpoint, replaceReservedChars(path.Join(o.fs.root, o.remote))),
|
||||
}
|
||||
|
||||
opts.ExtraHeaders["JMd5"] = md5String
|
||||
opts.Parameters.Set("cphash", md5String)
|
||||
opts.ExtraHeaders["JSize"] = strconv.FormatInt(size, 10)
|
||||
// opts.ExtraHeaders["JCreated"] = api.Time(src.ModTime()).String()
|
||||
opts.ExtraHeaders["JModified"] = api.Time(src.ModTime()).String()
|
||||
|
||||
// Parameters observed in other implementations
|
||||
//opts.ExtraHeaders["X-Jfs-DeviceName"] = "Jotta"
|
||||
//opts.ExtraHeaders["X-Jfs-Devicename-Base64"] = ""
|
||||
//opts.ExtraHeaders["X-Jftp-Version"] = "2.4" this appears to be the current version
|
||||
//opts.ExtraHeaders["jx_csid"] = ""
|
||||
//opts.ExtraHeaders["jx_lisence"] = ""
|
||||
|
||||
opts.Parameters.Set("umode", "nomultipart")
|
||||
|
||||
// send it
|
||||
var response api.AllocateFileResponse
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err = o.fs.srv.CallXML(&opts, nil, &result)
|
||||
resp, err = o.fs.apiSrv.CallJSON(&opts, &request, &response)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO: Check returned Metadata? Timeout on big uploads?
|
||||
return o.setMetaData(&result)
|
||||
// If the file state is INCOMPLETE or CORRUPT, upload the remaining bytes
|
||||
if response.State != "COMPLETED" {
|
||||
// how much do we still have to upload?
|
||||
remainingBytes := size - response.ResumePos
|
||||
opts = rest.Opts{
|
||||
Method: "POST",
|
||||
RootURL: response.UploadURL,
|
||||
ContentLength: &remainingBytes,
|
||||
ContentType: "application/octet-stream",
|
||||
Body: in,
|
||||
ExtraHeaders: make(map[string]string),
|
||||
}
|
||||
if response.ResumePos != 0 {
|
||||
opts.ExtraHeaders["Range"] = "bytes=" + strconv.FormatInt(response.ResumePos, 10) + "-" + strconv.FormatInt(size-1, 10)
|
||||
}
|
||||
|
||||
// copy the already uploaded bytes into the trash :)
|
||||
var result api.UploadResponse
|
||||
_, err = io.CopyN(ioutil.Discard, in, response.ResumePos)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// send the remaining bytes
|
||||
resp, err = o.fs.apiSrv.CallJSON(&opts, nil, &result)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// finally update the meta data
|
||||
o.hasMetaData = true
|
||||
o.size = result.Bytes
|
||||
o.md5 = result.Md5
|
||||
o.modTime = time.Unix(result.Modified/1000, 0)
|
||||
} else {
|
||||
// If the file state is COMPLETE we don't need to upload it because the file was already found, but we still need to update our metadata
|
||||
return o.readMetaData(true)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
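The resume handling above sends only the bytes the server is missing, using a Range header built from resume_pos. A tiny standalone sketch of that arithmetic (the sizes are made up):

package main

import "fmt"

func main() {
	// after the allocate call, resume_pos says how much the server already
	// has; the remaining bytes are sent with a Range header as above
	size := int64(10 * 1024 * 1024)
	resumePos := int64(4 * 1024 * 1024)
	remaining := size - resumePos
	rangeHeader := fmt.Sprintf("bytes=%d-%d", resumePos, size-1)
	fmt.Println(remaining)   // 6291456
	fmt.Println(rangeHeader) // bytes=4194304-10485759
}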
|
||||
|
||||
// Remove an object
|
||||
@@ -912,9 +1229,14 @@ func (o *Object) Remove() error {
|
||||
Method: "POST",
|
||||
Path: o.filePath(),
|
||||
Parameters: url.Values{},
|
||||
NoResponse: true,
|
||||
}
|
||||
|
||||
opts.Parameters.Set("dl", "true")
|
||||
if o.fs.opt.HardDelete {
|
||||
opts.Parameters.Set("rm", "true")
|
||||
} else {
|
||||
opts.Parameters.Set("dl", "true")
|
||||
}
|
||||
|
||||
return o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := o.fs.srv.CallXML(&opts, nil, nil)
|
||||
@@ -924,12 +1246,14 @@ func (o *Object) Remove() error {
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.MimeTyper = (*Object)(nil)
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.ListRer = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.MimeTyper = (*Object)(nil)
|
||||
)
|
||||
|
||||
@@ -4,50 +4,25 @@ import (
|
||||
"crypto/md5"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// A test reader to return a test pattern of size
|
||||
type testReader struct {
|
||||
size int64
|
||||
c byte
|
||||
}
|
||||
|
||||
// Reader is the interface that wraps the basic Read method.
|
||||
func (r *testReader) Read(p []byte) (n int, err error) {
|
||||
for i := range p {
|
||||
if r.size <= 0 {
|
||||
return n, io.EOF
|
||||
}
|
||||
p[i] = r.c
|
||||
r.c = (r.c + 1) % 253
|
||||
r.size--
|
||||
n++
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func TestReadMD5(t *testing.T) {
|
||||
// smoke test the reader
|
||||
b, err := ioutil.ReadAll(&testReader{size: 10})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, b)
|
||||
|
||||
// Check readMD5 for different size and threshold
|
||||
for _, size := range []int64{0, 1024, 10 * 1024, 100 * 1024} {
|
||||
t.Run(fmt.Sprintf("%d", size), func(t *testing.T) {
|
||||
hasher := md5.New()
|
||||
n, err := io.Copy(hasher, &testReader{size: size})
|
||||
n, err := io.Copy(hasher, readers.NewPatternReader(size))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, n, size)
|
||||
wantMD5 := fmt.Sprintf("%x", hasher.Sum(nil))
|
||||
for _, threshold := range []int64{512, 1024, 10 * 1024, 20 * 1024} {
|
||||
t.Run(fmt.Sprintf("%d", threshold), func(t *testing.T) {
|
||||
in := &testReader{size: size}
|
||||
in := readers.NewPatternReader(size)
|
||||
gotMD5, out, cleanup, err := readMD5(in, size, threshold)
|
||||
defer cleanup()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
Translate file names for JottaCloud adapted from OneDrive
|
||||
|
||||
|
||||
The following characters are JottaClous reserved characters, and can't
|
||||
The following characters are JottaCloud reserved characters, and can't
|
||||
be used in JottaCloud folder and file names.
|
||||
|
||||
jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"
|
||||
@@ -27,21 +27,14 @@ import (
|
||||
var (
|
||||
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
'+': '＋', // FULLWIDTH PLUS SIGN
'*': '＊', // FULLWIDTH ASTERISK
'<': '＜', // FULLWIDTH LESS-THAN SIGN
'>': '＞', // FULLWIDTH GREATER-THAN SIGN
'?': '？', // FULLWIDTH QUESTION MARK
'!': '！', // FULLWIDTH EXCLAMATION MARK
'&': '＆', // FULLWIDTH AMPERSAND
':': '：', // FULLWIDTH COLON
';': '；', // FULLWIDTH SEMICOLON
'|': '｜', // FULLWIDTH VERTICAL LINE
'#': '＃', // FULLWIDTH NUMBER SIGN
'%': '％', // FULLWIDTH PERCENT SIGN
'"': '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
'\'': '＇', // FULLWIDTH APOSTROPHE
'~': '～', // FULLWIDTH TILDE
' ': '␠', // SYMBOL FOR SPACE
}
|
||||
invCharMap map[rune]rune
|
||||
|
||||
@@ -9,8 +9,8 @@ func TestReplace(t *testing.T) {
|
||||
}{
|
||||
{"", ""},
|
||||
{"abc 123", "abc 123"},
|
||||
{`\+*<>?!&:;|#%"'~`, `＼＋＊＜＞？！＆：；｜＃％＂＇～`},
{`\+*<>?!&:;|#%"'~\+*<>?!&:;|#%"'~`, `＼＋＊＜＞？！＆：；｜＃％＂＇～＼＋＊＜＞？！＆：；｜＃％＂＇～`},
{`\*<>?:;|"`, `＼＊＜＞？：；｜＂`},
{`\*<>?:;|"\*<>?:;|"`, `＼＊＜＞？：；｜＂＼＊＜＞？：；｜＂`},
|
||||
{" leading space", "␠leading space"},
|
||||
{"trailing space ", "trailing space␠"},
|
||||
{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
|
||||
|
||||
@@ -16,7 +16,7 @@ func (f *Fs) About() (*fs.Usage, error) {
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read disk usage")
|
||||
}
|
||||
bs := int64(s.Bsize)
|
||||
bs := int64(s.Bsize) // nolint: unconvert
|
||||
usage := &fs.Usage{
|
||||
Total: fs.NewUsageValue(bs * int64(s.Blocks)), // quota of bytes that can be used
|
||||
Used: fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use
|
||||
|
||||
20
backend/local/lchtimes.go
Normal file
@@ -0,0 +1,20 @@
|
||||
// +build windows plan9
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
const haveLChtimes = false
|
||||
|
||||
// lChtimes changes the access and modification times of the named
|
||||
// link, similar to the Unix utime() or utimes() functions.
|
||||
//
|
||||
// The underlying filesystem may truncate or round the values to a
|
||||
// less precise time unit.
|
||||
// If there is an error, it will be of type *PathError.
|
||||
func lChtimes(name string, atime time.Time, mtime time.Time) error {
|
||||
// Does nothing
|
||||
return nil
|
||||
}
|
||||
28
backend/local/lchtimes_unix.go
Normal file
@@ -0,0 +1,28 @@
|
||||
// +build !windows,!plan9
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const haveLChtimes = true
|
||||
|
||||
// lChtimes changes the access and modification times of the named
|
||||
// link, similar to the Unix utime() or utimes() functions.
|
||||
//
|
||||
// The underlying filesystem may truncate or round the values to a
|
||||
// less precise time unit.
|
||||
// If there is an error, it will be of type *PathError.
|
||||
func lChtimes(name string, atime time.Time, mtime time.Time) error {
|
||||
var utimes [2]unix.Timespec
|
||||
utimes[0] = unix.NsecToTimespec(atime.UnixNano())
|
||||
utimes[1] = unix.NsecToTimespec(mtime.UnixNano())
|
||||
if e := unix.UtimesNanoAt(unix.AT_FDCWD, name, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); e != nil {
|
||||
return &os.PathError{Op: "lchtimes", Path: name, Err: e}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
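lChtimes is what lets the backend stamp a symlink's own access and modification times rather than its target's; the unix build uses UtimesNanoAt with AT_SYMLINK_NOFOLLOW, while the windows/plan9 stub is a no-op and haveLChtimes is false. A small hedged usage sketch (the file name and times are illustrative):

func setLinkTimesExample() {
	when := time.Date(2002, 2, 3, 4, 5, 10, 0, time.UTC)
	if err := lChtimes("symlink.txt", when, when); err != nil {
		log.Printf("lchtimes failed: %v", err)
	}
	if !haveLChtimes {
		log.Println("symlink mod times cannot be set on this platform")
	}
}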
@@ -2,6 +2,7 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -16,15 +17,19 @@ import (
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/file"
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Constants
|
||||
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
|
||||
const linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
@@ -47,19 +52,40 @@ func init() {
|
||||
ShortOpt: "L",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "skip_links",
|
||||
Help: "Don't warn about skipped symlinks.",
|
||||
Name: "links",
|
||||
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "l",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "skip_links",
|
||||
Help: `Don't warn about skipped symlinks.
|
||||
This flag disables warning messages on skipped symlinks or junction
|
||||
points, as you explicitly acknowledge that they should be skipped.`,
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_unicode_normalization",
|
||||
Help: "Don't apply unicode normalization to paths and filenames",
|
||||
Name: "no_unicode_normalization",
|
||||
Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
|
||||
This flag is deprecated now. Rclone no longer normalizes unicode file
|
||||
names, but it compares them with unicode normalization in the sync
|
||||
routine instead.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_check_updated",
|
||||
Help: "Don't check to see if the files change during upload",
|
||||
Name: "no_check_updated",
|
||||
Help: `Don't check to see if the files change during upload
|
||||
|
||||
Normally rclone checks the size and modification time of files as they
|
||||
are being uploaded and aborts with a message which starts "can't copy
|
||||
- source file is being updated" if the file changes during upload.
|
||||
|
||||
However on some file systems this modification time check may fail (eg
|
||||
[Glusterfs #2206](https://github.com/ncw/rclone/issues/2206)) so this
|
||||
check can be disabled with this flag.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
@@ -76,12 +102,13 @@ func init() {
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
FollowSymlinks bool `config:"copy_links"`
|
||||
SkipSymlinks bool `config:"skip_links"`
|
||||
NoUTFNorm bool `config:"no_unicode_normalization"`
|
||||
NoCheckUpdated bool `config:"no_check_updated"`
|
||||
NoUNC bool `config:"nounc"`
|
||||
OneFileSystem bool `config:"one_file_system"`
|
||||
FollowSymlinks bool `config:"copy_links"`
|
||||
TranslateSymlinks bool `config:"links"`
|
||||
SkipSymlinks bool `config:"skip_links"`
|
||||
NoUTFNorm bool `config:"no_unicode_normalization"`
|
||||
NoCheckUpdated bool `config:"no_check_updated"`
|
||||
NoUNC bool `config:"nounc"`
|
||||
OneFileSystem bool `config:"one_file_system"`
|
||||
}
|
||||
|
||||
// Fs represents a local filesystem rooted at root
|
||||
@@ -103,17 +130,20 @@ type Fs struct {
|
||||
|
||||
// Object represents a local filesystem object
|
||||
type Object struct {
|
||||
fs *Fs // The Fs this object is part of
|
||||
remote string // The remote path - properly UTF-8 encoded - for rclone
|
||||
path string // The local path - may not be properly UTF-8 encoded - for OS
|
||||
size int64 // file metadata - always present
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
hashes map[hash.Type]string // Hashes
|
||||
fs *Fs // The Fs this object is part of
|
||||
remote string // The remote path - properly UTF-8 encoded - for rclone
|
||||
path string // The local path - may not be properly UTF-8 encoded - for OS
|
||||
size int64 // file metadata - always present
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
hashes map[hash.Type]string // Hashes
|
||||
translatedLink bool // Is this object a translated link
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
|
||||
|
||||
// NewFs constructs an Fs from the path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
@@ -122,6 +152,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.TranslateSymlinks && opt.FollowSymlinks {
|
||||
return nil, errLinksAndCopyLinks
|
||||
}
|
||||
|
||||
if opt.NoUTFNorm {
|
||||
fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
|
||||
@@ -149,7 +182,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err == nil {
|
||||
f.dev = readDevice(fi, f.opt.OneFileSystem)
|
||||
}
|
||||
if err == nil && fi.Mode().IsRegular() {
|
||||
if err == nil && f.isRegular(fi.Mode()) {
|
||||
// It is a file, so use the parent as the root
|
||||
f.root = filepath.Dir(f.root)
|
||||
// return an error with an fs which points to the parent
|
||||
@@ -158,6 +191,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Determine whether a file is a 'regular' file,
|
||||
// Symlinks are regular files, only if the TranslateSymlink
|
||||
// option is in-effect
|
||||
func (f *Fs) isRegular(mode os.FileMode) bool {
|
||||
if !f.opt.TranslateSymlinks {
|
||||
return mode.IsRegular()
|
||||
}
|
||||
|
||||
// fi.Mode().IsRegular() tests that all mode bits are zero
|
||||
// Since symlinks are accepted, test that all other bits are zero,
|
||||
// except the symlink bit
|
||||
return mode&os.ModeType&^os.ModeSymlink == 0
|
||||
}
|
||||
|
||||
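The bitmask in isRegular clears the symlink bit out of os.ModeType before testing, so with --links enabled a symlink counts as regular while directories, pipes and devices still do not. A quick illustration with made-up modes:

func exampleModeCheck() {
	symlinkMode := os.ModeSymlink | 0644
	dirMode := os.ModeDir | 0755
	fmt.Println(symlinkMode&os.ModeType&^os.ModeSymlink == 0) // true  - treated as regular
	fmt.Println(dirMode&os.ModeType&^os.ModeSymlink == 0)     // false - still not a file
}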
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
@@ -178,28 +225,48 @@ func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// caseInsenstive returns whether the remote is case insensitive or not
|
||||
// caseInsensitive returns whether the remote is case insensitive or not
|
||||
func (f *Fs) caseInsensitive() bool {
|
||||
// FIXME not entirely accurate since you can have case
|
||||
// sensitive Fses on darwin and case insenstive Fses on linux.
|
||||
// sensitive Fses on darwin and case insensitive Fses on linux.
|
||||
// Should probably check but that would involve creating a
|
||||
// file in the remote to be most accurate which probably isn't
|
||||
// desirable.
|
||||
return runtime.GOOS == "windows" || runtime.GOOS == "darwin"
|
||||
}
|
||||
|
||||
// translateLink checks whether the remote is a translated link
|
||||
// and returns a new path, removing the suffix as needed,
|
||||
// It also returns whether this is a translated link at all
|
||||
//
|
||||
// for regular files, dstPath is returned unchanged
|
||||
func translateLink(remote, dstPath string) (newDstPath string, isTranslatedLink bool) {
|
||||
isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
|
||||
newDstPath = strings.TrimSuffix(dstPath, linkSuffix)
|
||||
return newDstPath, isTranslatedLink
|
||||
}
|
||||
|
||||
// newObject makes a half completed Object
|
||||
//
|
||||
// if dstPath is empty then it is made from remote
|
||||
func (f *Fs) newObject(remote, dstPath string) *Object {
|
||||
translatedLink := false
|
||||
|
||||
if dstPath == "" {
|
||||
dstPath = f.cleanPath(filepath.Join(f.root, remote))
|
||||
}
|
||||
remote = f.cleanRemote(remote)
|
||||
|
||||
if f.opt.TranslateSymlinks {
|
||||
// Possibly receive a new name for dstPath
|
||||
dstPath, translatedLink = translateLink(remote, dstPath)
|
||||
}
|
||||
|
||||
return &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
path: dstPath,
|
||||
fs: f,
|
||||
remote: remote,
|
||||
path: dstPath,
|
||||
translatedLink: translatedLink,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -221,6 +288,11 @@ func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Obj
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// Handle the odd case, that a symlink was specified by name without the link suffix
|
||||
if o.fs.opt.TranslateSymlinks && o.mode&os.ModeSymlink != 0 && !o.translatedLink {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
}
|
||||
if o.mode.IsDir() {
|
||||
return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
|
||||
@@ -244,6 +316,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
|
||||
dir = f.dirNames.Load(dir)
|
||||
fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
|
||||
remote := f.cleanRemote(dir)
|
||||
@@ -280,6 +353,13 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
// Follow symlinks if required
|
||||
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
|
||||
fi, err = os.Stat(newPath)
|
||||
if os.IsNotExist(err) {
|
||||
// Skip bad symlinks
|
||||
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
|
||||
fs.Errorf(newRemote, "Listing error: %v", err)
|
||||
accounting.Stats.Error(err)
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -293,6 +373,10 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
entries = append(entries, d)
|
||||
}
|
||||
} else {
|
||||
// Check whether this link should be translated
|
||||
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
|
||||
newRemote += linkSuffix
|
||||
}
|
||||
fso, err := f.newObjectWithInfo(newRemote, newPath, fi)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -506,7 +590,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
// OK
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
} else if !dstObj.mode.IsRegular() {
|
||||
} else if !dstObj.fs.isRegular(dstObj.mode) {
|
||||
// It isn't a file
|
||||
return nil, errors.New("can't move file onto non-file")
|
||||
}
|
||||
@@ -628,7 +712,13 @@ func (o *Object) Hash(r hash.Type) (string, error) {
|
||||
o.fs.objectHashesMu.Unlock()
|
||||
|
||||
if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
|
||||
in, err := os.Open(o.path)
|
||||
var in io.ReadCloser
|
||||
|
||||
if !o.translatedLink {
|
||||
in, err = file.Open(o.path)
|
||||
} else {
|
||||
in, err = o.openTranslatedLink(0, -1)
|
||||
}
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "hash: failed to open")
|
||||
}
|
||||
@@ -659,7 +749,12 @@ func (o *Object) ModTime() time.Time {
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(modTime time.Time) error {
|
||||
err := os.Chtimes(o.path, modTime, modTime)
|
||||
var err error
|
||||
if o.translatedLink {
|
||||
err = lChtimes(o.path, modTime, modTime)
|
||||
} else {
|
||||
err = os.Chtimes(o.path, modTime, modTime)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -677,7 +772,7 @@ func (o *Object) Storable() bool {
|
||||
}
|
||||
}
|
||||
mode := o.mode
|
||||
if mode&os.ModeSymlink != 0 {
|
||||
if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {
|
||||
if !o.fs.opt.SkipSymlinks {
|
||||
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
|
||||
}
|
||||
@@ -738,6 +833,16 @@ func (file *localOpenFile) Close() (err error) {
|
||||
return err
|
||||
}
|
||||
|
||||
// Returns a ReadCloser() object that contains the contents of a symbolic link
|
||||
func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err error) {
|
||||
// Read the link and return the destination it as the contents of the object
|
||||
linkdst, err := os.Readlink(o.path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
|
||||
}
|
||||
|
||||
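openTranslatedLink serves the link's target path as the object's data, so range requests are just a slice of that string. A hedged standalone sketch of the same idea using only the standard library (readers.NewLimitedReadCloser is assumed to behave like io.LimitReader plus a Close passthrough):

// linkContents returns a symlink's destination as file content, honouring
// an offset and an optional limit (limit < 0 means unlimited).
func linkContents(path string, offset, limit int64) (io.ReadCloser, error) {
	dst, err := os.Readlink(path)
	if err != nil {
		return nil, err
	}
	r := io.Reader(strings.NewReader(dst[offset:]))
	if limit >= 0 {
		r = io.LimitReader(r, limit)
	}
	return ioutil.NopCloser(r), nil
}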
// Open an object for read
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
var offset, limit int64 = 0, -1
|
||||
@@ -757,7 +862,12 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
}
|
||||
}
|
||||
|
||||
fd, err := os.Open(o.path)
|
||||
// Handle a translated link
|
||||
if o.translatedLink {
|
||||
return o.openTranslatedLink(offset, limit)
|
||||
}
|
||||
|
||||
fd, err := file.Open(o.path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -788,8 +898,19 @@ func (o *Object) mkdirAll() error {
|
||||
return os.MkdirAll(dir, 0777)
|
||||
}
|
||||
|
||||
type nopWriterCloser struct {
|
||||
*bytes.Buffer
|
||||
}
|
||||
|
||||
func (nwc nopWriterCloser) Close() error {
|
||||
// noop
|
||||
return nil
|
||||
}
|
||||
|
||||
// Update the object from in with modTime and size
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
var out io.WriteCloser
|
||||
|
||||
hashes := hash.Supported
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
@@ -803,9 +924,23 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
return err
|
||||
}
|
||||
|
||||
out, err := os.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
|
||||
if err != nil {
|
||||
return err
|
||||
var symlinkData bytes.Buffer
|
||||
// If the object is a regular file, create it.
|
||||
// If it is a translated link, just read in the contents, and
|
||||
// then create a symlink
|
||||
if !o.translatedLink {
|
||||
f, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Pre-allocate the file for performance reasons
|
||||
err = preAllocate(src.Size(), f)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Failed to pre-allocate: %v", err)
|
||||
}
|
||||
out = f
|
||||
} else {
|
||||
out = nopWriterCloser{&symlinkData}
|
||||
}
|
||||
|
||||
// Calculate the hash of the object we are reading as we go along
|
||||
@@ -820,6 +955,26 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
if err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
|
||||
if o.translatedLink {
|
||||
if err == nil {
|
||||
// Remove any current symlink or file, if one exists
|
||||
if _, err := os.Lstat(o.path); err == nil {
|
||||
if removeErr := os.Remove(o.path); removeErr != nil {
|
||||
fs.Errorf(o, "Failed to remove previous file: %v", removeErr)
|
||||
return removeErr
|
||||
}
|
||||
}
|
||||
// Use the contents for the copied object to create a symlink
|
||||
err = os.Symlink(symlinkData.String(), o.path)
|
||||
}
|
||||
|
||||
// only continue if symlink creation succeeded
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
fs.Logf(o, "Removing partially written file on error: %v", err)
|
||||
if removeErr := os.Remove(o.path); removeErr != nil {
|
||||
|
||||
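For a translated link, Update buffers the incoming data in memory via nopWriterCloser instead of writing a file, then rebuilds the symlink from that buffer. A compact, hedged sketch of that final step on its own:

// writeSymlink replaces whatever currently sits at path with a symlink to
// target, surfacing the removal error rather than ignoring it.
func writeSymlink(path, target string) error {
	if _, err := os.Lstat(path); err == nil {
		if err := os.Remove(path); err != nil {
			return err
		}
	}
	return os.Symlink(target, path)
}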
@@ -1,13 +1,19 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/ncw/rclone/lib/file"
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -38,10 +44,13 @@ func TestUpdatingCheck(t *testing.T) {
|
||||
filePath := "sub dir/local test"
|
||||
r.WriteFile(filePath, "content", time.Now())
|
||||
|
||||
fd, err := os.Open(path.Join(r.LocalName, filePath))
|
||||
fd, err := file.Open(path.Join(r.LocalName, filePath))
|
||||
if err != nil {
|
||||
t.Fatalf("failed opening file %q: %v", filePath, err)
|
||||
}
|
||||
defer func() {
|
||||
require.NoError(t, fd.Close())
|
||||
}()
|
||||
|
||||
fi, err := fd.Stat()
|
||||
require.NoError(t, err)
|
||||
@@ -72,3 +81,108 @@ func TestUpdatingCheck(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
}
|
||||
|
||||
func TestSymlink(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
f := r.Flocal.(*Fs)
|
||||
dir := f.root
|
||||
|
||||
// Write a file
|
||||
modTime1 := fstest.Time("2001-02-03T04:05:10.123123123Z")
|
||||
file1 := r.WriteFile("file.txt", "hello", modTime1)
|
||||
|
||||
// Write a symlink
|
||||
modTime2 := fstest.Time("2002-02-03T04:05:10.123123123Z")
|
||||
symlinkPath := filepath.Join(dir, "symlink.txt")
|
||||
require.NoError(t, os.Symlink("file.txt", symlinkPath))
|
||||
require.NoError(t, lChtimes(symlinkPath, modTime2, modTime2))
|
||||
|
||||
// Object viewed as symlink
|
||||
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
|
||||
if runtime.GOOS == "windows" {
|
||||
file2.Size = 0 // symlinks are 0 length under Windows
|
||||
}
|
||||
|
||||
// Object viewed as destination
|
||||
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
|
||||
|
||||
// Check with no symlink flags
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
fstest.CheckItems(t, r.Fremote)
|
||||
|
||||
// Set fs into "-L" mode
|
||||
f.opt.FollowSymlinks = true
|
||||
f.opt.TranslateSymlinks = false
|
||||
f.lstat = os.Stat
|
||||
|
||||
fstest.CheckItems(t, r.Flocal, file1, file2d)
|
||||
fstest.CheckItems(t, r.Fremote)
|
||||
|
||||
// Set fs into "-l" mode
|
||||
f.opt.FollowSymlinks = false
|
||||
f.opt.TranslateSymlinks = true
|
||||
f.lstat = os.Lstat
|
||||
|
||||
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2}, nil, fs.ModTimeNotSupported)
|
||||
if haveLChtimes {
|
||||
fstest.CheckItems(t, r.Flocal, file1, file2)
|
||||
}
|
||||
|
||||
// Create a symlink
|
||||
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
|
||||
file3 := r.WriteObjectTo(r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
|
||||
if runtime.GOOS == "windows" {
|
||||
file3.Size = 0 // symlinks are 0 length under Windows
|
||||
}
|
||||
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
|
||||
if haveLChtimes {
|
||||
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
|
||||
}
|
||||
|
||||
// Check it got the correct contents
|
||||
symlinkPath = filepath.Join(dir, "symlink2.txt")
|
||||
fi, err := os.Lstat(symlinkPath)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, fi.Mode().IsRegular())
|
||||
linkText, err := os.Readlink(symlinkPath)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "file.txt", linkText)
|
||||
|
||||
// Check that NewObject gets the correct object
|
||||
o, err := r.Flocal.NewObject("symlink2.txt" + linkSuffix)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
|
||||
if runtime.GOOS != "windows" {
|
||||
assert.Equal(t, int64(8), o.Size())
|
||||
}
|
||||
|
||||
// Check that NewObject doesn't see the non suffixed version
|
||||
_, err = r.Flocal.NewObject("symlink2.txt")
|
||||
require.Equal(t, fs.ErrorObjectNotFound, err)
|
||||
|
||||
// Check reading the object
|
||||
in, err := o.Open()
|
||||
require.NoError(t, err)
|
||||
contents, err := ioutil.ReadAll(in)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "file.txt", string(contents))
|
||||
require.NoError(t, in.Close())
|
||||
|
||||
// Check reading the object with range
|
||||
in, err = o.Open(&fs.RangeOption{Start: 2, End: 5})
|
||||
require.NoError(t, err)
|
||||
contents, err = ioutil.ReadAll(in)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "file.txt"[2:5+1], string(contents))
|
||||
require.NoError(t, in.Close())
|
||||
}
|
||||
|
||||
func TestSymlinkError(t *testing.T) {
|
||||
m := configmap.Simple{
|
||||
"links": "true",
|
||||
"copy_links": "true",
|
||||
}
|
||||
_, err := NewFs("local", "/", m)
|
||||
assert.Equal(t, errLinksAndCopyLinks, err)
|
||||
}
|
||||
|
||||
10
backend/local/preallocate_other.go
Normal file
@@ -0,0 +1,10 @@
|
||||
//+build !windows,!linux
|
||||
|
||||
package local
|
||||
|
||||
import "os"
|
||||
|
||||
// preAllocate the file for performance reasons
|
||||
func preAllocate(size int64, out *os.File) error {
|
||||
return nil
|
||||
}
|
||||
22
backend/local/preallocate_unix.go
Normal file
@@ -0,0 +1,22 @@
|
||||
//+build linux
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// preAllocate the file for performance reasons
|
||||
func preAllocate(size int64, out *os.File) error {
|
||||
if size <= 0 {
|
||||
return nil
|
||||
}
|
||||
err := unix.Fallocate(int(out.Fd()), unix.FALLOC_FL_KEEP_SIZE, 0, size)
|
||||
// FIXME could be doing something here
|
||||
// if err == unix.ENOSPC {
|
||||
// log.Printf("No space")
|
||||
// }
|
||||
return err
|
||||
}
|
||||
79
backend/local/preallocate_windows.go
Normal file
@@ -0,0 +1,79 @@
|
||||
//+build windows
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
var (
|
||||
ntdll = windows.NewLazySystemDLL("ntdll.dll")
|
||||
ntQueryVolumeInformationFile = ntdll.NewProc("NtQueryVolumeInformationFile")
|
||||
ntSetInformationFile = ntdll.NewProc("NtSetInformationFile")
|
||||
)
|
||||
|
||||
type fileAllocationInformation struct {
|
||||
AllocationSize uint64
|
||||
}
|
||||
|
||||
type fileFsSizeInformation struct {
|
||||
TotalAllocationUnits uint64
|
||||
AvailableAllocationUnits uint64
|
||||
SectorsPerAllocationUnit uint32
|
||||
BytesPerSector uint32
|
||||
}
|
||||
|
||||
type ioStatusBlock struct {
|
||||
Status, Information uintptr
|
||||
}
|
||||
|
||||
// preAllocate the file for performance reasons
|
||||
func preAllocate(size int64, out *os.File) error {
|
||||
if size <= 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
iosb ioStatusBlock
|
||||
fsSizeInfo fileFsSizeInformation
|
||||
allocInfo fileAllocationInformation
|
||||
)
|
||||
|
||||
// Query info about the block sizes on the file system
|
||||
_, _, e1 := ntQueryVolumeInformationFile.Call(
|
||||
uintptr(out.Fd()),
|
||||
uintptr(unsafe.Pointer(&iosb)),
|
||||
uintptr(unsafe.Pointer(&fsSizeInfo)),
|
||||
uintptr(unsafe.Sizeof(fsSizeInfo)),
|
||||
uintptr(3), // FileFsSizeInformation
|
||||
)
|
||||
if e1 != nil && e1 != syscall.Errno(0) {
|
||||
return errors.Wrap(e1, "preAllocate NtQueryVolumeInformationFile failed")
|
||||
}
|
||||
|
||||
// Calculate the allocation size
|
||||
clusterSize := uint64(fsSizeInfo.BytesPerSector) * uint64(fsSizeInfo.SectorsPerAllocationUnit)
|
||||
if clusterSize <= 0 {
|
||||
return errors.Errorf("preAllocate clusterSize %d <= 0", clusterSize)
|
||||
}
|
||||
allocInfo.AllocationSize = (1 + uint64(size-1)/clusterSize) * clusterSize
|
||||
|
||||
// Ask for the allocation
|
||||
_, _, e1 = ntSetInformationFile.Call(
|
||||
uintptr(out.Fd()),
|
||||
uintptr(unsafe.Pointer(&iosb)),
|
||||
uintptr(unsafe.Pointer(&allocInfo)),
|
||||
uintptr(unsafe.Sizeof(allocInfo)),
|
||||
uintptr(19), // FileAllocationInformation
|
||||
)
|
||||
if e1 != nil && e1 != syscall.Errno(0) {
|
||||
return errors.Wrap(e1, "preAllocate NtSetInformationFile failed")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
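The Windows implementation rounds the requested size up to a whole number of allocation clusters before asking the filesystem for the space; the arithmetic is a ceiling division. A tiny illustration with an assumed 4 KiB cluster size (the size <= 0 guard in preAllocate matters, since the formula needs size >= 1):

// roundToClusters rounds size up to a multiple of clusterSize, e.g.
// roundToClusters(4097, 4096) == 8192 and roundToClusters(4096, 4096) == 4096.
func roundToClusters(size int64, clusterSize uint64) uint64 {
	return (1 + uint64(size-1)/clusterSize) * clusterSize
}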
@@ -22,5 +22,5 @@ func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
|
||||
fs.Debugf(fi.Name(), "Type assertion fi.Sys().(*syscall.Stat_t) failed from: %#v", fi.Sys())
|
||||
return devUnset
|
||||
}
|
||||
return uint64(statT.Dev)
|
||||
return uint64(statT.Dev) // nolint: unconvert
|
||||
}
|
||||
|
||||
@@ -63,13 +63,20 @@ func init() {
|
||||
Required: true,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "debug",
|
||||
Help: "Output more debug from Mega.",
|
||||
Name: "debug",
|
||||
Help: `Output more debug from Mega.
|
||||
|
||||
If this flag is set (along with -vv) it will print further debugging
|
||||
information from the mega backend.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "hard_delete",
|
||||
Help: "Delete files permanently rather than putting them into the trash.",
|
||||
Name: "hard_delete",
|
||||
Help: `Delete files permanently rather than putting them into the trash.
|
||||
|
||||
Normally the mega backend will put all deletions into the trash rather
|
||||
than permanently deleting them. If you specify this then rclone will
|
||||
permanently delete objects instead.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}},
|
||||
@@ -490,7 +497,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
// Creates from the parameters passed in a half finished Object which
|
||||
// must have setMetaData called on it
|
||||
//
|
||||
// Returns the dirNode, obect, leaf and error
|
||||
// Returns the dirNode, object, leaf and error
|
||||
//
|
||||
// Used to create new objects
|
||||
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
|
||||
@@ -516,10 +523,10 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
|
||||
// This will create a duplicate if we upload a new file without
|
||||
// checking to see if there is one already - use Put() for that.
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
exisitingObj, err := f.newObjectWithInfo(src.Remote(), nil)
|
||||
existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
|
||||
switch err {
|
||||
case nil:
|
||||
return exisitingObj, exisitingObj.Update(in, src, options...)
|
||||
return existingObj, existingObj.Update(in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
// Not found so create it
|
||||
return f.PutUnchecked(in, src)
|
||||
@@ -840,14 +847,14 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error {
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "MergDirs move failed on %q in %v", info.GetName(), srcDir)
|
||||
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.GetName(), srcDir)
|
||||
}
|
||||
}
|
||||
// rmdir (into trash) the now empty source directory
|
||||
fs.Infof(srcDir, "removing empty directory")
|
||||
err = f.deleteNode(srcDirNode)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "MergDirs move failed to rmdir %q", srcDir)
|
||||
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -1069,6 +1076,9 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
size := src.Size()
|
||||
if size < 0 {
|
||||
return errors.New("mega backend can't upload a file of unknown length")
|
||||
}
|
||||
//modTime := src.ModTime()
|
||||
remote := o.Remote()
|
||||
|
||||
@@ -1119,7 +1129,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
return errors.Wrap(err, "failed to finish upload")
|
||||
}
|
||||
|
||||
// If the upload succeded and the original object existed, then delete it
|
||||
// If the upload succeeded and the original object existed, then delete it
|
||||
if o.info != nil {
|
||||
err = o.fs.deleteNode(o.info)
|
||||
if err != nil {
|
||||
|
||||
@@ -9,6 +9,9 @@ import (
|
||||
|
||||
const (
|
||||
timeFormat = `"` + time.RFC3339 + `"`
|
||||
|
||||
// PackageTypeOneNote is the package type value for OneNote files
|
||||
PackageTypeOneNote = "oneNote"
|
||||
)
|
||||
|
||||
// Error is returned from one drive when things go wrong
|
||||
@@ -22,7 +25,7 @@ type Error struct {
|
||||
} `json:"error"`
|
||||
}
|
||||
|
||||
// Error returns a string for the error and statistifes the error interface
|
||||
// Error returns a string for the error and satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
out := e.ErrorInfo.Code
|
||||
if e.ErrorInfo.InnerError.Code != "" {
|
||||
@@ -32,7 +35,7 @@ func (e *Error) Error() string {
|
||||
return out
|
||||
}
|
||||
|
||||
// Check Error statisfies the error interface
|
||||
// Check Error satisfies the error interface
|
||||
var _ error = (*Error)(nil)
|
||||
|
||||
// Identity represents an identity of an actor. For example, and actor
|
||||
@@ -91,9 +94,10 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
|
||||
// ItemReference groups data needed to reference a OneDrive item
|
||||
// across the service into a single structure.
|
||||
type ItemReference struct {
|
||||
DriveID string `json:"driveId"` // Unique identifier for the Drive that contains the item. Read-only.
|
||||
ID string `json:"id"` // Unique identifier for the item. Read/Write.
|
||||
Path string `json:"path"` // Path that used to navigate to the item. Read/Write.
|
||||
DriveID string `json:"driveId"` // Unique identifier for the Drive that contains the item. Read-only.
|
||||
ID string `json:"id"` // Unique identifier for the item. Read/Write.
|
||||
Path string `json:"path"` // Path that used to navigate to the item. Read/Write.
|
||||
DriveType string `json:"driveType"` // Type of the drive, Read-Only
|
||||
}
|
||||
|
||||
// RemoteItemFacet groups data needed to reference a OneDrive remote item
|
||||
@@ -106,6 +110,7 @@ type RemoteItemFacet struct {
|
||||
LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // Date and time the item was last modified. Read-only.
|
||||
Folder *FolderFacet `json:"folder"` // Folder metadata, if the item is a folder. Read-only.
|
||||
File *FileFacet `json:"file"` // File metadata, if the item is a file. Read-only.
|
||||
Package *PackageFacet `json:"package"` // If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others. Read-only.
|
||||
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
|
||||
ParentReference *ItemReference `json:"parentReference"` // Parent information, if the item has a parent. Read-write.
|
||||
Size int64 `json:"size"` // Size of the item in bytes. Read-only.
|
||||
@@ -146,6 +151,13 @@ type FileSystemInfoFacet struct {
|
||||
type DeletedFacet struct {
|
||||
}
|
||||
|
||||
// PackageFacet indicates that a DriveItem is the top level item
|
||||
// in a "package" or a collection of items that should be treated as a collection instead of individual items.
|
||||
// `oneNote` is the only currently defined value.
|
||||
type PackageFacet struct {
|
||||
Type string `json:"type"`
|
||||
}
|
||||
|
||||
// Item represents metadata for an item in OneDrive
|
||||
type Item struct {
|
||||
ID string `json:"id"` // The unique identifier of the item within the Drive. Read-only.
|
||||
@@ -169,6 +181,7 @@ type Item struct {
|
||||
// Audio *AudioFacet `json:"audio"` // Audio metadata, if the item is an audio file. Read-only.
|
||||
// Video *VideoFacet `json:"video"` // Video metadata, if the item is a video. Read-only.
|
||||
// Location *LocationFacet `json:"location"` // Location metadata, if the item has location data. Read-only.
|
||||
Package *PackageFacet `json:"package"` // If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others. Read-only.
|
||||
Deleted *DeletedFacet `json:"deleted"` // Information about the deleted state of the item. Read-only.
|
||||
}
|
||||
|
||||
@@ -237,6 +250,28 @@ type MoveItemRequest struct {
|
||||
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo,omitempty"` // File system information on client. Read-write.
|
||||
}
|
||||
|
||||
//CreateShareLinkRequest is the request to create a sharing link
|
||||
//Always Type:view and Scope:anonymous for public sharing
|
||||
type CreateShareLinkRequest struct {
|
||||
Type string `json:"type"` //Link type in View, Edit or Embed
|
||||
Scope string `json:"scope,omitempty"` //Optional. Scope in anonymousi, organization
|
||||
}
|
||||
|
||||
//CreateShareLinkResponse is the response from CreateShareLinkRequest
|
||||
type CreateShareLinkResponse struct {
|
||||
ID string `json:"id"`
|
||||
Roles []string `json:"roles"`
|
||||
Link struct {
|
||||
Type string `json:"type"`
|
||||
Scope string `json:"scope"`
|
||||
WebURL string `json:"webUrl"`
|
||||
Application struct {
|
||||
ID string `json:"id"`
|
||||
DisplayName string `json:"displayName"`
|
||||
} `json:"application"`
|
||||
} `json:"link"`
|
||||
}
|
||||
|
||||
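These types back the backend's public-link support: the request always asks for an anonymous view link, and the webUrl in the response is what ends up shown to the user. A hedged sketch of the JSON the request serialises to, given the struct tags above:

func shareLinkExample() {
	req := CreateShareLinkRequest{Type: "view", Scope: "anonymous"}
	body, _ := json.Marshal(req)
	fmt.Println(string(body)) // {"type":"view","scope":"anonymous"}
}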
// AsyncOperationStatus provides information on the status of a asynchronous job progress.
|
||||
//
|
||||
// The following API calls return AsyncOperationStatus resources:
|
||||
@@ -244,13 +279,13 @@ type MoveItemRequest struct {
|
||||
// Copy Item
|
||||
// Upload From URL
|
||||
type AsyncOperationStatus struct {
|
||||
Operation string `json:"operation"` // The type of job being run.
|
||||
PercentageComplete float64 `json:"percentageComplete"` // An float value between 0 and 100 that indicates the percentage complete.
|
||||
Status string `json:"status"` // A string value that maps to an enumeration of possible values about the status of the job. "notStarted | inProgress | completed | updating | failed | deletePending | deleteFailed | waiting"
|
||||
}
|
||||
|
||||
// GetID returns a normalized ID of the item
|
||||
// If DriveID is known it will be prefixed to the ID with # seperator
|
||||
// Can be parsed using onedrive.parseNormalizedID(normalizedID)
|
||||
func (i *Item) GetID() string {
|
||||
if i.IsRemote() && i.RemoteItem.ID != "" {
|
||||
return i.RemoteItem.ParentReference.DriveID + "#" + i.RemoteItem.ID
|
||||
@@ -260,9 +295,9 @@ func (i *Item) GetID() string {
|
||||
return i.ID
|
||||
}
|
||||
|
||||
// GetDriveID returns a normalized ParentReferance of the item
|
||||
// GetDriveID returns a normalized ParentReference of the item
|
||||
func (i *Item) GetDriveID() string {
|
||||
return i.GetParentReferance().DriveID
|
||||
return i.GetParentReference().DriveID
|
||||
}
|
||||
|
||||
// GetName returns a normalized Name of the item
|
||||
@@ -281,6 +316,24 @@ func (i *Item) GetFolder() *FolderFacet {
|
||||
return i.Folder
|
||||
}
|
||||
|
||||
// GetPackage returns a normalized Package of the item
|
||||
func (i *Item) GetPackage() *PackageFacet {
|
||||
if i.IsRemote() && i.RemoteItem.Package != nil {
|
||||
return i.RemoteItem.Package
|
||||
}
|
||||
return i.Package
|
||||
}
|
||||
|
||||
// GetPackageType returns the package type of the item if available,
|
||||
// otherwise ""
|
||||
func (i *Item) GetPackageType() string {
|
||||
pack := i.GetPackage()
|
||||
if pack == nil {
|
||||
return ""
|
||||
}
|
||||
return pack.Type
|
||||
}
|
||||
|
||||
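GetPackageType exists so callers can recognise OneNote notebooks, which OneDrive reports as packages rather than plain files or folders. An illustrative helper, not taken from this diff:

// isOneNotePackage reports whether an item is a OneNote package so listing
// code can treat it specially (for example by skipping the download).
func isOneNotePackage(item *Item) bool {
	return item.GetPackageType() == PackageTypeOneNote
}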
// GetFile returns a normalized File of the item
|
||||
func (i *Item) GetFile() *FileFacet {
|
||||
if i.IsRemote() && i.RemoteItem.File != nil {
|
||||
@@ -345,8 +398,8 @@ func (i *Item) GetLastModifiedDateTime() Timestamp {
|
||||
return i.LastModifiedDateTime
|
||||
}
|
||||
|
||||
// GetParentReferance returns a normalized ParentReferance of the item
|
||||
func (i *Item) GetParentReferance() *ItemReference {
|
||||
// GetParentReference returns a normalized ParentReference of the item
|
||||
func (i *Item) GetParentReference() *ItemReference {
|
||||
if i.IsRemote() && i.ParentReference == nil {
|
||||
return i.RemoteItem.ParentReference
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,10 +1,10 @@
|
||||
// Test OneDrive filesystem interface
|
||||
package onedrive_test
|
||||
package onedrive
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/onedrive"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
@@ -12,6 +12,15 @@ import (
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestOneDrive:",
|
||||
NilObject: (*onedrive.Object)(nil),
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
CeilChunkSize: fstests.NextMultipleOf(chunkSizeMultiple),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -20,6 +21,7 @@ import (
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/dircache"
|
||||
"github.com/ncw/rclone/lib/pacer"
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/ncw/rclone/lib/rest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -117,7 +119,7 @@ func (f *Fs) DirCacheFlush() {
|
||||
f.dirCache.ResetRoot()
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, bucket:path
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
@@ -177,17 +179,17 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
// Assume it is a file
|
||||
newRoot, remote := dircache.SplitPath(root)
|
||||
newF := *f
|
||||
newF.dirCache = dircache.New(newRoot, "0", &newF)
|
||||
newF.root = newRoot
|
||||
tempF := *f
|
||||
tempF.dirCache = dircache.New(newRoot, "0", &tempF)
|
||||
tempF.root = newRoot
|
||||
|
||||
// Make new Fs which is the parent
|
||||
err = newF.dirCache.FindRoot(false)
|
||||
err = tempF.dirCache.FindRoot(false)
|
||||
if err != nil {
|
||||
// No root so return old f
|
||||
return f, nil
|
||||
}
|
||||
_, err := newF.newObjectWithInfo(remote, nil)
|
||||
_, err := tempF.newObjectWithInfo(remote, nil)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// File doesn't exist so return old f
|
||||
@@ -195,8 +197,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// XXX: update the old f here instead of returning tempF, since
|
||||
// `features` were already filled with functions having *f as a receiver.
|
||||
// See https://github.com/ncw/rclone/issues/2182
|
||||
f.dirCache = tempF.dirCache
|
||||
f.root = tempF.root
|
||||
// return an error with an fs which points to the parent
|
||||
return &newF, fs.ErrorIsFile
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
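The reason for copying tempF's fields back into f instead of returning &tempF is that f.features was registered earlier with methods bound to the original *Fs; handing back a different struct would leave those callbacks reading a receiver whose dirCache and root were never updated (issue 2182 referenced above). A minimal illustration of the Go pitfall, with invented types:

type thing struct{ root string }

func (t *thing) describe() string { return t.root } // bound to one receiver

func main() {
	orig := &thing{root: "old"}
	callback := orig.describe // method value captures orig

	copied := *orig
	copied.root = "new"     // mutating a copy doesn't reach the captured receiver
	fmt.Println(callback()) // "old"

	orig.root = "new"       // mutating the original is what the diff does
	fmt.Println(callback()) // "new"
}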
@@ -778,7 +785,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
remote := path.Join(dir, folder.Name)
|
||||
// cache the directory ID for later lookups
|
||||
f.dirCache.Put(remote, folder.FolderID)
|
||||
d := fs.NewDir(remote, time.Unix(int64(folder.DateModified), 0)).SetID(folder.FolderID)
|
||||
d := fs.NewDir(remote, time.Unix(folder.DateModified, 0)).SetID(folder.FolderID)
|
||||
d.SetItems(int64(folder.ChildFolders))
|
||||
entries = append(entries, d)
|
||||
}
|
||||
@@ -925,8 +932,9 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
// resp.Body.Close()
|
||||
// fs.Debugf(nil, "PostOpen: %#v", openResponse)
|
||||
|
||||
// 1 MB chunks size
|
||||
// 10 MB chunks size
|
||||
chunkSize := int64(1024 * 1024 * 10)
|
||||
buf := make([]byte, int(chunkSize))
|
||||
chunkOffset := int64(0)
|
||||
remainingBytes := size
|
||||
chunkCounter := 0
|
||||
@@ -939,14 +947,19 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
remainingBytes -= currentChunkSize
|
||||
fs.Debugf(o, "Uploading chunk %d, size=%d, remain=%d", chunkCounter, currentChunkSize, remainingBytes)
|
||||
|
||||
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, currentChunkSize)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
// seek to the start in case this is a retry
|
||||
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
|
||||
return false, err
|
||||
}
|
||||
var formBody bytes.Buffer
|
||||
w := multipart.NewWriter(&formBody)
|
||||
fw, err := w.CreateFormFile("file_data", o.remote)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if _, err = io.CopyN(fw, in, currentChunkSize); err != nil {
|
||||
if _, err = io.Copy(fw, chunk); err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Add session_id
|
||||
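The change wraps each chunk in a repeatable reader so a pacer retry can rewind to the start of the chunk instead of consuming fresh bytes from the source stream, which is what the old io.CopyN did. A hedged sketch of the retry pattern using a plain bytes.Reader in place of readers.NewRepeatableLimitReaderBuffer:

// uploadChunkWithRetry uploads one chunk, replaying it from offset 0 on each
// failed attempt because the data is held in a seekable buffer.
func uploadChunkWithRetry(upload func(io.Reader) error, data []byte, attempts int) error {
	chunk := bytes.NewReader(data)
	var err error
	for i := 0; i < attempts; i++ {
		if _, err = chunk.Seek(0, io.SeekStart); err != nil {
			return err
		}
		if err = upload(chunk); err == nil {
			return nil
		}
	}
	return err
}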
@@ -1077,7 +1090,7 @@ func (o *Object) readMetaData() (err error) {
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + rest.URLPathEscape(replaceReservedChars(leaf)),
|
||||
Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + url.QueryEscape(replaceReservedChars(leaf)),
|
||||
}
|
||||
resp, err = o.fs.srv.CallJSON(&opts, nil, &folderList)
|
||||
return o.fs.shouldRetry(resp, err)
|
||||
|
||||
@@ -13,7 +13,7 @@ type Error struct {
|
||||
} `json:"error"`
|
||||
}
|
||||
|
||||
// Error statisfies the error interface
|
||||
// Error satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("%s (Error %d)", e.Info.Message, e.Info.Code)
|
||||
}
|
||||
|
||||
@@ -41,7 +41,7 @@ type Error struct {
|
||||
ErrorString string `json:"error"`
|
||||
}
|
||||
|
||||
// Error returns a string for the error and statistifes the error interface
|
||||
// Error returns a string for the error and satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("pcloud error: %s (%d)", e.ErrorString, e.Result)
|
||||
}
|
||||
@@ -58,7 +58,7 @@ func (e *Error) Update(err error) error {
|
||||
return e
|
||||
}
|
||||
|
||||
// Check Error statisfies the error interface
|
||||
// Check Error satisfies the error interface
|
||||
var _ error = (*Error)(nil)
|
||||
|
||||
// Item describes a folder or a file as returned by Get Folder Items and others
|
||||
@@ -161,7 +161,6 @@ type UserInfo struct {
|
||||
PublicLinkQuota int64 `json:"publiclinkquota"`
|
||||
Email string `json:"email"`
|
||||
UserID int `json:"userid"`
|
||||
Result int `json:"result"`
|
||||
Quota int64 `json:"quota"`
|
||||
TrashRevretentionDays int `json:"trashrevretentiondays"`
|
||||
Premium bool `json:"premium"`
|
||||
|
||||
@@ -246,7 +246,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
root = parsePath(root)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure Pcloud: %v", err)
|
||||
return nil, errors.Wrap(err, "failed to configure Pcloud")
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
@@ -276,16 +276,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
// Assume it is a file
|
||||
newRoot, remote := dircache.SplitPath(root)
|
||||
newF := *f
|
||||
newF.dirCache = dircache.New(newRoot, rootID, &newF)
|
||||
newF.root = newRoot
|
||||
tempF := *f
|
||||
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
|
||||
tempF.root = newRoot
|
||||
// Make new Fs which is the parent
|
||||
err = newF.dirCache.FindRoot(false)
|
||||
err = tempF.dirCache.FindRoot(false)
|
||||
if err != nil {
|
||||
// No root so return old f
|
||||
return f, nil
|
||||
}
|
||||
_, err := newF.newObjectWithInfo(remote, nil)
|
||||
_, err := tempF.newObjectWithInfo(remote, nil)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// File doesn't exist so return old f
|
||||
@@ -293,8 +293,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// XXX: update the old f here instead of returning tempF, since
|
||||
// `features` were already filled with functions having *f as a receiver.
|
||||
// See https://github.com/ncw/rclone/issues/2182
|
||||
f.dirCache = tempF.dirCache
|
||||
f.root = tempF.root
|
||||
// return an error with an fs which points to the parent
|
||||
return &newF, fs.ErrorIsFile
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
@@ -380,7 +385,7 @@ func fileIDtoNumber(fileID string) string {
|
||||
if len(fileID) > 0 && fileID[0] == 'f' {
|
||||
return fileID[1:]
|
||||
}
|
||||
fs.Debugf(nil, "Invalid filee id %q", fileID)
|
||||
fs.Debugf(nil, "Invalid file id %q", fileID)
|
||||
return fileID
|
||||
}
|
||||
|
||||
|
||||
@@ -69,17 +69,57 @@ func init() {
|
||||
}},
|
||||
}, {
|
||||
Name: "connection_retries",
|
||||
Help: "Number of connnection retries.",
|
||||
Help: "Number of connection retries.",
|
||||
Default: 3,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: `Cutoff for switching to chunked upload
|
||||
|
||||
Any files larger than this will be uploaded in chunks of chunk_size.
|
||||
The minimum is 0 and the maximum is 5GB.`,
|
||||
Default: defaultUploadCutoff,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: `Chunk size to use for uploading.
|
||||
|
||||
When uploading files larger than upload_cutoff they will be uploaded
|
||||
as multipart uploads using this chunk size.
|
||||
|
||||
Note that "--qingstor-upload-concurrency" chunks of this size are buffered
|
||||
in memory per transfer.
|
||||
|
||||
If you are transferring large files over high speed links and you have
|
||||
enough memory, then increasing this will speed up the transfers.`,
|
||||
Default: minChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_concurrency",
|
||||
Help: `Concurrency for multipart uploads.
|
||||
|
||||
This is the number of chunks of the same file that are uploaded
|
||||
concurrently.
|
||||
|
||||
NB if you set this to > 1 then the checksums of multpart uploads
|
||||
become corrupted (the uploads themselves are not corrupted though).
|
||||
|
||||
If you are uploading small numbers of large file over high speed link
|
||||
and these uploads do not fully utilize your bandwidth, then increasing
|
||||
this may help to speed up the transfers.`,
|
||||
Default: 1,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Constants
|
||||
const (
|
||||
listLimitSize = 1000 // Number of items to read at once
|
||||
maxSizeForCopy = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
|
||||
listLimitSize = 1000 // Number of items to read at once
|
||||
maxSizeForCopy = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
|
||||
minChunkSize = fs.SizeSuffix(minMultiPartSize)
|
||||
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
|
||||
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
|
||||
)
|
||||
|
||||
// Globals
|
||||
@@ -92,12 +132,15 @@ func timestampToTime(tp int64) time.Time {
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
AccessKeyID string `config:"access_key_id"`
|
||||
SecretAccessKey string `config:"secret_access_key"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
Zone string `config:"zone"`
|
||||
ConnectionRetries int `config:"connection_retries"`
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
AccessKeyID string `config:"access_key_id"`
|
||||
SecretAccessKey string `config:"secret_access_key"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
Zone string `config:"zone"`
|
||||
ConnectionRetries int `config:"connection_retries"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
}
|
||||
|
||||
// Fs represents a remote qingstor server
|
||||
@@ -227,6 +270,36 @@ func qsServiceConnection(opt *Options) (*qs.Service, error) {
|
||||
return qs.Init(cf)
|
||||
}
|
||||
|
||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||
if cs < minChunkSize {
|
||||
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadChunkSize(cs)
|
||||
if err == nil {
|
||||
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func checkUploadCutoff(cs fs.SizeSuffix) error {
|
||||
if cs > maxUploadCutoff {
|
||||
return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadCutoff(cs)
|
||||
if err == nil {
|
||||
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
@@ -235,6 +308,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = checkUploadChunkSize(opt.ChunkSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "qingstor: chunk size")
|
||||
}
|
||||
err = checkUploadCutoff(opt.UploadCutoff)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "qingstor: upload cutoff")
|
||||
}
|
||||
bucket, key, err := qsParsePath(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -368,7 +449,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
}
|
||||
_, err = bucketInit.PutObject(key, &req)
|
||||
if err != nil {
|
||||
fs.Debugf(f, "Copied Faild, API Error: %v", err)
|
||||
fs.Debugf(f, "Copy Failed, API Error: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
return f.NewObject(remote)
|
||||
@@ -675,7 +756,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
}
|
||||
switch *statistics.Status {
|
||||
case "deleted":
|
||||
fs.Debugf(f, "Wiat for qingstor sync bucket status, retries: %d", retries)
|
||||
fs.Debugf(f, "Wait for qingstor sync bucket status, retries: %d", retries)
|
||||
time.Sleep(time.Second * 1)
|
||||
retries++
|
||||
continue
|
||||
@@ -794,7 +875,7 @@ func (o *Object) readMetaData() (err error) {
|
||||
fs.Debugf(o, "Read metadata of key: %s", key)
|
||||
resp, err := bucketInit.HeadObject(key, &qs.HeadObjectInput{})
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Read metadata faild, API Error: %v", err)
|
||||
fs.Debugf(o, "Read metadata failed, API Error: %v", err)
|
||||
if e, ok := err.(*qsErr.QingStorError); ok {
|
||||
if e.StatusCode == http.StatusNotFound {
|
||||
return fs.ErrorObjectNotFound
|
||||
@@ -913,16 +994,24 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
mimeType := fs.MimeType(src)
|
||||
|
||||
req := uploadInput{
|
||||
body: in,
|
||||
qsSvc: o.fs.svc,
|
||||
bucket: o.fs.bucket,
|
||||
zone: o.fs.zone,
|
||||
key: key,
|
||||
mimeType: mimeType,
|
||||
body: in,
|
||||
qsSvc: o.fs.svc,
|
||||
bucket: o.fs.bucket,
|
||||
zone: o.fs.zone,
|
||||
key: key,
|
||||
mimeType: mimeType,
|
||||
partSize: int64(o.fs.opt.ChunkSize),
|
||||
concurrency: o.fs.opt.UploadConcurrency,
|
||||
}
|
||||
uploader := newUploader(&req)
|
||||
|
||||
err = uploader.upload()
|
||||
size := src.Size()
|
||||
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
|
||||
if multipart {
|
||||
err = uploader.upload()
|
||||
} else {
|
||||
err = uploader.singlePartUpload(in, size)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
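The Update change routes small objects of known size through a single PutObject and keeps the multipart machinery for large or unknown-length bodies; the decision is just the cutoff comparison. A hedged sketch of that predicate (the comments assume a 200 MiB cutoff):

// useMultipart mirrors the decision above: unknown size or size at or above
// the cutoff means multipart, otherwise a single PUT.
func useMultipart(size int64, cutoff fs.SizeSuffix) bool {
	return size < 0 || size >= int64(cutoff)
}

// useMultipart(-1, cutoff)       == true  (streaming upload, length unknown)
// useMultipart(100<<20, 200<<20) == false (below the cutoff)
// useMultipart(300<<20, 200<<20) == true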
@@ -2,12 +2,12 @@
|
||||
|
||||
// +build !plan9
|
||||
|
||||
package qingstor_test
|
||||
package qingstor
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/qingstor"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
@@ -15,6 +15,19 @@ import (
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestQingStor:",
|
||||
NilObject: (*qingstor.Object)(nil),
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: minChunkSize,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadCutoff(cs)
|
||||
}
|
||||
|
||||
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
|
||||
@@ -143,7 +143,7 @@ func (u *uploader) init() {
|
||||
|
||||
// Try to adjust partSize if it is too small and account for
|
||||
// integer division truncation.
|
||||
if u.totalSize/u.cfg.partSize >= int64(u.cfg.partSize) {
|
||||
if u.totalSize/u.cfg.partSize >= u.cfg.partSize {
|
||||
// Add one to the part size to account for remainders
|
||||
// during the size calculation. e.g odd number of bytes.
|
||||
u.cfg.partSize = (u.totalSize / int64(u.cfg.maxUploadParts)) + 1
|
||||
@@ -152,18 +152,18 @@ func (u *uploader) init() {
|
||||
}
|
||||
|
||||
// singlePartUpload upload a single object that contentLength less than "defaultUploadPartSize"
|
||||
func (u *uploader) singlePartUpload(buf io.ReadSeeker) error {
|
||||
func (u *uploader) singlePartUpload(buf io.Reader, size int64) error {
|
||||
bucketInit, _ := u.bucketInit()
|
||||
|
||||
req := qs.PutObjectInput{
|
||||
ContentLength: &u.readerPos,
|
||||
ContentLength: &size,
|
||||
ContentType: &u.cfg.mimeType,
|
||||
Body: buf,
|
||||
}
|
||||
|
||||
_, err := bucketInit.PutObject(u.cfg.key, &req)
|
||||
if err == nil {
|
||||
fs.Debugf(u, "Upload single objcet finished")
|
||||
fs.Debugf(u, "Upload single object finished")
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -179,13 +179,13 @@ func (u *uploader) upload() error {
|
||||
// Do one read to determine if we have more than one part
|
||||
reader, _, err := u.nextReader()
|
||||
if err == io.EOF { // single part
|
||||
fs.Debugf(u, "Tried to upload a singile object to QingStor")
|
||||
return u.singlePartUpload(reader)
|
||||
fs.Debugf(u, "Uploading as single part object to QingStor")
|
||||
return u.singlePartUpload(reader, u.readerPos)
|
||||
} else if err != nil {
|
||||
return errors.Errorf("read upload data failed: %s", err)
|
||||
}
|
||||
|
||||
fs.Debugf(u, "Treied to upload a multi-part object to QingStor")
|
||||
fs.Debugf(u, "Uploading as multi-part object to QingStor")
|
||||
mu := multiUploader{uploader: u}
|
||||
return mu.multiPartUpload(reader)
|
||||
}
|
||||
@@ -261,7 +261,7 @@ func (mu *multiUploader) initiate() error {
|
||||
req := qs.InitiateMultipartUploadInput{
|
||||
ContentType: &mu.cfg.mimeType,
|
||||
}
|
||||
fs.Debugf(mu, "Tried to initiate a multi-part upload")
|
||||
fs.Debugf(mu, "Initiating a multi-part upload")
|
||||
rsp, err := bucketInit.InitiateMultipartUpload(mu.cfg.key, &req)
|
||||
if err == nil {
|
||||
mu.uploadID = rsp.UploadID
|
||||
@@ -279,12 +279,12 @@ func (mu *multiUploader) send(c chunk) error {
|
||||
ContentLength: &c.size,
|
||||
Body: c.buffer,
|
||||
}
|
||||
fs.Debugf(mu, "Tried to upload a part to QingStor that partNumber %d and partSize %d", c.partNumber, c.size)
|
||||
fs.Debugf(mu, "Uploading a part to QingStor with partNumber %d and partSize %d", c.partNumber, c.size)
|
||||
_, err := bucketInit.UploadMultipart(mu.cfg.key, &req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fs.Debugf(mu, "Upload part finished that partNumber %d and partSize %d", c.partNumber, c.size)
|
||||
fs.Debugf(mu, "Done uploading part partNumber %d and partSize %d", c.partNumber, c.size)
|
||||
|
||||
mu.mtx.Lock()
|
||||
defer mu.mtx.Unlock()
|
||||
@@ -304,7 +304,7 @@ func (mu *multiUploader) list() error {
|
||||
req := qs.ListMultipartInput{
|
||||
UploadID: mu.uploadID,
|
||||
}
|
||||
fs.Debugf(mu, "Tried to list a multi-part")
|
||||
fs.Debugf(mu, "Reading multi-part details")
|
||||
rsp, err := bucketInit.ListMultipart(mu.cfg.key, &req)
|
||||
if err == nil {
|
||||
mu.objectParts = rsp.ObjectParts
|
||||
@@ -331,7 +331,7 @@ func (mu *multiUploader) complete() error {
|
||||
ObjectParts: mu.objectParts,
|
||||
ETag: &md5String,
|
||||
}
|
||||
fs.Debugf(mu, "Tried to complete a multi-part")
|
||||
fs.Debugf(mu, "Completing multi-part object")
|
||||
_, err = bucketInit.CompleteMultipartUpload(mu.cfg.key, &req)
|
||||
if err == nil {
|
||||
fs.Debugf(mu, "Complete multi-part finished")
|
||||
@@ -348,7 +348,7 @@ func (mu *multiUploader) abort() error {
|
||||
req := qs.AbortMultipartUploadInput{
|
||||
UploadID: uploadID,
|
||||
}
|
||||
fs.Debugf(mu, "Tried to abort a multi-part")
|
||||
fs.Debugf(mu, "Aborting multi-part object %q", *uploadID)
|
||||
_, err = bucketInit.AbortMultipartUpload(mu.cfg.key, &req)
|
||||
}
|
||||
|
||||
@@ -392,6 +392,14 @@ func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) error {
|
||||
var nextChunkLen int
|
||||
reader, nextChunkLen, err = mu.nextReader()
|
||||
if err != nil && err != io.EOF {
|
||||
// empty ch
|
||||
go func() {
|
||||
for range ch {
|
||||
}
|
||||
}()
|
||||
// Wait for all goroutines finish
|
||||
close(ch)
|
||||
mu.wg.Wait()
|
||||
return err
|
||||
}
|
||||
if nextChunkLen == 0 && partNumber > 0 {
|
||||
|
||||
650
backend/s3/s3.go
@@ -39,9 +39,11 @@ import (
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/ncw/rclone/lib/pacer"
|
||||
"github.com/ncw/rclone/lib/rest"
|
||||
"github.com/ncw/swift"
|
||||
"github.com/pkg/errors"
|
||||
@@ -51,7 +53,7 @@ import (
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "s3",
|
||||
Description: "Amazon S3 Compliant Storage Providers (AWS, Ceph, Dreamhost, IBM COS, Minio)",
|
||||
Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: fs.ConfigProvider,
|
||||
@@ -59,6 +61,9 @@ func init() {
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "AWS",
|
||||
Help: "Amazon Web Services (AWS) S3",
|
||||
}, {
|
||||
Value: "Alibaba",
|
||||
Help: "Alibaba Cloud Object Storage System (OSS) formerly Aliyun",
|
||||
}, {
|
||||
Value: "Ceph",
|
||||
Help: "Ceph Object Storage",
|
||||
@@ -74,6 +79,9 @@ func init() {
|
||||
}, {
|
||||
Value: "Minio",
|
||||
Help: "Minio Object Storage",
|
||||
}, {
|
||||
Value: "Netease",
|
||||
Help: "Netease Object Storage (NOS)",
|
||||
}, {
|
||||
Value: "Wasabi",
|
||||
Help: "Wasabi Object Storage",
|
||||
@@ -123,6 +131,9 @@ func init() {
|
||||
}, {
|
||||
Value: "eu-west-2",
|
||||
Help: "EU (London) Region\nNeeds location constraint eu-west-2.",
|
||||
}, {
|
||||
Value: "eu-north-1",
|
||||
Help: "EU (Stockholm) Region\nNeeds location constraint eu-north-1.",
|
||||
}, {
|
||||
Value: "eu-central-1",
|
||||
Help: "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
|
||||
@@ -148,7 +159,7 @@ func init() {
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
|
||||
Provider: "!AWS",
|
||||
Provider: "!AWS,Alibaba",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Use this if unsure. Will use v4 signatures and an empty region.",
|
||||
@@ -226,10 +237,10 @@ func init() {
|
||||
Help: "EU Cross Region Amsterdam Private Endpoint",
|
||||
}, {
|
||||
Value: "s3.eu-gb.objectstorage.softlayer.net",
|
||||
Help: "Great Britan Endpoint",
|
||||
Help: "Great Britain Endpoint",
|
||||
}, {
|
||||
Value: "s3.eu-gb.objectstorage.service.networklayer.com",
|
||||
Help: "Great Britan Private Endpoint",
|
||||
Help: "Great Britain Private Endpoint",
|
||||
}, {
|
||||
Value: "s3.ap-geo.objectstorage.softlayer.net",
|
||||
Help: "APAC Cross Regional Endpoint",
|
||||
@@ -267,12 +278,75 @@ func init() {
|
||||
Value: "s3.tor01.objectstorage.service.networklayer.com",
|
||||
Help: "Toronto Single Site Private Endpoint",
|
||||
}},
|
||||
}, {
|
||||
// oss endpoints: https://help.aliyun.com/document_detail/31837.html
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for OSS API.",
|
||||
Provider: "Alibaba",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "oss-cn-hangzhou.aliyuncs.com",
|
||||
Help: "East China 1 (Hangzhou)",
|
||||
}, {
|
||||
Value: "oss-cn-shanghai.aliyuncs.com",
|
||||
Help: "East China 2 (Shanghai)",
|
||||
}, {
|
||||
Value: "oss-cn-qingdao.aliyuncs.com",
|
||||
Help: "North China 1 (Qingdao)",
|
||||
}, {
|
||||
Value: "oss-cn-beijing.aliyuncs.com",
|
||||
Help: "North China 2 (Beijing)",
|
||||
}, {
|
||||
Value: "oss-cn-zhangjiakou.aliyuncs.com",
|
||||
Help: "North China 3 (Zhangjiakou)",
|
||||
}, {
|
||||
Value: "oss-cn-huhehaote.aliyuncs.com",
|
||||
Help: "North China 5 (Huhehaote)",
|
||||
}, {
|
||||
Value: "oss-cn-shenzhen.aliyuncs.com",
|
||||
Help: "South China 1 (Shenzhen)",
|
||||
}, {
|
||||
Value: "oss-cn-hongkong.aliyuncs.com",
|
||||
Help: "Hong Kong (Hong Kong)",
|
||||
}, {
|
||||
Value: "oss-us-west-1.aliyuncs.com",
|
||||
Help: "US West 1 (Silicon Valley)",
|
||||
}, {
|
||||
Value: "oss-us-east-1.aliyuncs.com",
|
||||
Help: "US East 1 (Virginia)",
|
||||
}, {
|
||||
Value: "oss-ap-southeast-1.aliyuncs.com",
|
||||
Help: "Southeast Asia Southeast 1 (Singapore)",
|
||||
}, {
|
||||
Value: "oss-ap-southeast-2.aliyuncs.com",
|
||||
Help: "Asia Pacific Southeast 2 (Sydney)",
|
||||
}, {
|
||||
Value: "oss-ap-southeast-3.aliyuncs.com",
|
||||
Help: "Southeast Asia Southeast 3 (Kuala Lumpur)",
|
||||
}, {
|
||||
Value: "oss-ap-southeast-5.aliyuncs.com",
|
||||
Help: "Asia Pacific Southeast 5 (Jakarta)",
|
||||
}, {
|
||||
Value: "oss-ap-northeast-1.aliyuncs.com",
|
||||
Help: "Asia Pacific Northeast 1 (Japan)",
|
||||
}, {
|
||||
Value: "oss-ap-south-1.aliyuncs.com",
|
||||
Help: "Asia Pacific South 1 (Mumbai)",
|
||||
}, {
|
||||
Value: "oss-eu-central-1.aliyuncs.com",
|
||||
Help: "Central Europe 1 (Frankfurt)",
|
||||
}, {
|
||||
Value: "oss-eu-west-1.aliyuncs.com",
|
||||
Help: "West Europe (London)",
|
||||
}, {
|
||||
Value: "oss-me-east-1.aliyuncs.com",
|
||||
Help: "Middle East 1 (Dubai)",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.\nRequired when using an S3 clone.",
|
||||
Provider: "!AWS,IBMCOS",
|
||||
Provider: "!AWS,IBMCOS,Alibaba",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "objects-us-west-1.dream.io",
|
||||
Value: "objects-us-east-1.dream.io",
|
||||
Help: "Dream Objects endpoint",
|
||||
Provider: "Dreamhost",
|
||||
}, {
|
||||
@@ -289,7 +363,11 @@ func init() {
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "s3.wasabisys.com",
|
||||
Help: "Wasabi Object Storage",
|
||||
Help: "Wasabi US East endpoint",
|
||||
Provider: "Wasabi",
|
||||
}, {
|
||||
Value: "s3.us-west-1.wasabisys.com",
|
||||
Help: "Wasabi US West endpoint",
|
||||
Provider: "Wasabi",
|
||||
}},
|
||||
}, {
|
||||
@@ -317,6 +395,9 @@ func init() {
|
||||
}, {
|
||||
Value: "eu-west-2",
|
||||
Help: "EU (London) Region.",
|
||||
}, {
|
||||
Value: "eu-north-1",
|
||||
Help: "EU (Stockholm) Region.",
|
||||
}, {
|
||||
Value: "EU",
|
||||
Help: "EU Region.",
|
||||
@@ -369,7 +450,7 @@ func init() {
|
||||
Help: "US East Region Flex",
|
||||
}, {
|
||||
Value: "us-south-standard",
|
||||
Help: "US Sout hRegion Standard",
|
||||
Help: "US South Region Standard",
|
||||
}, {
|
||||
Value: "us-south-vault",
|
||||
Help: "US South Region Vault",
|
||||
@@ -393,16 +474,16 @@ func init() {
|
||||
Help: "EU Cross Region Flex",
|
||||
}, {
|
||||
Value: "eu-gb-standard",
|
||||
Help: "Great Britan Standard",
|
||||
Help: "Great Britain Standard",
|
||||
}, {
|
||||
Value: "eu-gb-vault",
|
||||
Help: "Great Britan Vault",
|
||||
Help: "Great Britain Vault",
|
||||
}, {
|
||||
Value: "eu-gb-cold",
|
||||
Help: "Great Britan Cold",
|
||||
Help: "Great Britain Cold",
|
||||
}, {
|
||||
Value: "eu-gb-flex",
|
||||
Help: "Great Britan Flex",
|
||||
Help: "Great Britain Flex",
|
||||
}, {
|
||||
Value: "ap-standard",
|
||||
Help: "APAC Standard",
|
||||
@@ -443,10 +524,17 @@ func init() {
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
|
||||
Provider: "!AWS,IBMCOS",
|
||||
Provider: "!AWS,IBMCOS,Alibaba",
|
||||
}, {
|
||||
Name: "acl",
|
||||
Help: "Canned ACL used when creating buckets and/or storing objects in S3.\nFor more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl",
|
||||
Help: `Canned ACL used when creating buckets and storing or copying objects.
|
||||
|
||||
This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
|
||||
|
||||
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
|
||||
|
||||
Note that this ACL is applied when server side copying objects as S3
|
||||
doesn't copy the ACL from the source but rather writes a fresh one.`,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "private",
|
||||
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
|
||||
@@ -488,6 +576,28 @@ func init() {
|
||||
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS",
|
||||
Provider: "IBMCOS",
|
||||
}},
|
||||
}, {
|
||||
Name: "bucket_acl",
|
||||
Help: `Canned ACL used when creating buckets.
|
||||
|
||||
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
|
||||
|
||||
Note that this ACL is applied only when creating buckets. If it
|
||||
isn't set then "acl" is used instead.`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "private",
|
||||
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
|
||||
}, {
|
||||
Value: "public-read",
|
||||
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
|
||||
}, {
|
||||
Value: "public-read-write",
|
||||
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
|
||||
}, {
|
||||
Value: "authenticated-read",
|
||||
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
|
||||
}},
|
||||
}, {
|
||||
Name: "server_side_encryption",
|
||||
Help: "The server-side encryption algorithm used when storing this object in S3.",
|
||||
@@ -515,7 +625,7 @@ func init() {
|
||||
}},
|
||||
}, {
|
||||
Name: "storage_class",
|
||||
Help: "The storage class to use when storing objects in S3.",
|
||||
Help: "The storage class to use when storing new objects in S3.",
|
||||
Provider: "AWS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
@@ -532,11 +642,49 @@ func init() {
|
||||
}, {
|
||||
Value: "ONEZONE_IA",
|
||||
Help: "One Zone Infrequent Access storage class",
|
||||
}, {
|
||||
Value: "GLACIER",
|
||||
Help: "Glacier storage class",
|
||||
}},
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "Chunk size to use for uploading",
|
||||
Default: fs.SizeSuffix(s3manager.MinUploadPartSize),
|
||||
// Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm
|
||||
Name: "storage_class",
|
||||
Help: "The storage class to use when storing new objects in OSS.",
|
||||
Provider: "Alibaba",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Default",
|
||||
}, {
|
||||
Value: "STANDARD",
|
||||
Help: "Standard storage class",
|
||||
}, {
|
||||
Value: "GLACIER",
|
||||
Help: "Archive storage mode.",
|
||||
}, {
|
||||
Value: "STANDARD_IA",
|
||||
Help: "Infrequent access storage mode.",
|
||||
}},
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: `Cutoff for switching to chunked upload
|
||||
|
||||
Any files larger than this will be uploaded in chunks of chunk_size.
|
||||
The minimum is 0 and the maximum is 5GB.`,
|
||||
Default: defaultUploadCutoff,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: `Chunk size to use for uploading.
|
||||
|
||||
When uploading files larger than upload_cutoff they will be uploaded
|
||||
as multipart uploads using this chunk size.
|
||||
|
||||
Note that "--s3-upload-concurrency" chunks of this size are buffered
|
||||
in memory per transfer.
|
||||
|
||||
If you are transferring large files over high speed links and you have
|
||||
enough memory, then increasing this will speed up the transfers.`,
|
||||
Default: minChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "disable_checksum",
|
||||
@@ -546,30 +694,57 @@ func init() {
|
||||
}, {
|
||||
Name: "session_token",
|
||||
Help: "An AWS session token",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_concurrency",
|
||||
Help: "Concurrency for multipart uploads.",
|
||||
Default: 2,
|
||||
Name: "upload_concurrency",
|
||||
Help: `Concurrency for multipart uploads.
|
||||
|
||||
This is the number of chunks of the same file that are uploaded
|
||||
concurrently.
|
||||
|
||||
If you are uploading small numbers of large files over high speed links
|
||||
and these uploads do not fully utilize your bandwidth, then increasing
|
||||
this may help to speed up the transfers.`,
|
||||
Default: 4,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "force_path_style",
|
||||
Help: "If true use path style access if false use virtual hosted style.\nSome providers (eg Aliyun OSS or Netease COS) require this.",
|
||||
Name: "force_path_style",
|
||||
Help: `If true use path style access if false use virtual hosted style.
|
||||
|
||||
If this is true (the default) then rclone will use path style access,
|
||||
if false then rclone will use virtual path style. See [the AWS S3
|
||||
docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
|
||||
for more info.
|
||||
|
||||
Some providers (eg Aliyun OSS or Netease COS) require this set to false.`,
|
||||
Default: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "v2_auth",
|
||||
Help: `If true use v2 authentication.
|
||||
|
||||
If this is false (the default) then rclone will use v4 authentication.
|
||||
If it is set then rclone will use v2 authentication.
|
||||
|
||||
Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Constants
|
||||
const (
|
||||
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
|
||||
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
|
||||
listChunkSize = 1000 // number of items to read at once
|
||||
maxRetries = 10 // number of retries to make of operations
|
||||
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
|
||||
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
|
||||
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
|
||||
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
|
||||
listChunkSize = 1000 // number of items to read at once
|
||||
maxRetries = 10 // number of retries to make of operations
|
||||
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
|
||||
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
|
||||
minChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize)
|
||||
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
|
||||
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
|
||||
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
|
||||
)
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
@@ -582,14 +757,17 @@ type Options struct {
|
||||
Endpoint string `config:"endpoint"`
|
||||
LocationConstraint string `config:"location_constraint"`
|
||||
ACL string `config:"acl"`
|
||||
BucketACL string `config:"bucket_acl"`
|
||||
ServerSideEncryption string `config:"server_side_encryption"`
|
||||
SSEKMSKeyID string `config:"sse_kms_key_id"`
|
||||
StorageClass string `config:"storage_class"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
DisableChecksum bool `config:"disable_checksum"`
|
||||
SessionToken string `config:"session_token"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
ForcePathStyle bool `config:"force_path_style"`
|
||||
V2Auth bool `config:"v2_auth"`
|
||||
}
|
||||
|
||||
// Fs represents a remote s3 server
|
||||
@@ -604,6 +782,8 @@ type Fs struct {
|
||||
bucketOKMu sync.Mutex // mutex to protect bucket OK
|
||||
bucketOK bool // true if we have created the bucket
|
||||
bucketDeleted bool // true if we have deleted the bucket
|
||||
pacer *pacer.Pacer // To pace the API calls
|
||||
srv *http.Client // a plain http client
|
||||
}
|
||||
|
||||
// Object describes a s3 object
|
||||
@@ -649,6 +829,45 @@ func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// retryErrorCodes is a slice of error codes that we will retry
|
||||
// See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
|
||||
var retryErrorCodes = []int{
|
||||
// 409, // Conflict - various states that could be resolved on a retry
|
||||
503, // Service Unavailable/Slow Down - "Reduce your request rate"
|
||||
}
|
||||
|
||||
//S3 is pretty resilient, and the built in retry handling is probably sufficient
|
||||
// as it should notice closed connections and timeouts which are the most likely
|
||||
// sort of failure modes
|
||||
func (f *Fs) shouldRetry(err error) (bool, error) {
|
||||
// If this is an awserr object, try and extract more useful information to determine if we should retry
|
||||
if awsError, ok := err.(awserr.Error); ok {
|
||||
// Simple case, check the original embedded error in case it's generically retryable
|
||||
if fserrors.ShouldRetry(awsError.OrigErr()) {
|
||||
return true, err
|
||||
}
|
||||
// Failing that, if it's a RequestFailure it's probably got an http status code we can check
|
||||
if reqErr, ok := err.(awserr.RequestFailure); ok {
|
||||
// 301 if wrong region for bucket
|
||||
if reqErr.StatusCode() == http.StatusMovedPermanently {
|
||||
urfbErr := f.updateRegionForBucket()
|
||||
if urfbErr != nil {
|
||||
fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr)
|
||||
return false, err
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
for _, e := range retryErrorCodes {
|
||||
if reqErr.StatusCode() == e {
|
||||
return true, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Ok, not an awserr, check for generic failure conditions
|
||||
return fserrors.ShouldRetry(err), err
|
||||
}
|
||||
|
||||
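shouldRetry above is always used through the pacer this patch adds to the s3 Fs struct (see the ListObjects, HeadObject and CopyObject hunks further down): each SDK call is wrapped in a closure that reports whether its error is worth retrying, and the pacer re-runs it with backoff. Below is a simplified, self-contained stand-in for that call pattern; callWithRetry is an invented substitute for rclone's pacer.Call and does none of the real pacer's rate limiting:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// callWithRetry keeps invoking fn while it reports a retryable error,
// sleeping between attempts. Invented stand-in for pacer.Call.
func callWithRetry(attempts int, sleep time.Duration, fn func() (bool, error)) error {
	var retry bool
	var err error
	for i := 0; i < attempts; i++ {
		retry, err = fn()
		if err == nil || !retry {
			return err
		}
		time.Sleep(sleep)
	}
	return err
}

func main() {
	calls := 0
	err := callWithRetry(10, 10*time.Millisecond, func() (bool, error) {
		calls++
		if calls < 3 {
			return true, errors.New("503 Slow Down") // pretend S3 throttled us
		}
		return false, nil // success on the third attempt
	})
	fmt.Println(calls, err) // 3 <nil>
}
```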
// Pattern to match a s3 path
|
||||
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
|
||||
|
||||
@@ -722,17 +941,38 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
|
||||
if opt.Region == "" {
|
||||
opt.Region = "us-east-1"
|
||||
}
|
||||
if opt.Provider == "Alibaba" || opt.Provider == "Netease" {
|
||||
opt.ForcePathStyle = false
|
||||
}
|
||||
awsConfig := aws.NewConfig().
|
||||
WithRegion(opt.Region).
|
||||
WithMaxRetries(maxRetries).
|
||||
WithCredentials(cred).
|
||||
WithEndpoint(opt.Endpoint).
|
||||
WithHTTPClient(fshttp.NewClient(fs.Config)).
|
||||
WithS3ForcePathStyle(opt.ForcePathStyle)
|
||||
if opt.Region != "" {
|
||||
awsConfig.WithRegion(opt.Region)
|
||||
}
|
||||
if opt.Endpoint != "" {
|
||||
awsConfig.WithEndpoint(opt.Endpoint)
|
||||
}
|
||||
|
||||
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
|
||||
ses := session.New()
|
||||
c := s3.New(ses, awsConfig)
|
||||
if opt.Region == "other-v2-signature" {
|
||||
awsSessionOpts := session.Options{
|
||||
Config: *awsConfig,
|
||||
}
|
||||
if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
|
||||
// Enable loading config options from ~/.aws/config (selected by AWS_PROFILE env)
|
||||
awsSessionOpts.SharedConfigState = session.SharedConfigEnable
|
||||
// The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source
|
||||
// (from the shared config file) if the passed-in Options.Config.Credentials is nil.
|
||||
awsSessionOpts.Config.Credentials = nil
|
||||
}
|
||||
ses, err := session.NewSessionWithOptions(awsSessionOpts)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
c := s3.New(ses)
|
||||
if opt.V2Auth || opt.Region == "other-v2-signature" {
|
||||
fs.Debugf(nil, "Using v2 auth")
|
||||
signer := func(req *request.Request) {
|
||||
// Ignore AnonymousCredentials object
|
||||
@@ -748,6 +988,36 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
|
||||
return c, ses, nil
|
||||
}
|
||||
|
||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||
if cs < minChunkSize {
|
||||
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadChunkSize(cs)
|
||||
if err == nil {
|
||||
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func checkUploadCutoff(cs fs.SizeSuffix) error {
|
||||
if cs > maxUploadCutoff {
|
||||
return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadCutoff(cs)
|
||||
if err == nil {
|
||||
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
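checkUploadChunkSize and checkUploadCutoff give NewFs (next hunk) and the exported test hooks one place to validate the two new size options, and the setters use Go's multiple assignment to swap in the new value while handing back the old one so callers, such as the integration tests, can restore it. A small self-contained sketch of that validate-and-swap pattern, with plain int64 byte counts standing in for fs.SizeSuffix:

```go
package main

import "fmt"

const minChunkSize = 5 * 1024 * 1024 // the s3manager.MinUploadPartSize lower bound

type options struct{ chunkSize int64 }

// setChunkSize validates, then swaps in the new value and returns the old
// one, mirroring setUploadChunkSize above (simplified illustration).
func (o *options) setChunkSize(cs int64) (old int64, err error) {
	if cs < minChunkSize {
		return 0, fmt.Errorf("%d is less than %d", cs, minChunkSize)
	}
	old, o.chunkSize = o.chunkSize, cs
	return old, nil
}

func main() {
	o := &options{chunkSize: 16 * 1024 * 1024}
	old, err := o.setChunkSize(8 * 1024 * 1024)
	fmt.Println(old, o.chunkSize, err) // 16777216 8388608 <nil>

	_, err = o.setChunkSize(1024) // too small, rejected
	fmt.Println(err)
}
```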
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
@@ -756,13 +1026,24 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.ChunkSize < fs.SizeSuffix(s3manager.MinUploadPartSize) {
|
||||
return nil, errors.Errorf("s3 chunk size (%v) must be >= %v", opt.ChunkSize, fs.SizeSuffix(s3manager.MinUploadPartSize))
|
||||
err = checkUploadChunkSize(opt.ChunkSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "s3: chunk size")
|
||||
}
|
||||
err = checkUploadCutoff(opt.UploadCutoff)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "s3: upload cutoff")
|
||||
}
|
||||
bucket, directory, err := s3ParsePath(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.ACL == "" {
|
||||
opt.ACL = "private"
|
||||
}
|
||||
if opt.BucketACL == "" {
|
||||
opt.BucketACL = opt.ACL
|
||||
}
|
||||
c, ses, err := s3Connection(opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -774,6 +1055,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
c: c,
|
||||
bucket: bucket,
|
||||
ses: ses,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
|
||||
srv: fshttp.NewClient(fs.Config),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
@@ -787,7 +1070,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
Bucket: &f.bucket,
|
||||
Key: &directory,
|
||||
}
|
||||
_, err = f.c.HeadObject(&req)
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err = f.c.HeadObject(&req)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
f.root = path.Dir(directory)
|
||||
if f.root == "." {
|
||||
@@ -836,6 +1122,51 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
return f.newObjectWithInfo(remote, nil)
|
||||
}
|
||||
|
||||
// Gets the bucket location
|
||||
func (f *Fs) getBucketLocation() (string, error) {
|
||||
req := s3.GetBucketLocationInput{
|
||||
Bucket: &f.bucket,
|
||||
}
|
||||
var resp *s3.GetBucketLocationOutput
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.c.GetBucketLocation(&req)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return s3.NormalizeBucketLocation(aws.StringValue(resp.LocationConstraint)), nil
|
||||
}
|
||||
|
||||
// Updates the region for the bucket by reading the region from the
|
||||
// bucket then updating the session.
|
||||
func (f *Fs) updateRegionForBucket() error {
|
||||
region, err := f.getBucketLocation()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "reading bucket location failed")
|
||||
}
|
||||
if aws.StringValue(f.c.Config.Endpoint) != "" {
|
||||
return errors.Errorf("can't set region to %q as endpoint is set", region)
|
||||
}
|
||||
if aws.StringValue(f.c.Config.Region) == region {
|
||||
return errors.Errorf("region is already %q - not updating", region)
|
||||
}
|
||||
|
||||
// Make a new session with the new region
|
||||
oldRegion := f.opt.Region
|
||||
f.opt.Region = region
|
||||
c, ses, err := s3Connection(&f.opt)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "creating new session failed")
|
||||
}
|
||||
f.c = c
|
||||
f.ses = ses
|
||||
|
||||
fs.Logf(f, "Switched region to %q from %q", region, oldRegion)
|
||||
return nil
|
||||
}
|
||||
|
||||
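getBucketLocation runs the API result through s3.NormalizeBucketLocation because GetBucketLocation reports some regions in a legacy form, most notably an empty LocationConstraint for us-east-1; updateRegionForBucket then rebuilds the session with the normalized region after shouldRetry sees a 301 redirect for a bucket in the wrong region. A sketch of the normalization this relies on, assuming the documented special cases (empty string and "EU"); real code should call the SDK helper itself:

```go
package main

import "fmt"

// normalize mimics the documented behaviour of s3.NormalizeBucketLocation
// for the two legacy values (assumption for this illustration).
func normalize(locationConstraint string) string {
	switch locationConstraint {
	case "":
		return "us-east-1" // GetBucketLocation returns "" for us-east-1
	case "EU":
		return "eu-west-1"
	}
	return locationConstraint
}

func main() {
	fmt.Println(normalize(""))          // us-east-1
	fmt.Println(normalize("EU"))        // eu-west-1
	fmt.Println(normalize("eu-west-2")) // unchanged
}
```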
// listFn is called from list to handle an object.
|
||||
type listFn func(remote string, object *s3.Object, isDirectory bool) error
|
||||
|
||||
@@ -864,7 +1195,12 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) error {
|
||||
MaxKeys: &maxKeys,
|
||||
Marker: marker,
|
||||
}
|
||||
resp, err := f.c.ListObjects(&req)
|
||||
var resp *s3.ListObjectsOutput
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.c.ListObjects(&req)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.RequestFailure); ok {
|
||||
if awsErr.StatusCode() == http.StatusNotFound {
|
||||
@@ -989,7 +1325,11 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
|
||||
return nil, fs.ErrorListBucketRequired
|
||||
}
|
||||
req := s3.ListBucketsInput{}
|
||||
resp, err := f.c.ListBuckets(&req)
|
||||
var resp *s3.ListBucketsOutput
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.c.ListBuckets(&req)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1074,7 +1414,10 @@ func (f *Fs) dirExists() (bool, error) {
|
||||
req := s3.HeadBucketInput{
|
||||
Bucket: &f.bucket,
|
||||
}
|
||||
_, err := f.c.HeadBucket(&req)
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
_, err := f.c.HeadBucket(&req)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
@@ -1104,14 +1447,17 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
}
|
||||
req := s3.CreateBucketInput{
|
||||
Bucket: &f.bucket,
|
||||
ACL: &f.opt.ACL,
|
||||
ACL: &f.opt.BucketACL,
|
||||
}
|
||||
if f.opt.LocationConstraint != "" {
|
||||
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
|
||||
LocationConstraint: &f.opt.LocationConstraint,
|
||||
}
|
||||
}
|
||||
_, err := f.c.CreateBucket(&req)
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
_, err := f.c.CreateBucket(&req)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err, ok := err.(awserr.Error); ok {
|
||||
if err.Code() == "BucketAlreadyOwnedByYou" {
|
||||
err = nil
|
||||
@@ -1120,6 +1466,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
if err == nil {
|
||||
f.bucketOK = true
|
||||
f.bucketDeleted = false
|
||||
fs.Infof(f, "Bucket created with ACL %q", *req.ACL)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -1136,10 +1483,14 @@ func (f *Fs) Rmdir(dir string) error {
|
||||
req := s3.DeleteBucketInput{
|
||||
Bucket: &f.bucket,
|
||||
}
|
||||
_, err := f.c.DeleteBucket(&req)
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
_, err := f.c.DeleteBucket(&req)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
f.bucketOK = false
|
||||
f.bucketDeleted = true
|
||||
fs.Infof(f, "Bucket deleted")
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -1179,11 +1530,24 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
source := pathEscape(srcFs.bucket + "/" + srcFs.root + srcObj.remote)
|
||||
req := s3.CopyObjectInput{
|
||||
Bucket: &f.bucket,
|
||||
ACL: &f.opt.ACL,
|
||||
Key: &key,
|
||||
CopySource: &source,
|
||||
MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
|
||||
}
|
||||
_, err = f.c.CopyObject(&req)
|
||||
if f.opt.ServerSideEncryption != "" {
|
||||
req.ServerSideEncryption = &f.opt.ServerSideEncryption
|
||||
}
|
||||
if f.opt.SSEKMSKeyID != "" {
|
||||
req.SSEKMSKeyId = &f.opt.SSEKMSKeyID
|
||||
}
|
||||
if f.opt.StorageClass != "" {
|
||||
req.StorageClass = &f.opt.StorageClass
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err = f.c.CopyObject(&req)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1260,7 +1624,12 @@ func (o *Object) readMetaData() (err error) {
|
||||
Bucket: &o.fs.bucket,
|
||||
Key: &key,
|
||||
}
|
||||
resp, err := o.fs.c.HeadObject(&req)
|
||||
var resp *s3.HeadObjectOutput
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
resp, err = o.fs.c.HeadObject(&req)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.RequestFailure); ok {
|
||||
if awsErr.StatusCode() == http.StatusNotFound {
|
||||
@@ -1344,7 +1713,19 @@ func (o *Object) SetModTime(modTime time.Time) error {
|
||||
Metadata: o.meta,
|
||||
MetadataDirective: &directive,
|
||||
}
|
||||
_, err = o.fs.c.CopyObject(&req)
|
||||
if o.fs.opt.ServerSideEncryption != "" {
|
||||
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
|
||||
}
|
||||
if o.fs.opt.SSEKMSKeyID != "" {
|
||||
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
|
||||
}
|
||||
if o.fs.opt.StorageClass != "" {
|
||||
req.StorageClass = &o.fs.opt.StorageClass
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err := o.fs.c.CopyObject(&req)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1371,7 +1752,12 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
resp, err := o.fs.c.GetObject(&req)
|
||||
var resp *s3.GetObjectOutput
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
resp, err = o.fs.c.GetObject(&req)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
if err, ok := err.(awserr.RequestFailure); ok {
|
||||
if err.Code() == "InvalidObjectState" {
|
||||
return nil, errors.Errorf("Object in GLACIER, restore first: %v", key)
|
||||
@@ -1392,38 +1778,46 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
modTime := src.ModTime()
|
||||
size := src.Size()
|
||||
|
||||
uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
|
||||
u.Concurrency = o.fs.opt.UploadConcurrency
|
||||
u.LeavePartsOnError = false
|
||||
u.S3 = o.fs.c
|
||||
u.PartSize = int64(o.fs.opt.ChunkSize)
|
||||
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
|
||||
var uploader *s3manager.Uploader
|
||||
if multipart {
|
||||
uploader = s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
|
||||
u.Concurrency = o.fs.opt.UploadConcurrency
|
||||
u.LeavePartsOnError = false
|
||||
u.S3 = o.fs.c
|
||||
u.PartSize = int64(o.fs.opt.ChunkSize)
|
||||
|
||||
if size == -1 {
|
||||
// Make parts as small as possible while still being able to upload to the
|
||||
// S3 file size limit. Rounded up to nearest MB.
|
||||
u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
|
||||
return
|
||||
}
|
||||
// Adjust PartSize until the number of parts is small enough.
|
||||
if size/u.PartSize >= s3manager.MaxUploadParts {
|
||||
// Calculate partition size rounded up to the nearest MB
|
||||
u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
|
||||
}
|
||||
})
|
||||
if size == -1 {
|
||||
// Make parts as small as possible while still being able to upload to the
|
||||
// S3 file size limit. Rounded up to nearest MB.
|
||||
u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
|
||||
return
|
||||
}
|
||||
// Adjust PartSize until the number of parts is small enough.
|
||||
if size/u.PartSize >= s3manager.MaxUploadParts {
|
||||
// Calculate partition size rounded up to the nearest MB
|
||||
u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Set the mtime in the meta data
|
||||
metadata := map[string]*string{
|
||||
metaMtime: aws.String(swift.TimeToFloatString(modTime)),
|
||||
}
|
||||
|
||||
if !o.fs.opt.DisableChecksum && size > uploader.PartSize {
|
||||
// read the md5sum if available for non multpart and if
|
||||
// disable checksum isn't present.
|
||||
var md5sum string
|
||||
if !multipart || !o.fs.opt.DisableChecksum {
|
||||
hash, err := src.Hash(hash.MD5)
|
||||
|
||||
if err == nil && matchMd5.MatchString(hash) {
|
||||
hashBytes, err := hex.DecodeString(hash)
|
||||
|
||||
if err == nil {
|
||||
metadata[metaMD5Hash] = aws.String(base64.StdEncoding.EncodeToString(hashBytes))
|
||||
md5sum = base64.StdEncoding.EncodeToString(hashBytes)
|
||||
if multipart {
|
||||
metadata[metaMD5Hash] = &md5sum
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1432,27 +1826,98 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
mimeType := fs.MimeType(src)
|
||||
|
||||
key := o.fs.root + o.remote
|
||||
req := s3manager.UploadInput{
|
||||
Bucket: &o.fs.bucket,
|
||||
ACL: &o.fs.opt.ACL,
|
||||
Key: &key,
|
||||
Body: in,
|
||||
ContentType: &mimeType,
|
||||
Metadata: metadata,
|
||||
//ContentLength: &size,
|
||||
}
|
||||
if o.fs.opt.ServerSideEncryption != "" {
|
||||
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
|
||||
}
|
||||
if o.fs.opt.SSEKMSKeyID != "" {
|
||||
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
|
||||
}
|
||||
if o.fs.opt.StorageClass != "" {
|
||||
req.StorageClass = &o.fs.opt.StorageClass
|
||||
}
|
||||
_, err = uploader.Upload(&req)
|
||||
if err != nil {
|
||||
return err
|
||||
if multipart {
|
||||
req := s3manager.UploadInput{
|
||||
Bucket: &o.fs.bucket,
|
||||
ACL: &o.fs.opt.ACL,
|
||||
Key: &key,
|
||||
Body: in,
|
||||
ContentType: &mimeType,
|
||||
Metadata: metadata,
|
||||
//ContentLength: &size,
|
||||
}
|
||||
if o.fs.opt.ServerSideEncryption != "" {
|
||||
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
|
||||
}
|
||||
if o.fs.opt.SSEKMSKeyID != "" {
|
||||
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
|
||||
}
|
||||
if o.fs.opt.StorageClass != "" {
|
||||
req.StorageClass = &o.fs.opt.StorageClass
|
||||
}
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
_, err = uploader.Upload(&req)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
req := s3.PutObjectInput{
|
||||
Bucket: &o.fs.bucket,
|
||||
ACL: &o.fs.opt.ACL,
|
||||
Key: &key,
|
||||
ContentType: &mimeType,
|
||||
Metadata: metadata,
|
||||
}
|
||||
if md5sum != "" {
|
||||
req.ContentMD5 = &md5sum
|
||||
}
|
||||
if o.fs.opt.ServerSideEncryption != "" {
|
||||
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
|
||||
}
|
||||
if o.fs.opt.SSEKMSKeyID != "" {
|
||||
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
|
||||
}
|
||||
if o.fs.opt.StorageClass != "" {
|
||||
req.StorageClass = &o.fs.opt.StorageClass
|
||||
}
|
||||
|
||||
// Create the request
|
||||
putObj, _ := o.fs.c.PutObjectRequest(&req)
|
||||
|
||||
// Sign it so we can upload using a presigned request.
|
||||
//
|
||||
// Note the SDK doesn't currently support streaming to
|
||||
// PutObject so we'll use this work-around.
|
||||
url, headers, err := putObj.PresignRequest(15 * time.Minute)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "s3 upload: sign request")
|
||||
}
|
||||
|
||||
// Set request to nil if empty so as not to make chunked encoding
|
||||
if size == 0 {
|
||||
in = nil
|
||||
}
|
||||
|
||||
// create the vanilla http request
|
||||
httpReq, err := http.NewRequest("PUT", url, in)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "s3 upload: new request")
|
||||
}
|
||||
|
||||
// set the headers we signed and the length
|
||||
httpReq.Header = headers
|
||||
httpReq.ContentLength = size
|
||||
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := o.fs.srv.Do(httpReq)
|
||||
if err != nil {
|
||||
return o.fs.shouldRetry(err)
|
||||
}
|
||||
body, err := rest.ReadBody(resp)
|
||||
if err != nil {
|
||||
return o.fs.shouldRetry(err)
|
||||
}
|
||||
if resp.StatusCode >= 200 && resp.StatusCode < 299 {
|
||||
return false, nil
|
||||
}
|
||||
err = errors.Errorf("s3 upload: %s: %s", resp.Status, body)
|
||||
return fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Read the metadata from the newly created object
|
||||
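For objects below upload_cutoff the patch no longer goes through s3manager at all: it builds a PutObjectRequest, presigns it, and streams the body with rclone's plain HTTP client, because, as the comment in the hunk notes, the SDK's PutObject does not accept a streaming body. A reduced sketch of the presigned-PUT half of that flow, independent of the AWS SDK; uploadPresigned is an invented helper, not part of the patch:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

// uploadPresigned PUTs body to an already presigned URL using the headers
// that were signed, which is the shape of the single-part path above.
func uploadPresigned(url string, headers http.Header, body io.Reader, length int64) error {
	req, err := http.NewRequest("PUT", url, body)
	if err != nil {
		return err
	}
	req.Header = headers
	req.ContentLength = length // the signed request carries an exact length
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		return fmt.Errorf("s3 upload: %s", resp.Status)
	}
	return nil
}

func main() {
	// Example only: a real URL and headers would come from PresignRequest.
	err := uploadPresigned("https://example.invalid/presigned", http.Header{}, nil, 0)
	fmt.Println(err != nil) // true, the example host does not resolve
}
```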
@@ -1468,7 +1933,10 @@ func (o *Object) Remove() error {
|
||||
Bucket: &o.fs.bucket,
|
||||
Key: &key,
|
||||
}
|
||||
_, err := o.fs.c.DeleteObject(&req)
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err := o.fs.c.DeleteObject(&req)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
// Test S3 filesystem interface
|
||||
package s3_test
|
||||
package s3
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/s3"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
@@ -12,6 +12,19 @@ import (
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestS3:",
|
||||
NilObject: (*s3.Object)(nil),
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: minChunkSize,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadCutoff(cs)
|
||||
}
|
||||
|
||||
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
|
||||
@@ -44,16 +44,7 @@ func sign(AccessKey, SecretKey string, req *http.Request) {
|
||||
req.Header.Set("Date", date)
|
||||
|
||||
// Sort out URI
|
||||
uri := req.URL.Opaque
|
||||
if uri != "" {
|
||||
if strings.HasPrefix(uri, "//") {
|
||||
// Strip off //host/uri
|
||||
uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
|
||||
req.URL.Opaque = uri // reset to plain URI otherwise Ceph gets confused
|
||||
}
|
||||
} else {
|
||||
uri = req.URL.Path
|
||||
}
|
||||
uri := req.URL.EscapedPath()
|
||||
if uri == "" {
|
||||
uri = "/"
|
||||
}
|
||||
|
||||
@@ -28,7 +28,7 @@ import (
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pkg/sftp"
|
||||
"github.com/xanzy/ssh-agent"
|
||||
sshagent "github.com/xanzy/ssh-agent"
|
||||
"golang.org/x/crypto/ssh"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
@@ -66,7 +66,22 @@ func init() {
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "key_file",
|
||||
Help: "Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.",
|
||||
Help: "Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.",
|
||||
}, {
|
||||
Name: "key_file_pass",
|
||||
Help: `The passphrase to decrypt the PEM-encoded private key file.
|
||||
|
||||
Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
|
||||
in the new OpenSSH format can't be used.`,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "key_use_agent",
|
||||
Help: `When set forces the usage of the ssh-agent.
|
||||
|
||||
When key-file is also set, the ".pub" file of the specified key-file is read and only the associated key is
|
||||
requested from the ssh-agent. This allows to avoid ` + "`Too many authentication failures for *username*`" + ` errors
|
||||
when the ssh-agent contains many keys.`,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "use_insecure_cipher",
|
||||
Help: "Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.",
|
||||
@@ -90,9 +105,20 @@ func init() {
|
||||
Help: "Allow asking for SFTP password when needed.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "path_override",
|
||||
Default: "",
|
||||
Help: "Override path used by SSH connection.",
|
||||
Name: "path_override",
|
||||
Default: "",
|
||||
Help: `Override path used by SSH connection.
|
||||
|
||||
This allows checksum calculation when SFTP and SSH paths are
|
||||
different. This issue affects among others Synology NAS boxes.
|
||||
|
||||
Shared folders can be found in directories representing volumes
|
||||
|
||||
rclone sync /home/local/directory remote:/directory --ssh-path-override /volume2/directory
|
||||
|
||||
Home directory can be found in a shared folder called "home"
|
||||
|
||||
rclone sync /home/local/directory remote:/home/directory --ssh-path-override /volume1/homes/USER/directory`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "set_modtime",
|
||||
@@ -111,6 +137,8 @@ type Options struct {
|
||||
Port string `config:"port"`
|
||||
Pass string `config:"pass"`
|
||||
KeyFile string `config:"key_file"`
|
||||
KeyFilePass string `config:"key_file_pass"`
|
||||
KeyUseAgent bool `config:"key_use_agent"`
|
||||
UseInsecureCipher bool `config:"use_insecure_cipher"`
|
||||
DisableHashCheck bool `config:"disable_hashcheck"`
|
||||
AskPassword bool `config:"ask_password"`
|
||||
@@ -287,6 +315,18 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
|
||||
f.poolMu.Unlock()
|
||||
}
|
||||
|
||||
// shellExpand replaces a leading "~" with "${HOME}" and expands all environment
|
||||
// variables afterwards.
|
||||
func shellExpand(s string) string {
|
||||
if s != "" {
|
||||
if s[0] == '~' {
|
||||
s = "${HOME}" + s[1:]
|
||||
}
|
||||
s = os.ExpandEnv(s)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
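shellExpand is what lets the new key_file handling accept paths such as ~/.ssh/id_rsa or $HOME/.ssh/id_rsa in the config: a leading tilde becomes ${HOME} and the whole string is then passed through os.ExpandEnv. The same logic, reproduced as a runnable example; the key paths are made up for the demonstration:

```go
package main

import (
	"fmt"
	"os"
)

// shellExpand copies the helper above so the example runs on its own:
// replace a leading "~" with "${HOME}", then expand environment variables.
func shellExpand(s string) string {
	if s != "" {
		if s[0] == '~' {
			s = "${HOME}" + s[1:]
		}
		s = os.ExpandEnv(s)
	}
	return s
}

func main() {
	os.Setenv("HOME", "/home/alice")
	fmt.Println(shellExpand("~/.ssh/id_rsa"))     // /home/alice/.ssh/id_rsa
	fmt.Println(shellExpand("$HOME/keys/backup")) // /home/alice/keys/backup
	fmt.Println(shellExpand("relative/key.pem"))  // unchanged, no ~ and no variables
}
```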
// NewFs creates a new Fs object from the name and root. It connects to
|
||||
// the host specified in the config file.
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
@@ -314,8 +354,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc")
|
||||
}
|
||||
|
||||
keyFile := shellExpand(opt.KeyFile)
|
||||
// Add ssh agent-auth if no password or file specified
|
||||
if opt.Pass == "" && opt.KeyFile == "" {
|
||||
if (opt.Pass == "" && keyFile == "") || opt.KeyUseAgent {
|
||||
sshAgentClient, _, err := sshagent.New()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't connect to ssh-agent")
|
||||
@@ -324,16 +365,46 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't read ssh agent signers")
|
||||
}
|
||||
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signers...))
|
||||
if keyFile != "" {
|
||||
pubBytes, err := ioutil.ReadFile(keyFile + ".pub")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read public key file")
|
||||
}
|
||||
pub, _, _, _, err := ssh.ParseAuthorizedKey(pubBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse public key file")
|
||||
}
|
||||
pubM := pub.Marshal()
|
||||
found := false
|
||||
for _, s := range signers {
|
||||
if bytes.Equal(pubM, s.PublicKey().Marshal()) {
|
||||
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(s))
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return nil, errors.New("private key not found in the ssh-agent")
|
||||
}
|
||||
} else {
|
||||
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signers...))
|
||||
}
|
||||
}
|
||||
|
||||
// Load key file if specified
|
||||
if opt.KeyFile != "" {
|
||||
key, err := ioutil.ReadFile(opt.KeyFile)
|
||||
if keyFile != "" {
|
||||
key, err := ioutil.ReadFile(keyFile)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read private key file")
|
||||
}
|
||||
signer, err := ssh.ParsePrivateKey(key)
|
||||
clearpass := ""
|
||||
if opt.KeyFilePass != "" {
|
||||
clearpass, err = obscure.Reveal(opt.KeyFilePass)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
signer, err := ssh.ParsePrivateKeyWithPassphrase(key, []byte(clearpass))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse private key file")
|
||||
}
|
||||
@@ -494,9 +565,13 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
// If file is a symlink (not a regular file is the best cross platform test we can do), do a stat to
|
||||
// pick up the size and type of the destination, instead of the size and type of the symlink.
|
||||
if !info.Mode().IsRegular() {
|
||||
oldInfo := info
|
||||
info, err = f.stat(remote)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "stat of non-regular file/dir failed")
|
||||
if !os.IsNotExist(err) {
|
||||
fs.Errorf(remote, "stat of non-regular file/dir failed: %v", err)
|
||||
}
|
||||
info = oldInfo
|
||||
}
|
||||
}
|
||||
if info.IsDir() {
|
||||
@@ -583,12 +658,22 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
|
||||
// Rmdir removes the root directory of the Fs object
|
||||
func (f *Fs) Rmdir(dir string) error {
|
||||
// Check to see if directory is empty as some servers will
|
||||
// delete recursively with RemoveDirectory
|
||||
entries, err := f.List(dir)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Rmdir")
|
||||
}
|
||||
if len(entries) != 0 {
|
||||
return fs.ErrorDirectoryNotEmpty
|
||||
}
|
||||
// Remove the directory
|
||||
root := path.Join(f.root, dir)
|
||||
c, err := f.getSftpConnection()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Rmdir")
|
||||
}
|
||||
err = c.sftpClient.Remove(root)
|
||||
err = c.sftpClient.RemoveDirectory(root)
|
||||
f.putSftpConnection(&c, err)
|
||||
return err
|
||||
}
|
||||
@@ -758,6 +843,10 @@ func (o *Object) Hash(r hash.Type) (string, error) {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
if o.fs.opt.DisableHashCheck {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
c, err := o.fs.getSftpConnection()
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "Hash get SFTP connection")
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/operations"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/ncw/rclone/lib/pacer"
|
||||
"github.com/ncw/swift"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -29,15 +30,42 @@ import (
|
||||
const (
|
||||
directoryMarkerContentType = "application/directory" // content type of directory marker objects
|
||||
listChunks = 1000 // chunk size to read directory listings
|
||||
defaultChunkSize = 5 * fs.GibiByte
|
||||
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
|
||||
)
|
||||
|
||||
// SharedOptions are shared between swift and hubic
|
||||
var SharedOptions = []fs.Option{{
|
||||
Name: "chunk_size",
|
||||
Help: `Above this size files will be chunked into a _segments container.
|
||||
|
||||
Above this size files will be chunked into a _segments container. The
|
||||
default for this is 5GB which is its maximum value.`,
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_chunk",
|
||||
Help: `Don't chunk files during streaming upload.
|
||||
|
||||
When doing streaming uploads (eg using rcat or mount) setting this
|
||||
flag will cause the swift backend to not upload chunked files.
|
||||
|
||||
This will limit the maximum upload size to 5GB. However non chunked
|
||||
files are easier to deal with and have an MD5SUM.
|
||||
|
||||
Rclone will still chunk files bigger than chunk_size when doing normal
|
||||
copy operations.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}}
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "swift",
|
||||
Description: "Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Options: append([]fs.Option{{
|
||||
Name: "env_auth",
|
||||
Help: "Get swift credentials from environment variables in standard OpenStack form.",
|
||||
Default: false,
|
||||
@@ -102,6 +130,15 @@ func init() {
|
||||
}, {
|
||||
Name: "auth_token",
|
||||
Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)",
|
||||
}, {
|
||||
Name: "application_credential_id",
|
||||
Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)",
|
||||
}, {
|
||||
Name: "application_credential_name",
|
||||
Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)",
|
||||
}, {
|
||||
Name: "application_credential_secret",
|
||||
Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)",
|
||||
}, {
|
||||
Name: "auth_version",
|
||||
Help: "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)",
|
||||
@@ -121,8 +158,13 @@ func init() {
|
||||
Value: "admin",
|
||||
}},
|
||||
}, {
|
||||
Name: "storage_policy",
|
||||
Help: "The storage policy to use when creating a new container",
|
||||
Name: "storage_policy",
|
||||
Help: `The storage policy to use when creating a new container
|
||||
|
||||
This applies the specified storage policy when creating a new
|
||||
container. The policy cannot be changed afterwards. The allowed
|
||||
configuration values and their meaning depend on your Swift storage
|
||||
provider.`,
|
||||
Default: "",
|
||||
Examples: []fs.OptionExample{{
|
||||
Help: "Default",
|
||||
@@ -134,33 +176,32 @@ func init() {
|
||||
Help: "OVH Public Cloud Archive",
|
||||
Value: "pca",
|
||||
}},
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "Above this size files will be chunked into a _segments container.",
|
||||
Default: fs.SizeSuffix(5 * 1024 * 1024 * 1024),
|
||||
Advanced: true,
|
||||
}},
|
||||
}}, SharedOptions...),
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
User string `config:"user"`
|
||||
Key string `config:"key"`
|
||||
Auth string `config:"auth"`
|
||||
UserID string `config:"user_id"`
|
||||
Domain string `config:"domain"`
|
||||
Tenant string `config:"tenant"`
|
||||
TenantID string `config:"tenant_id"`
|
||||
TenantDomain string `config:"tenant_domain"`
|
||||
Region string `config:"region"`
|
||||
StorageURL string `config:"storage_url"`
|
||||
AuthToken string `config:"auth_token"`
|
||||
AuthVersion int `config:"auth_version"`
|
||||
StoragePolicy string `config:"storage_policy"`
|
||||
EndpointType string `config:"endpoint_type"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
User string `config:"user"`
|
||||
Key string `config:"key"`
|
||||
Auth string `config:"auth"`
|
||||
UserID string `config:"user_id"`
|
||||
Domain string `config:"domain"`
|
||||
Tenant string `config:"tenant"`
|
||||
TenantID string `config:"tenant_id"`
|
||||
TenantDomain string `config:"tenant_domain"`
|
||||
Region string `config:"region"`
|
||||
StorageURL string `config:"storage_url"`
|
||||
AuthToken string `config:"auth_token"`
|
||||
AuthVersion int `config:"auth_version"`
|
||||
ApplicationCredentialID string `config:"application_credential_id"`
|
||||
ApplicationCredentialName string `config:"application_credential_name"`
|
||||
ApplicationCredentialSecret string `config:"application_credential_secret"`
|
||||
StoragePolicy string `config:"storage_policy"`
|
||||
EndpointType string `config:"endpoint_type"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
NoChunk bool `config:"no_chunk"`
|
||||
}
|
||||
|
||||
// Fs represents a remote swift server
|
||||
@@ -175,16 +216,20 @@ type Fs struct {
|
||||
containerOK bool // true if we have created the container
|
||||
segmentsContainer string // container to store the segments (if any) in
|
||||
noCheckContainer bool // don't check the container before creating it
|
||||
pacer *pacer.Pacer // To pace the API calls
|
||||
}
|
||||
|
||||
// Object describes a swift object
|
||||
//
|
||||
// Will definitely have info but maybe not meta
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
info swift.Object // Info from the swift object if known
|
||||
headers swift.Headers // The object headers if known
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
size int64
|
||||
lastModified time.Time
|
||||
contentType string
|
||||
md5 string
|
||||
headers swift.Headers // The object headers if known
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
@@ -215,6 +260,32 @@ func (f *Fs) Features() *fs.Features {
return f.features
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
401, // Unauthorized (eg "Token has expired")
408, // Request Timeout
409, // Conflict - various states that could be resolved on a retry
429, // Rate exceeded.
500, // Get occasional 500 Internal Server Error
503, // Service Unavailable/Slow Down - "Reduce your request rate"
504, // Gateway Time-out
}

// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(err error) (bool, error) {
// If this is an swift.Error object extract the HTTP error code
if swiftError, ok := err.(*swift.Error); ok {
for _, e := range retryErrorCodes {
if swiftError.StatusCode == e {
return true, err
}
}
}
// Check for generic failure conditions
return fserrors.ShouldRetry(err), err
}

// Pattern to match a swift path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)

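Every swift API call touched by this change is wrapped in the same pacer idiom, so it is worth seeing the shape once in isolation. The sketch below is illustrative only: the helper name fetchContainers is invented, and it assumes f.c is a *swift.Connection and f.pacer a *pacer.Pacer, as declared in the Fs struct above.

// fetchContainers is a hypothetical helper showing the retry idiom used
// throughout this change: make the raw call inside pacer.Call and let
// shouldRetry decide, from the HTTP status code, whether to try again.
func (f *Fs) fetchContainers() ([]swift.Container, error) {
	var containers []swift.Container
	var err error
	callErr := f.pacer.Call(func() (bool, error) {
		containers, err = f.c.ContainersAll(nil)
		return shouldRetry(err) // retries 401/408/409/429/500/503/504
	})
	return containers, callErr
}
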
@@ -234,22 +305,25 @@ func parsePath(path string) (container, directory string, err error) {
func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
c := &swift.Connection{
// Keep these in the same order as the Config for ease of checking
UserName: opt.User,
ApiKey: opt.Key,
AuthUrl: opt.Auth,
UserId: opt.UserID,
Domain: opt.Domain,
Tenant: opt.Tenant,
TenantId: opt.TenantID,
TenantDomain: opt.TenantDomain,
Region: opt.Region,
StorageUrl: opt.StorageURL,
AuthToken: opt.AuthToken,
AuthVersion: opt.AuthVersion,
EndpointType: swift.EndpointType(opt.EndpointType),
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(fs.Config),
UserName: opt.User,
ApiKey: opt.Key,
AuthUrl: opt.Auth,
UserId: opt.UserID,
Domain: opt.Domain,
Tenant: opt.Tenant,
TenantId: opt.TenantID,
TenantDomain: opt.TenantDomain,
Region: opt.Region,
StorageUrl: opt.StorageURL,
AuthToken: opt.AuthToken,
AuthVersion: opt.AuthVersion,
ApplicationCredentialId: opt.ApplicationCredentialID,
ApplicationCredentialName: opt.ApplicationCredentialName,
ApplicationCredentialSecret: opt.ApplicationCredentialSecret,
EndpointType: swift.EndpointType(opt.EndpointType),
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(fs.Config),
}
if opt.EnvAuth {
err := c.ApplyEnvironment()
@@ -259,11 +333,13 @@ func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
}
StorageUrl, AuthToken := c.StorageUrl, c.AuthToken // nolint
if !c.Authenticated() {
if c.UserName == "" && c.UserId == "" {
return nil, errors.New("user name or user id not found for authentication (and no storage_url+auth_token is provided)")
}
if c.ApiKey == "" {
return nil, errors.New("key not found")
if (c.ApplicationCredentialId != "" || c.ApplicationCredentialName != "") && c.ApplicationCredentialSecret == "" {
if c.UserName == "" && c.UserId == "" {
return nil, errors.New("user name or user id not found for authentication (and no storage_url+auth_token is provided)")
}
if c.ApiKey == "" {
return nil, errors.New("key not found")
}
}
if c.AuthUrl == "" {
return nil, errors.New("auth not found")
@@ -291,7 +367,23 @@ func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
return c, nil
}

// NewFsWithConnection contstructs an Fs from the path, container:path
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.Byte
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}

func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}

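A brief, hedged illustration of the new chunk size validation: fs.SizeSuffix parses suffixed sizes via its Set method, and checkUploadChunkSize only rejects values below one byte. The "64M" value is invented for the example and is not a default from this change.

// Hypothetical fragment, not part of the diff.
var cs fs.SizeSuffix
err := cs.Set("64M") // parse an example chunk_size value
if err == nil {
	err = checkUploadChunkSize(cs) // nil for any size of at least fs.Byte
}
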
// NewFsWithConnection constructs an Fs from the path, container:path
// and authenticated connection.
//
// if noCheckContainer is set then the Fs won't check the container
@@ -309,6 +401,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
segmentsContainer: container + "_segments",
root: directory,
noCheckContainer: noCheckContainer,
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
}
f.features = (&fs.Features{
ReadMimeType: true,
@@ -318,7 +411,11 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
if f.root != "" {
f.root += "/"
// Check to see if the object exists - ignoring directory markers
info, _, err := f.c.Object(container, directory)
var info swift.Object
err = f.pacer.Call(func() (bool, error) {
info, _, err = f.c.Object(container, directory)
return shouldRetry(err)
})
if err == nil && info.ContentType != directoryMarkerContentType {
f.root = path.Dir(directory)
if f.root == "." {
@@ -333,7 +430,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
return f, nil
}

// NewFs contstructs an Fs from the path, container:path
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
@@ -341,6 +438,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, err
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "swift: chunk size")
}

c, err := swiftConnection(opt, name)
if err != nil {
@@ -366,7 +467,10 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er
}
if info != nil {
// Set info but not headers
o.info = *info
err := o.decodeMetaData(info)
if err != nil {
return nil, err
}
} else {
err := o.readMetaData() // reads info and headers, returning an error
if err != nil {
@@ -404,7 +508,12 @@ func (f *Fs) listContainerRoot(container, root string, dir string, recurse bool,
}
rootLength := len(root)
return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
objects, err := f.c.Objects(container, opts)
var objects []swift.Object
var err error
err = f.pacer.Call(func() (bool, error) {
objects, err = f.c.Objects(container, opts)
return shouldRetry(err)
})
if err == nil {
for i := range objects {
object := &objects[i]
@@ -493,7 +602,11 @@ func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
containers, err := f.c.ContainersAll(nil)
var containers []swift.Container
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(nil)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "container listing failed")
}
@@ -554,7 +667,12 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {

// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
containers, err := f.c.ContainersAll(nil)
var containers []swift.Container
var err error
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(nil)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "container listing failed")
}
@@ -604,14 +722,20 @@ func (f *Fs) Mkdir(dir string) error {
// Check to see if container exists first
var err error = swift.ContainerNotFound
if !f.noCheckContainer {
_, _, err = f.c.Container(f.container)
err = f.pacer.Call(func() (bool, error) {
_, _, err = f.c.Container(f.container)
return shouldRetry(err)
})
}
if err == swift.ContainerNotFound {
headers := swift.Headers{}
if f.opt.StoragePolicy != "" {
headers["X-Storage-Policy"] = f.opt.StoragePolicy
}
err = f.c.ContainerCreate(f.container, headers)
err = f.pacer.Call(func() (bool, error) {
err = f.c.ContainerCreate(f.container, headers)
return shouldRetry(err)
})
}
if err == nil {
f.containerOK = true
@@ -628,7 +752,11 @@ func (f *Fs) Rmdir(dir string) error {
if f.root != "" || dir != "" {
return nil
}
err := f.c.ContainerDelete(f.container)
var err error
err = f.pacer.Call(func() (bool, error) {
err = f.c.ContainerDelete(f.container)
return shouldRetry(err)
})
if err == nil {
f.containerOK = false
}
@@ -687,7 +815,10 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
return nil, fs.ErrorCantCopy
}
srcFs := srcObj.fs
_, err = f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
err = f.pacer.Call(func() (bool, error) {
_, err = f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
return shouldRetry(err)
})
if err != nil {
return nil, err
}
@@ -736,7 +867,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {
fs.Debugf(o, "Returning empty Md5sum for swift large object")
return "", nil
}
return strings.ToLower(o.info.Hash), nil
return strings.ToLower(o.md5), nil
}

// hasHeader checks for the header passed in returning false if the
@@ -765,7 +896,22 @@ func (o *Object) isStaticLargeObject() (bool, error) {

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.info.Bytes
return o.size
}

// decodeMetaData sets the metadata in the object from a swift.Object
//
// Sets
// o.lastModified
// o.size
// o.md5
// o.contentType
func (o *Object) decodeMetaData(info *swift.Object) (err error) {
o.lastModified = info.LastModified
o.size = info.Bytes
o.md5 = info.Hash
o.contentType = info.ContentType
return nil
}

// readMetaData gets the metadata if it hasn't already been fetched
@@ -777,15 +923,23 @@ func (o *Object) readMetaData() (err error) {
if o.headers != nil {
return nil
}
info, h, err := o.fs.c.Object(o.fs.container, o.fs.root+o.remote)
var info swift.Object
var h swift.Headers
err = o.fs.pacer.Call(func() (bool, error) {
info, h, err = o.fs.c.Object(o.fs.container, o.fs.root+o.remote)
return shouldRetry(err)
})
if err != nil {
if err == swift.ObjectNotFound {
return fs.ErrorObjectNotFound
}
return err
}
o.info = info
o.headers = h
err = o.decodeMetaData(&info)
if err != nil {
return err
}
return nil
}

@@ -796,17 +950,17 @@ func (o *Object) readMetaData() (err error) {
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
if fs.Config.UseServerModTime {
return o.info.LastModified
return o.lastModified
}
err := o.readMetaData()
if err != nil {
fs.Debugf(o, "Failed to read metadata: %s", err)
return o.info.LastModified
return o.lastModified
}
modTime, err := o.headers.ObjectMetadata().GetModTime()
if err != nil {
// fs.Logf(o, "Failed to read mtime from object: %v", err)
return o.info.LastModified
return o.lastModified
}
return modTime
}
@@ -829,7 +983,10 @@ func (o *Object) SetModTime(modTime time.Time) error {
newHeaders[k] = v
}
}
return o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
return o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
return shouldRetry(err)
})
}

// Storable returns if this object is storable
@@ -837,14 +994,17 @@ func (o *Object) SetModTime(modTime time.Time) error {
// It compares the Content-Type to directoryMarkerContentType - that
// makes it a directory marker which is not storable.
func (o *Object) Storable() bool {
return o.info.ContentType != directoryMarkerContentType
return o.contentType != directoryMarkerContentType
}

// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
headers := fs.OpenOptionHeaders(options)
_, isRanging := headers["Range"]
in, _, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, !isRanging, headers)
err = o.fs.pacer.Call(func() (bool, error) {
in, _, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, !isRanging, headers)
return shouldRetry(err)
})
return
}

@@ -871,13 +1031,20 @@ func (o *Object) removeSegments(except string) error {
}
segmentPath := segmentsRoot + remote
fs.Debugf(o, "Removing segment file %q in container %q", segmentPath, o.fs.segmentsContainer)
return o.fs.c.ObjectDelete(o.fs.segmentsContainer, segmentPath)
var err error
return o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectDelete(o.fs.segmentsContainer, segmentPath)
return shouldRetry(err)
})
})
if err != nil {
return err
}
// remove the segments container if empty, ignore errors
err = o.fs.c.ContainerDelete(o.fs.segmentsContainer)
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ContainerDelete(o.fs.segmentsContainer)
return shouldRetry(err)
})
if err == nil {
fs.Debugf(o, "Removed empty container %q", o.fs.segmentsContainer)
}
@@ -906,13 +1073,19 @@ func urlEncode(str string) string {
func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
// Create the segmentsContainer if it doesn't exist
var err error
_, _, err = o.fs.c.Container(o.fs.segmentsContainer)
err = o.fs.pacer.Call(func() (bool, error) {
_, _, err = o.fs.c.Container(o.fs.segmentsContainer)
return shouldRetry(err)
})
if err == swift.ContainerNotFound {
headers := swift.Headers{}
if o.fs.opt.StoragePolicy != "" {
headers["X-Storage-Policy"] = o.fs.opt.StoragePolicy
}
err = o.fs.c.ContainerCreate(o.fs.segmentsContainer, headers)
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ContainerCreate(o.fs.segmentsContainer, headers)
return shouldRetry(err)
})
}
if err != nil {
return "", err
@@ -941,7 +1114,10 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
segmentReader := io.LimitReader(in, n)
segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, o.fs.segmentsContainer)
_, err := o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
_, err = o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
return shouldRetry(err)
})
if err != nil {
return "", err
}
@@ -952,7 +1128,10 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
headers["Content-Length"] = "0" // set Content-Length as we know it
emptyReader := bytes.NewReader(nil)
manifestName := o.fs.root + o.remote
_, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers)
err = o.fs.pacer.Call(func() (bool, error) {
_, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers)
return shouldRetry(err)
})
return uniquePrefix + "/", err
}

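For chunked (dynamic large object) uploads, updateChunks writes each segment into the <container>_segments container under a unique prefix, then puts an empty manifest object in the main container that points at that prefix. Below is a hedged sketch of the resulting layout; the container name, path, and prefix format are invented for illustration, as the real prefix is generated per upload by the code above.

// Hypothetical names illustrating the layout produced by updateChunks.
segmentsContainer := "mycontainer" + "_segments" // segments live next to the main container
uniquePrefix := "dir/file.bin/example-prefix"    // invented; the real value is generated per upload
for _, i := range []int{1, 2, 3} {               // example segment indices
	fmt.Printf("%s/%s/%08d\n", segmentsContainer, uniquePrefix, i)
}
// The manifest is an empty object at "mycontainer/dir/file.bin" whose headers
// reference the segments prefix, so Swift serves the segments as one object.
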
@@ -982,17 +1161,31 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
contentType := fs.MimeType(src)
headers := m.ObjectHeaders()
uniquePrefix := ""
if size > int64(o.fs.opt.ChunkSize) || size == -1 {
if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
uniquePrefix, err = o.updateChunks(in, headers, size, contentType)
if err != nil {
return err
}
o.headers = nil // wipe old metadata
} else {
headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length as we know it
_, err := o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
if size >= 0 {
headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length if we know it
}
var rxHeaders swift.Headers
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
return shouldRetry(err)
})
if err != nil {
return err
}
// set Metadata since ObjectPut checked the hash and length so we know the
// object has been safely uploaded
o.lastModified = modTime
o.size = size
o.md5 = rxHeaders["ETag"]
o.contentType = contentType
o.headers = headers
}

// If file was a dynamic large object then remove old/all segments
@@ -1003,8 +1196,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}
}

// Read the metadata from the newly created object
o.headers = nil // wipe old metadata
// Read the metadata from the newly created object if necessary
return o.readMetaData()
}

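The new no_chunk option only changes the behaviour of streaming uploads of unknown length: sized uploads larger than chunk_size are always chunked, sized uploads at or below it never are, and unknown-size uploads (size == -1) are chunked unless no_chunk is set. Restated as a standalone helper, purely for illustration:

// chunked restates the upload-path decision taken in Update above.
// size == -1 means the length is unknown (a streamed upload).
func chunked(size, chunkSize int64, noChunk bool) bool {
	return size > chunkSize || (size == -1 && !noChunk)
}
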
@@ -1015,7 +1207,10 @@ func (o *Object) Remove() error {
return err
}
// Remove file/manifest first
err = o.fs.c.ObjectDelete(o.fs.container, o.fs.root+o.remote)
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectDelete(o.fs.container, o.fs.root+o.remote)
return shouldRetry(err)
})
if err != nil {
return err
}
@@ -1031,7 +1226,7 @@ func (o *Object) Remove() error {

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
return o.info.ContentType
return o.contentType
}

// Check the interfaces are satisfied

@@ -1,10 +1,10 @@
// Test Swift filesystem interface
package swift_test
package swift

import (
"testing"

"github.com/ncw/rclone/backend/swift"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)

@@ -12,6 +12,12 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestSwift:",
NilObject: (*swift.Object)(nil),
NilObject: (*Object)(nil),
})
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}

var _ fstests.SetUploadChunkSizer = (*Fs)(nil)

428 backend/union/union.go Normal file
@@ -0,0 +1,428 @@
package union

import (
"fmt"
"io"
"path"
"path/filepath"
"strings"
"time"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors"
)

// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "union",
Description: "A stackable unification remote, which can appear to merge the contents of several remotes",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remotes",
Help: "List of space separated remotes.\nCan be 'remotea:test/dir remoteb:', '\"remotea:test/space dir\" remoteb:', etc.\nThe last remote is used to write to.",
Required: true,
}},
}
fs.Register(fsi)
}

// Options defines the configuration for this backend
type Options struct {
Remotes fs.SpaceSepList `config:"remotes"`
}

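A hedged note on the remotes option: fs.SpaceSepList splits the value on spaces while honouring double quotes, so a path containing a space stays a single element, as the Help text above shows. The fragment below is illustrative only and the remote names are invented.

// Hypothetical fragment parsing a remotes value the way Options would receive it.
var remotes fs.SpaceSepList
err := remotes.Set(`"remotea:test/space dir" remoteb:`)
// err == nil and len(remotes) == 2: the quoted element keeps its space.
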
// Fs represents a union of remotes
type Fs struct {
name string // name of this remote
features *fs.Features // optional features
opt Options // options for this Fs
root string // the path we are working on
remotes []fs.Fs // slice of remotes
wr fs.Fs // writable remote
hashSet hash.Set // intersection of hash types
}

// Object describes a union Object
//
// This is a wrapped object which returns the Union Fs as its parent
type Object struct {
fs.Object
fs *Fs // what this object is part of
}

// Wrap an existing object in the union Object
func (f *Fs) wrapObject(o fs.Object) *Object {
return &Object{
Object: o,
fs: f,
}
}

// Fs returns the union Fs as the parent
func (o *Object) Fs() fs.Info {
return o.fs
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("union root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}

// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(dir string) error {
return f.wr.Rmdir(dir)
}

// Hashes returns hash.HashNone to indicate remote hashing is unavailable
func (f *Fs) Hashes() hash.Set {
return f.hashSet
}

// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(dir string) error {
return f.wr.Mkdir(dir)
}

// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge() error {
return f.wr.Features().Purge()
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
if src.Fs() != f.wr {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
o, err := f.wr.Features().Copy(src, remote)
if err != nil {
return nil, err
}
return f.wrapObject(o), nil
}

// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
if src.Fs() != f.wr {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
o, err := f.wr.Features().Move(src, remote)
if err != nil {
return nil, err
}
return f.wrapObject(o), err
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
return f.wr.Features().DirMove(srcFs.wr, srcRemote, dstRemote)
}

// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(fn func(string, fs.EntryType), ch <-chan time.Duration) {
var remoteChans []chan time.Duration

for _, remote := range f.remotes {
if ChangeNotify := remote.Features().ChangeNotify; ChangeNotify != nil {
ch := make(chan time.Duration)
remoteChans = append(remoteChans, ch)
ChangeNotify(fn, ch)
}
}

go func() {
for i := range ch {
for _, c := range remoteChans {
c <- i
}
}
for _, c := range remoteChans {
close(c)
}
}()
}

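As the comment above describes, the caller drives polling by sending intervals on the channel and closing it to stop; the union backend simply fans those values out to every wrapped remote that supports ChangeNotify. A hedged sketch of a possible caller follows; the callback body is a placeholder.

// Hypothetical caller of ChangeNotify.
pollInterval := make(chan time.Duration)
f.ChangeNotify(func(path string, entryType fs.EntryType) {
	fmt.Println("changed:", path) // placeholder handler
}, pollInterval)
pollInterval <- 10 * time.Second // start polling at this interval
pollInterval <- 0                // a 0 Duration pauses polling
close(pollInterval)              // closing stops polling and releases resources
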
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
for _, remote := range f.remotes {
if DirCacheFlush := remote.Features().DirCacheFlush; DirCacheFlush != nil {
DirCacheFlush()
}
}
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.wr.Features().PutStream(in, src, options...)
if err != nil {
return nil, err
}
return f.wrapObject(o), err
}

// About gets quota information from the Fs
func (f *Fs) About() (*fs.Usage, error) {
return f.wr.Features().About()
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.wr.Put(in, src, options...)
if err != nil {
return nil, err
}
return f.wrapObject(o), err
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
set := make(map[string]fs.DirEntry)
found := false
for _, remote := range f.remotes {
var remoteEntries, err = remote.List(dir)
if err == fs.ErrorDirNotFound {
continue
}
if err != nil {
return nil, errors.Wrapf(err, "List failed on %v", remote)
}
found = true
for _, remoteEntry := range remoteEntries {
set[remoteEntry.Remote()] = remoteEntry
}
}
if !found {
return nil, fs.ErrorDirNotFound
}
for _, entry := range set {
if o, ok := entry.(fs.Object); ok {
entry = f.wrapObject(o)
}
entries = append(entries, entry)
}
return entries, nil
}

// NewObject creates a new remote union file object based on the first Object it finds (reverse remote order)
func (f *Fs) NewObject(path string) (fs.Object, error) {
for i := range f.remotes {
var remote = f.remotes[len(f.remotes)-i-1]
var obj, err = remote.NewObject(path)
if err == fs.ErrorObjectNotFound {
continue
}
if err != nil {
return nil, errors.Wrapf(err, "NewObject failed on %v", remote)
}
return f.wrapObject(obj), nil
}
return nil, fs.ErrorObjectNotFound
}

// Precision is the greatest Precision of all remotes
func (f *Fs) Precision() time.Duration {
var greatestPrecision time.Duration
for _, remote := range f.remotes {
if remote.Precision() > greatestPrecision {
greatestPrecision = remote.Precision()
}
}
return greatestPrecision
}

// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if len(opt.Remotes) == 0 {
return nil, errors.New("union can't point to an empty remote - check the value of the remotes setting")
}
if len(opt.Remotes) == 1 {
return nil, errors.New("union can't point to a single remote - check the value of the remotes setting")
}
for _, remote := range opt.Remotes {
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point union remote at itself - check the value of the remote setting")
}
}

var remotes []fs.Fs
for i := range opt.Remotes {
// Last remote first so we return the correct (last) matching fs in case of fs.ErrorIsFile
var remote = opt.Remotes[len(opt.Remotes)-i-1]
_, configName, fsPath, err := fs.ParseRemote(remote)
if err != nil {
return nil, err
}
var rootString = path.Join(fsPath, filepath.ToSlash(root))
if configName != "local" {
rootString = configName + ":" + rootString
}
myFs, err := fs.NewFs(rootString)
if err != nil {
if err == fs.ErrorIsFile {
return myFs, err
}
return nil, err
}
remotes = append(remotes, myFs)
}

// Reverse the remotes again so they are in the order as before
for i, j := 0, len(remotes)-1; i < j; i, j = i+1, j-1 {
remotes[i], remotes[j] = remotes[j], remotes[i]
}

f := &Fs{
name: name,
root: root,
opt: *opt,
remotes: remotes,
wr: remotes[len(remotes)-1],
}
var features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
ReadMimeType: true,
WriteMimeType: true,
CanHaveEmptyDirectories: true,
BucketBased: true,
SetTier: true,
GetTier: true,
}).Fill(f)
features = features.Mask(f.wr) // mask the features just on the writable fs

// Really need the union of all remotes for these, so
// re-instate and calculate separately.
features.ChangeNotify = f.ChangeNotify
features.DirCacheFlush = f.DirCacheFlush

// FIXME maybe should be masking the bools here?

// Clear ChangeNotify and DirCacheFlush if all are nil
clearChangeNotify := true
clearDirCacheFlush := true
for _, remote := range f.remotes {
remoteFeatures := remote.Features()
if remoteFeatures.ChangeNotify != nil {
clearChangeNotify = false
}
if remoteFeatures.DirCacheFlush != nil {
clearDirCacheFlush = false
}
}
if clearChangeNotify {
features.ChangeNotify = nil
}
if clearDirCacheFlush {
features.DirCacheFlush = nil
}

f.features = features

// Get common intersection of hashes
hashSet := f.remotes[0].Hashes()
for _, remote := range f.remotes[1:] {
hashSet = hashSet.Overlap(remote.Hashes())
}
f.hashSet = hashSet

return f, nil
}

// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
)
Some files were not shown because too many files have changed in this diff.