Mirror of https://github.com/rclone/rclone.git (synced 2026-01-01 08:03:26 +00:00)

Compare commits: 542 Commits, v1.57.0...fix-union-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1d1d847f18 | ||
|
|
7a24c173f6 | ||
|
|
fb60aeddae | ||
|
|
695736d1e4 | ||
|
|
f0396070eb | ||
|
|
f1166757ba | ||
|
|
9b76434ad5 | ||
|
|
440d0cd179 | ||
|
|
a047d30eca | ||
|
|
03d0f331f7 | ||
|
|
049674aeab | ||
|
|
50f053cada | ||
|
|
140af43c26 | ||
|
|
f467188876 | ||
|
|
4a4379b312 | ||
|
|
8c02fe7b89 | ||
|
|
11be920e90 | ||
|
|
8c19b355a5 | ||
|
|
67fd60275a | ||
|
|
b310490fa5 | ||
|
|
0ee0812a2b | ||
|
|
55bbff6346 | ||
|
|
9c6cfc1ff0 | ||
|
|
f753d7cd42 | ||
|
|
f5be1d6b65 | ||
|
|
00a684d877 | ||
|
|
1c4ee2feee | ||
|
|
876f12f2c4 | ||
|
|
6e9c1eebd9 | ||
|
|
42dfadfa1b | ||
|
|
b4d847cadd | ||
|
|
502226bfc8 | ||
|
|
53400d7edc | ||
|
|
62bcc84f6f | ||
|
|
2e54b56a01 | ||
|
|
2515039e18 | ||
|
|
a9c531b9eb | ||
|
|
0db50ecb2f | ||
|
|
388da82762 | ||
|
|
b5efffee9d | ||
|
|
3ec07d5db9 | ||
|
|
5c6a958ad8 | ||
|
|
ad8c94e982 | ||
|
|
e5bf6a813c | ||
|
|
f18095b004 | ||
|
|
c70e890966 | ||
|
|
986bb17656 | ||
|
|
92a43c5f7b | ||
|
|
9612ca6110 | ||
|
|
1f9560e873 | ||
|
|
c9d67c86fb | ||
|
|
3e9c5eca3b | ||
|
|
a1fd60ec2b | ||
|
|
7b8c974dec | ||
|
|
5b579cea47 | ||
|
|
7822df565e | ||
|
|
3435bf7f34 | ||
|
|
0772cae314 | ||
|
|
060c8dfff0 | ||
|
|
424a1f39eb | ||
|
|
06182a3443 | ||
|
|
a58b482061 | ||
|
|
accf91742c | ||
|
|
9dbed02329 | ||
|
|
73e3bb09d7 | ||
|
|
7e7a8a95e9 | ||
|
|
ed87ae51c0 | ||
|
|
bf4a16ae30 | ||
|
|
c198700812 | ||
|
|
8c483daf85 | ||
|
|
ba5760ff38 | ||
|
|
cd1735bb10 | ||
|
|
866c873daa | ||
|
|
c556e98f49 | ||
|
|
22abd785eb | ||
|
|
a692bd2cd4 | ||
|
|
776a083892 | ||
|
|
d823a38ce5 | ||
|
|
78d52882ca | ||
|
|
c4451bc43a | ||
|
|
0652ec95db | ||
|
|
6a0e021dac | ||
|
|
461d041c4d | ||
|
|
35f24d5b84 | ||
|
|
370c8fa220 | ||
|
|
0fca4d2c86 | ||
|
|
5de9278650 | ||
|
|
326c43ab3f | ||
|
|
32006033e6 | ||
|
|
517e7d9271 | ||
|
|
fdd2f8e6d2 | ||
|
|
027746ef6e | ||
|
|
53f831f40a | ||
|
|
9f81b4df4f | ||
|
|
bf54c909c9 | ||
|
|
dbf1234edf | ||
|
|
70d9d75801 | ||
|
|
bc70a95fca | ||
|
|
ee87e919c5 | ||
|
|
4f0eae366f | ||
|
|
de5ccaab8e | ||
|
|
4b7dc35cf4 | ||
|
|
bc705e14d8 | ||
|
|
ea5bb79366 | ||
|
|
e95dff2fa1 | ||
|
|
99dfe1eeae | ||
|
|
ed92bf335d | ||
|
|
3d55b537c6 | ||
|
|
d03fffdf8d | ||
|
|
7a909ebfb0 | ||
|
|
ac0dc9922e | ||
|
|
8b8802a078 | ||
|
|
f2a15a174f | ||
|
|
21c746a56c | ||
|
|
36add0afbf | ||
|
|
14e0396fcb | ||
|
|
100acc570a | ||
|
|
b9de37af80 | ||
|
|
f7c36ce0f9 | ||
|
|
f829ded456 | ||
|
|
2fac8fdde6 | ||
|
|
8e2d9a4cb9 | ||
|
|
295006f662 | ||
|
|
dcc128c70d | ||
|
|
c85fbebce6 | ||
|
|
e59801c69b | ||
|
|
5697dbc80f | ||
|
|
7d3648dc46 | ||
|
|
a6ca4b3817 | ||
|
|
e57fe14b61 | ||
|
|
115f1c2cc9 | ||
|
|
5e4caa69ce | ||
|
|
e7483b40b3 | ||
|
|
fa48b880c2 | ||
|
|
4ac875a811 | ||
|
|
3f61869179 | ||
|
|
60d87185e1 | ||
|
|
78120d40d9 | ||
|
|
95e0934755 | ||
|
|
1651429041 | ||
|
|
29e37749b3 | ||
|
|
1e1af46a12 | ||
|
|
1d2fe0d856 | ||
|
|
4f94b27800 | ||
|
|
4d72abf389 | ||
|
|
411013dbdc | ||
|
|
e87e331f4c | ||
|
|
2e91287b2e | ||
|
|
a0cb3bbd02 | ||
|
|
4a382c09ec | ||
|
|
626a416ff8 | ||
|
|
6c832a72ee | ||
|
|
c390098262 | ||
|
|
41f3ceb67d | ||
|
|
592358148d | ||
|
|
93a25498cf | ||
|
|
32f913ffbd | ||
|
|
621c4ebe15 | ||
|
|
0279bf3abb | ||
|
|
50c2e37aac | ||
|
|
6602e1a851 | ||
|
|
02b4638a22 | ||
|
|
ec117593f1 | ||
|
|
74bd7f3381 | ||
|
|
afa30abd33 | ||
|
|
70d1d8d760 | ||
|
|
5006ede266 | ||
|
|
0f41e91d41 | ||
|
|
3a20929db4 | ||
|
|
cee79f27ee | ||
|
|
aeb5dc2892 | ||
|
|
cfe0911e0d | ||
|
|
7c1f2d7c84 | ||
|
|
5db9a2f831 | ||
|
|
b4091f282a | ||
|
|
218bf2183d | ||
|
|
bb6edb3c39 | ||
|
|
6c2331ffd7 | ||
|
|
08a897424b | ||
|
|
acd7ad9190 | ||
|
|
b246584a02 | ||
|
|
61a75bfe07 | ||
|
|
ef089dd867 | ||
|
|
e3d44612c1 | ||
|
|
b2388f1294 | ||
|
|
a571c1fb46 | ||
|
|
01340acad2 | ||
|
|
700ca23a71 | ||
|
|
f4f0e444bf | ||
|
|
20aaeba547 | ||
|
|
4b358ff43b | ||
|
|
e58d75e4d7 | ||
|
|
fb58737142 | ||
|
|
26db80c270 | ||
|
|
9eb3470c9c | ||
|
|
a449dd7d1c | ||
|
|
fc4fe33703 | ||
|
|
e11bfacfcf | ||
|
|
a9c49c50a0 | ||
|
|
8979337313 | ||
|
|
7ffab5d998 | ||
|
|
3ccf222acb | ||
|
|
2781f8e2f1 | ||
|
|
3d55f69338 | ||
|
|
cc9bc2cb80 | ||
|
|
80ac59ee5b | ||
|
|
5d6a6dd6c0 | ||
|
|
c676e2139d | ||
|
|
7361c98b2d | ||
|
|
5cc47de912 | ||
|
|
6d342a3c5b | ||
|
|
336051870e | ||
|
|
38c6d022bd | ||
|
|
c138367df6 | ||
|
|
da404dc0f2 | ||
|
|
28e43fe7af | ||
|
|
3ec25f437b | ||
|
|
a34276e9b3 | ||
|
|
c2baacc0a4 | ||
|
|
fcec4bedbe | ||
|
|
813a5e0931 | ||
|
|
bd4abb15a3 | ||
|
|
7f84283539 | ||
|
|
47b1a0d6fa | ||
|
|
ce168ecac2 | ||
|
|
4f0ddb60e7 | ||
|
|
b929a56f46 | ||
|
|
74af6409d4 | ||
|
|
0e77072dcc | ||
|
|
2437eb3cce | ||
|
|
a12c94caff | ||
|
|
542c1616b8 | ||
|
|
8697f0bd26 | ||
|
|
a9f18f8093 | ||
|
|
8e5e230b81 | ||
|
|
c0985e93b7 | ||
|
|
fb4f7555c7 | ||
|
|
f2e7a2e794 | ||
|
|
9e4854955c | ||
|
|
319ac225e4 | ||
|
|
a9d3283d97 | ||
|
|
edf0412464 | ||
|
|
e6194a4b83 | ||
|
|
7f05990623 | ||
|
|
e16f2a566f | ||
|
|
a36fef8a66 | ||
|
|
6500e1d205 | ||
|
|
9f7484e4e9 | ||
|
|
0ba702ccf4 | ||
|
|
6f91198b57 | ||
|
|
cf0a72aecd | ||
|
|
f6fd6ee777 | ||
|
|
1e66d052fd | ||
|
|
e5974ac4b0 | ||
|
|
50a0c3482d | ||
|
|
389a29b017 | ||
|
|
9dcf9375e8 | ||
|
|
1d6d41fb91 | ||
|
|
a3d4307892 | ||
|
|
a446106041 | ||
|
|
607172b6ec | ||
|
|
94757277bc | ||
|
|
deab86867c | ||
|
|
c0c5b3bc6b | ||
|
|
a947f298e6 | ||
|
|
1b0128ecb2 | ||
|
|
c5395db1f1 | ||
|
|
6e5382fc99 | ||
|
|
134592adaa | ||
|
|
36e614f550 | ||
|
|
7bfed98b48 | ||
|
|
f471096fd0 | ||
|
|
4cebade95d | ||
|
|
a8cd18faf3 | ||
|
|
e34c543660 | ||
|
|
598364ad0f | ||
|
|
211dbe9aee | ||
|
|
4829527dac | ||
|
|
cc8dde402f | ||
|
|
2b67ad17aa | ||
|
|
6da3522499 | ||
|
|
97606bbdef | ||
|
|
a15885dd74 | ||
|
|
87c201c92a | ||
|
|
d77736c21a | ||
|
|
86bd5f6922 | ||
|
|
fe271a4e35 | ||
|
|
75455d4000 | ||
|
|
82e24f521f | ||
|
|
5605e34f7b | ||
|
|
06598531e0 | ||
|
|
b1d43f8d41 | ||
|
|
b53c38c9fd | ||
|
|
03715f6c6b | ||
|
|
07481396e0 | ||
|
|
bab91e4402 | ||
|
|
fde40319ef | ||
|
|
94e330d4fa | ||
|
|
087543d723 | ||
|
|
6a759d936a | ||
|
|
7c31240bb8 | ||
|
|
25146b4306 | ||
|
|
240561850b | ||
|
|
39a1e37441 | ||
|
|
4c02f50ef5 | ||
|
|
f583b86334 | ||
|
|
118e8e1470 | ||
|
|
afcea9c72b | ||
|
|
27176cc6bb | ||
|
|
f1e4b7da7b | ||
|
|
f065a267f6 | ||
|
|
17f8014909 | ||
|
|
8ba04562c3 | ||
|
|
285747b1d1 | ||
|
|
7bb8b8f4ba | ||
|
|
59c242bbf6 | ||
|
|
a2bacd7d3f | ||
|
|
9babcc4811 | ||
|
|
a0f665ec3c | ||
|
|
ecdf42c17f | ||
|
|
be9ee1d138 | ||
|
|
9e9ead2ac4 | ||
|
|
4f78226f8b | ||
|
|
54c9c3156c | ||
|
|
6ecbbf796e | ||
|
|
603e51c43f | ||
|
|
ca4671126e | ||
|
|
6ea26b508a | ||
|
|
887cccb2c1 | ||
|
|
d975196cfa | ||
|
|
1f39b28f49 | ||
|
|
2738db22fb | ||
|
|
1978ddde73 | ||
|
|
c2bfda22ab | ||
|
|
d4da9b98d6 | ||
|
|
e4f5912294 | ||
|
|
750fffdf71 | ||
|
|
388e74af52 | ||
|
|
f9354fff2f | ||
|
|
ff1f173fc2 | ||
|
|
f8073a7b63 | ||
|
|
807f1cedaa | ||
|
|
bf9c68c88a | ||
|
|
189cba0fbe | ||
|
|
69f726f16c | ||
|
|
65652f7a75 | ||
|
|
47f9ab2f56 | ||
|
|
5dd51e6149 | ||
|
|
6a6d254a9f | ||
|
|
fd453f2c7b | ||
|
|
5d06a82c5d | ||
|
|
847868b4ba | ||
|
|
38ca178cf3 | ||
|
|
9427d22f99 | ||
|
|
7b1428a498 | ||
|
|
ec72432cec | ||
|
|
2339172df2 | ||
|
|
268b808bf8 | ||
|
|
74898bac3b | ||
|
|
e0fbca02d4 | ||
|
|
21355b4208 | ||
|
|
251b84ff2c | ||
|
|
537b62917f | ||
|
|
71a784cfa2 | ||
|
|
8ee0fe9863 | ||
|
|
8f164e4df5 | ||
|
|
06ecc6511b | ||
|
|
3529bdec9b | ||
|
|
486b43f8c7 | ||
|
|
89f0e4df80 | ||
|
|
399fb5b7fb | ||
|
|
19f1ed949c | ||
|
|
d3a1001094 | ||
|
|
dc7e3ea1e3 | ||
|
|
f22b703a51 | ||
|
|
c40129d610 | ||
|
|
8dc93f1792 | ||
|
|
f4c40bf79d | ||
|
|
9cc50a614b | ||
|
|
bcb07a67f6 | ||
|
|
25ea04f1db | ||
|
|
06ffd4882d | ||
|
|
19a5e1d63b | ||
|
|
ec88b66dad | ||
|
|
aa2d7f00c2 | ||
|
|
3e125443aa | ||
|
|
3c271b8b1e | ||
|
|
6d92ba2c6c | ||
|
|
c26dc69e1b | ||
|
|
b0de0b4609 | ||
|
|
f54641511a | ||
|
|
8cf76f5e11 | ||
|
|
18c24014da | ||
|
|
0ae39bda8d | ||
|
|
051685baa1 | ||
|
|
07f53aebdc | ||
|
|
bd6d36b3f6 | ||
|
|
b168479429 | ||
|
|
b447b0cd78 | ||
|
|
4bd2386632 | ||
|
|
83b6b62c1b | ||
|
|
5826cc9d9e | ||
|
|
252432ae54 | ||
|
|
8821629333 | ||
|
|
a2092a8faf | ||
|
|
2b6f4241b4 | ||
|
|
e3dd16d490 | ||
|
|
9e1fd923f6 | ||
|
|
3684789858 | ||
|
|
1ac1dd428a | ||
|
|
65dbd29c22 | ||
|
|
164774d7e1 | ||
|
|
507020f408 | ||
|
|
a667e03fc9 | ||
|
|
1045344943 | ||
|
|
5e469db420 | ||
|
|
946e84d194 | ||
|
|
162aba60eb | ||
|
|
d8a874c32b | ||
|
|
9c451d9ac6 | ||
|
|
8f3f24672c | ||
|
|
0eb7b716d9 | ||
|
|
ee9684e60f | ||
|
|
e0cbe413e1 | ||
|
|
2523dd6220 | ||
|
|
c504d97017 | ||
|
|
b783f09fc6 | ||
|
|
a301478a13 | ||
|
|
63b450a2a5 | ||
|
|
843b77aaaa | ||
|
|
3641727edb | ||
|
|
38e2f835ed | ||
|
|
bd4bbed592 | ||
|
|
994b501188 | ||
|
|
dfa9381814 | ||
|
|
2a85feda4b | ||
|
|
ad46af9168 | ||
|
|
2fed02211c | ||
|
|
237daa8aaf | ||
|
|
8aeca6c033 | ||
|
|
fd82876086 | ||
|
|
be1a668e95 | ||
|
|
9d4eab32d8 | ||
|
|
b4ba7b69b8 | ||
|
|
deef659aef | ||
|
|
4b99e84242 | ||
|
|
06bdf7c64c | ||
|
|
e1225b5729 | ||
|
|
871cc2f62d | ||
|
|
bc23bf11db | ||
|
|
b55575e622 | ||
|
|
328f0e7135 | ||
|
|
a52814eed9 | ||
|
|
071a9e882d | ||
|
|
4e2ca3330c | ||
|
|
408d9f3e7a | ||
|
|
0681a5c86a | ||
|
|
df09c3f555 | ||
|
|
c41814fd2d | ||
|
|
c2557cc432 | ||
|
|
3425726c50 | ||
|
|
46175a22d8 | ||
|
|
bcf0e15ad7 | ||
|
|
b91c349cd5 | ||
|
|
d252816706 | ||
|
|
729117af68 | ||
|
|
cd4d8d55ec | ||
|
|
f26abc89a6 | ||
|
|
b5abbe819f | ||
|
|
a351484997 | ||
|
|
099eff8891 | ||
|
|
c4cb167d4a | ||
|
|
38e100ab19 | ||
|
|
db95a0d6c3 | ||
|
|
df07964db3 | ||
|
|
fbc4c4ad9a | ||
|
|
4454b3e1ae | ||
|
|
f9321fccbb | ||
|
|
3c2252b7c0 | ||
|
|
51c952654c | ||
|
|
80e47be65f | ||
|
|
38dc3e93ee | ||
|
|
ba6730720d | ||
|
|
7735b5c694 | ||
|
|
d45b3479ee | ||
|
|
4c5df0a765 | ||
|
|
8c61a09be2 | ||
|
|
c217145cae | ||
|
|
4c93378f0e | ||
|
|
f9e54f96c3 | ||
|
|
af0fcd03cb | ||
|
|
00aafc957e | ||
|
|
29abbd2032 | ||
|
|
663b2d9c46 | ||
|
|
f36d6d01b5 | ||
|
|
0c03aa3a8b | ||
|
|
caa2b8bf40 | ||
|
|
421e840e37 | ||
|
|
9b57d27be4 | ||
|
|
627ac1b2d9 | ||
|
|
ae395d8cf0 | ||
|
|
f04520a6e3 | ||
|
|
c968c3e41c | ||
|
|
3661791e82 | ||
|
|
4198763c35 | ||
|
|
3de47b8ed4 | ||
|
|
71b8e1e80b | ||
|
|
7366e97dfc | ||
|
|
21ba4d9a18 | ||
|
|
96e099d8e7 | ||
|
|
2a31b5bdd6 | ||
|
|
9bdfe4c36f | ||
|
|
e3a2f539fe | ||
|
|
ffa943e31f | ||
|
|
b16f603c51 | ||
|
|
a7a8372976 | ||
|
|
9beb0677e4 | ||
|
|
e43b5ce5e5 | ||
|
|
97328e5755 | ||
|
|
7b7d780fff | ||
|
|
c2600f9e4d | ||
|
|
7bd853ce35 | ||
|
|
05150cfb1d | ||
|
|
25366268fe | ||
|
|
c08d48a50d | ||
|
|
454574e2cc | ||
|
|
9218a3eb00 | ||
|
|
1e4ef4b4d5 | ||
|
|
8d92f7d697 | ||
|
|
fd56abc5f2 | ||
|
|
b323bf34e2 | ||
|
|
e78e73eae7 | ||
|
|
f51a5eca2e | ||
|
|
39e2af7974 | ||
|
|
b3217adf08 | ||
|
|
074234119a | ||
|
|
6210e22ab5 | ||
|
|
940e99a929 | ||
|
|
79b6866b57 | ||
|
|
c142e3edcc | ||
|
|
5c646dff9a | ||
|
|
19dfaf7440 |
2 .github/ISSUE_TEMPLATE/Bug.md vendored
@@ -9,7 +9,7 @@ We understand you are having a problem with rclone; we want to help you with tha

**STOP and READ**
**YOUR POST WILL BE REMOVED IF IT IS LOW QUALITY**:
Please show the effort you've put in to solving the problem and please be specific.
Please show the effort you've put into solving the problem and please be specific.
People are volunteering their time to help! Low effort posts are not likely to get good answers!

If you think you might have found a bug, try to replicate it with the latest beta (or stable).
86 .github/workflows/build.yml vendored
@@ -25,12 +25,12 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.14', 'go1.15', 'go1.16']
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.16', 'go1.17']

include:
- job_name: linux
os: ubuntu-latest
go: '1.17.x'
go: '1.18.x'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -39,9 +39,16 @@ jobs:
librclonetest: true
deploy: true

- job_name: linux_386
os: ubuntu-latest
go: '1.18.x'
goarch: 386
gotags: cmount
quicktest: true

- job_name: mac_amd64
os: macOS-latest
go: '1.17.x'
os: macos-11
go: '1.18.x'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -49,58 +56,41 @@ jobs:
|
||||
deploy: true
|
||||
|
||||
- job_name: mac_arm64
|
||||
os: macOS-latest
|
||||
go: '1.17.x'
|
||||
os: macos-11
|
||||
go: '1.18.x'
|
||||
gotags: 'cmount'
|
||||
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
|
||||
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
|
||||
deploy: true
|
||||
|
||||
- job_name: windows_amd64
|
||||
- job_name: windows
|
||||
os: windows-latest
|
||||
go: '1.17.x'
|
||||
go: '1.18.x'
|
||||
gotags: cmount
|
||||
build_flags: '-include "^windows/amd64" -cgo'
|
||||
build_args: '-buildmode exe'
|
||||
quicktest: true
|
||||
racequicktest: true
|
||||
deploy: true
|
||||
|
||||
- job_name: windows_386
|
||||
os: windows-latest
|
||||
go: '1.17.x'
|
||||
gotags: cmount
|
||||
goarch: '386'
|
||||
cgo: '1'
|
||||
build_flags: '-include "^windows/386" -cgo'
|
||||
cgo: '0'
|
||||
build_flags: '-include "^windows/"'
|
||||
build_args: '-buildmode exe'
|
||||
quicktest: true
|
||||
deploy: true
|
||||
|
||||
- job_name: other_os
|
||||
os: ubuntu-latest
|
||||
go: '1.17.x'
|
||||
go: '1.18.x'
|
||||
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
|
||||
compile_all: true
|
||||
deploy: true
|
||||
|
||||
- job_name: go1.14
|
||||
os: ubuntu-latest
|
||||
go: '1.14.x'
|
||||
quicktest: true
|
||||
racequicktest: true
|
||||
|
||||
- job_name: go1.15
|
||||
os: ubuntu-latest
|
||||
go: '1.15.x'
|
||||
quicktest: true
|
||||
racequicktest: true
|
||||
|
||||
- job_name: go1.16
|
||||
os: ubuntu-latest
|
||||
go: '1.16.x'
|
||||
quicktest: true
|
||||
racequicktest: true
|
||||
|
||||
- job_name: go1.17
|
||||
os: ubuntu-latest
|
||||
go: '1.17.x'
|
||||
quicktest: true
|
||||
racequicktest: true
|
||||
|
||||
name: ${{ matrix.job_name }}
|
||||
|
||||
runs-on: ${{ matrix.os }}
|
||||
@@ -116,6 +106,7 @@ jobs:
|
||||
with:
|
||||
stable: 'false'
|
||||
go-version: ${{ matrix.go }}
|
||||
check-latest: true
|
||||
|
||||
- name: Set environment variables
|
||||
shell: bash
|
||||
@@ -140,7 +131,7 @@ jobs:
|
||||
run: |
|
||||
brew update
|
||||
brew install --cask macfuse
|
||||
if: matrix.os == 'macOS-latest'
|
||||
if: matrix.os == 'macos-11'
|
||||
|
||||
- name: Install Libraries on Windows
|
||||
shell: powershell
|
||||
@@ -183,6 +174,11 @@ jobs:
|
||||
run: |
|
||||
make
|
||||
|
||||
- name: Rclone version
|
||||
shell: bash
|
||||
run: |
|
||||
rclone version
|
||||
|
||||
- name: Run tests
|
||||
shell: bash
|
||||
run: |
|
||||
@@ -251,14 +247,14 @@ jobs:
|
||||
fetch-depth: 0
|
||||
|
||||
# Upgrade together with NDK version
|
||||
- name: Set up Go 1.16
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v1
|
||||
with:
|
||||
go-version: 1.16
|
||||
go-version: 1.18.x
|
||||
|
||||
# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
|
||||
- name: Force NDK version
|
||||
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;22.1.7171670" | grep -v = || true
|
||||
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;23.1.7779620" | grep -v = || true
|
||||
|
||||
- name: Go module cache
|
||||
uses: actions/cache@v2
|
||||
@@ -279,8 +275,8 @@ jobs:
|
||||
|
||||
- name: install gomobile
|
||||
run: |
|
||||
go get golang.org/x/mobile/cmd/gobind
|
||||
go get golang.org/x/mobile/cmd/gomobile
|
||||
go install golang.org/x/mobile/cmd/gobind@latest
|
||||
go install golang.org/x/mobile/cmd/gomobile@latest
|
||||
env PATH=$PATH:~/go/bin gomobile init
|
||||
|
||||
- name: arm-v7a gomobile build
|
||||
@@ -289,7 +285,7 @@ jobs:
|
||||
- name: arm-v7a Set environment variables
|
||||
shell: bash
|
||||
run: |
|
||||
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
|
||||
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
|
||||
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
||||
echo 'GOOS=android' >> $GITHUB_ENV
|
||||
echo 'GOARCH=arm' >> $GITHUB_ENV
|
||||
@@ -302,7 +298,7 @@ jobs:
|
||||
- name: arm64-v8a Set environment variables
|
||||
shell: bash
|
||||
run: |
|
||||
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
|
||||
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
|
||||
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
||||
echo 'GOOS=android' >> $GITHUB_ENV
|
||||
echo 'GOARCH=arm64' >> $GITHUB_ENV
|
||||
@@ -315,7 +311,7 @@ jobs:
|
||||
- name: x86 Set environment variables
|
||||
shell: bash
|
||||
run: |
|
||||
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
|
||||
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
|
||||
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
||||
echo 'GOOS=android' >> $GITHUB_ENV
|
||||
echo 'GOARCH=386' >> $GITHUB_ENV
|
||||
@@ -328,7 +324,7 @@ jobs:
|
||||
- name: x64 Set environment variables
|
||||
shell: bash
|
||||
run: |
|
||||
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
|
||||
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
|
||||
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
||||
echo 'GOOS=android' >> $GITHUB_ENV
|
||||
echo 'GOARCH=amd64' >> $GITHUB_ENV
|
||||
|
||||
@@ -20,7 +20,7 @@ jobs:
|
||||
with:
|
||||
tag: beta
|
||||
imageName: rclone/rclone
|
||||
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
|
||||
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
|
||||
publish: true
|
||||
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
|
||||
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
|
||||
|
||||
@@ -28,7 +28,7 @@ jobs:
|
||||
with:
|
||||
tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
|
||||
imageName: rclone/rclone
|
||||
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
|
||||
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
|
||||
publish: true
|
||||
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
|
||||
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
|
||||
@@ -50,7 +50,7 @@ jobs:
|
||||
PLUGIN_USER=rclone
|
||||
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
|
||||
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
|
||||
for PLUGIN_ARCH in amd64 arm64 arm/v7 ;do
|
||||
for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
|
||||
export PLUGIN_USER PLUGIN_ARCH
|
||||
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
|
||||
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
|
||||
|
||||
@@ -5,7 +5,7 @@ linters:
- deadcode
- errcheck
- goimports
- revive
#- revive
- ineffassign
- structcheck
- varcheck
@@ -223,7 +223,7 @@ find the results at https://pub.rclone.org/integration-tests/
Rclone code is organised into a small number of top level directories
with modules beneath.

* backend - the rclone backends for interfacing to cloud providers -
* backend - the rclone backends for interfacing to cloud providers -
* all - import this to load all the cloud providers
* ...providers
* bin - scripts for use while building or maintaining rclone
@@ -233,7 +233,7 @@ with modules beneath.
* cmdtest - end-to-end tests of commands, flags, environment variables,...
* docs - the documentation and website
* content - adjust these docs only - everything else is autogenerated
* command - these are auto generated - edit the corresponding .go file
* command - these are auto-generated - edit the corresponding .go file
* fs - main rclone definitions - minimal amount of code
* accounting - bandwidth limiting and statistics
* asyncreader - an io.Reader which reads ahead
@@ -299,7 +299,7 @@ the source file in the `Help:` field.
countries, it looks better without an ending period/full stop character.

The only documentation you need to edit are the `docs/content/*.md`
files. The `MANUAL.*`, `rclone.1`, web site, etc. are all auto generated
files. The `MANUAL.*`, `rclone.1`, website, etc. are all auto-generated
from those during the release process. See the `make doc` and `make
website` targets in the Makefile if you are interested in how. You
don't need to run these when adding a feature.
@@ -350,7 +350,7 @@ And here is an example of a longer one:
```
mount: fix hang on errored upload

In certain circumstances if an upload failed then the mount could hang
In certain circumstances, if an upload failed then the mount could hang
indefinitely. This was fixed by closing the read pipe after the Put
completed. This will cause the write side to return a pipe closed
error fixing the hang.
@@ -382,7 +382,7 @@ and `go.sum` in the same commit as your other changes.

If you need to update a dependency then run

GO111MODULE=on go get -u github.com/pkg/errors
GO111MODULE=on go get -u golang.org/x/crypto

Check in a single commit as above.

@@ -425,8 +425,8 @@ Research
Getting going

* Create `backend/remote/remote.go` (copy this from a similar remote)
* box is a good one to start from if you have a directory based remote
* b2 is a good one to start from if you have a bucket based remote
* box is a good one to start from if you have a directory-based remote
* b2 is a good one to start from if you have a bucket-based remote
* Add your remote to the imports in `backend/all/all.go`
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable.
@@ -15,11 +15,11 @@ Current active maintainers of rclone are:
| Ivan Andreev | @ivandeex | chunker & mailru backends |
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | tardigrade backend |
| Caleb Case | @calebcase | storj backend |

**This is a work in progress Draft**

This is a guide for how to be an rclone maintainer. This is mostly a writeup of what I (@ncw) attempt to do.
This is a guide for how to be an rclone maintainer. This is mostly a write-up of what I (@ncw) attempt to do.

## Triaging Tickets ##

@@ -27,15 +27,15 @@ When a ticket comes in it should be triaged. This means it should be classified

Rclone uses the labels like this:

* `bug` - a definite verified bug
* `bug` - a definitely verified bug
* `can't reproduce` - a problem which we can't reproduce
* `doc fix` - a bug in the documentation - if users need help understanding the docs add this label
* `duplicate` - normally close these and ask the user to subscribe to the original
* `enhancement: new remote` - a new rclone backend
* `enhancement` - a new feature
* `FUSE` - to do with `rclone mount` command
* `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
* `help` wanted - mark these if you find a self contained issue - these get shown to new visitors to the project
* `good first issue` - mark these if you find a small self-contained issue - these get shown to new visitors to the project
* `help` wanted - mark these if you find a self-contained issue - these get shown to new visitors to the project
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
* `maintenance` - internal enhancement, code re-organisation, etc.
* `Needs Go 1.XX` - waiting for that version of Go to be released
@@ -51,7 +51,7 @@ The milestones have these meanings:

* v1.XX - stuff we would like to fit into this release
* v1.XX+1 - stuff we are leaving until the next release
* Soon - stuff we think is a good idea - waiting to be scheduled to a release
* Soon - stuff we think is a good idea - waiting to be scheduled for a release
* Help wanted - blue sky stuff that might get moved up, or someone could help with
* Known bugs - bugs waiting on external factors or we aren't going to fix for the moment

@@ -65,7 +65,7 @@ Close tickets as soon as you can - make sure they are tagged with a release. Po

Try to process pull requests promptly!

Merging pull requests on GitHub itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
Merging pull requests on GitHub itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.

After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.

@@ -81,15 +81,15 @@ Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer

High impact regressions should be fixed before the next release.

Near the start of the release cycle the dependencies should be updated with `make update` to give time for bugs to surface.
Near the start of the release cycle, the dependencies should be updated with `make update` to give time for bugs to surface.

Towards the end of the release cycle try not to merge anything too big so let things settle down.

Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time consuming often needing several rounds of test and fix depending on exactly how many new features rclone has gained.
Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time-consuming often needing several rounds of test and fix depending on exactly how many new features rclone has gained.

## Mailing list ##

There is now an invite only mailing list for rclone developers `rclone-dev` on google groups.
There is now an invite-only mailing list for rclone developers `rclone-dev` on google groups.

## TODO ##
9927 MANUAL.html generated
File diff suppressed because it is too large

12642 MANUAL.txt generated
File diff suppressed because it is too large
18 Makefile
@@ -97,17 +97,21 @@ release_dep_linux:

# Get the release dependencies we only install on Windows
release_dep_windows:
GO111MODULE=off GOOS="" GOARCH="" go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo
GOOS="" GOARCH="" go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest

# Update dependencies
showupdates:
@echo "*** Direct dependencies that could be updated ***"
@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null

# Update direct dependencies only
updatedirect:
GO111MODULE=on go get -d $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
GO111MODULE=on go mod tidy

# Update direct and indirect dependencies and test dependencies
update:
GO111MODULE=on go get -u -t ./...
-#GO111MODULE=on go get -d $(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
GO111MODULE=on go get -d -u -t ./...
GO111MODULE=on go mod tidy

# Tidy the module dependencies
@@ -241,18 +245,18 @@ retag:
startdev:
@echo "Version is $(VERSION)"
@echo "Next version is $(NEXT_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_VERSION)\"\n" | gofmt > fs/versiontag.go
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_VERSION)" > VERSION
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html

startstable:
@echo "Version is $(VERSION)"
@echo "Next stable version is $(NEXT_PATCH_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_PATCH_VERSION)\"\n" | gofmt > fs/versiontag.go
echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_PATCH_VERSION)" > VERSION
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html

winzip:
zip -9 rclone-$(TAG).zip rclone.exe
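The startdev and startstable targets above now write fs/versiontag.go rather than fs/version.go. Reassembling the echo pipeline from the diff, the generated file would look roughly like the sketch below; the version string is a placeholder, not a real release number.

```go
// Rough shape of the file the new startdev target generates as fs/versiontag.go,
// assuming NEXT_VERSION expanded to v1.59.0 (placeholder value only).
package fs

// VersionTag of rclone
var VersionTag = "v1.59.0"
```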
39 README.md
@@ -1,8 +1,9 @@
|
||||
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
|
||||
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
|
||||
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
|
||||
|
||||
[Website](https://rclone.org) |
|
||||
[Documentation](https://rclone.org/docs/) |
|
||||
[Download](https://rclone.org/downloads/) |
|
||||
[Download](https://rclone.org/downloads/) |
|
||||
[Contributing](CONTRIBUTING.md) |
|
||||
[Changelog](https://rclone.org/changelog/) |
|
||||
[Installation](https://rclone.org/install/) |
|
||||
@@ -10,24 +11,29 @@
|
||||
|
||||
[](https://github.com/rclone/rclone/actions?query=workflow%3Abuild)
|
||||
[](https://goreportcard.com/report/github.com/rclone/rclone)
|
||||
[](https://godoc.org/github.com/rclone/rclone)
|
||||
[](https://godoc.org/github.com/rclone/rclone)
|
||||
[](https://hub.docker.com/r/rclone/rclone)
|
||||
|
||||
# Rclone
|
||||
|
||||
Rclone *("rsync for cloud storage")* is a command line program to sync files and directories to and from different cloud storage providers.
|
||||
Rclone *("rsync for cloud storage")* is a command-line program to sync files and directories to and from different cloud storage providers.
|
||||
|
||||
## Storage providers
|
||||
|
||||
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
|
||||
* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
|
||||
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
|
||||
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
|
||||
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
|
||||
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
|
||||
* Box [:page_facing_up:](https://rclone.org/box/)
|
||||
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
|
||||
* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
|
||||
* Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
|
||||
* Arvan Cloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
|
||||
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
|
||||
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
|
||||
* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
|
||||
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
|
||||
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
|
||||
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
|
||||
@@ -36,8 +42,11 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
|
||||
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
|
||||
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
|
||||
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
|
||||
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
|
||||
* HTTP [:page_facing_up:](https://rclone.org/http/)
|
||||
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
|
||||
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
|
||||
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
|
||||
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
|
||||
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
|
||||
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
|
||||
@@ -59,22 +68,36 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
|
||||
* put.io [:page_facing_up:](https://rclone.org/putio/)
|
||||
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
|
||||
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
|
||||
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
|
||||
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
|
||||
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
|
||||
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
|
||||
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
|
||||
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
|
||||
* Storj [:page_facing_up:](https://rclone.org/storj/)
|
||||
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
||||
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
|
||||
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
|
||||
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
|
||||
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
|
||||
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
|
||||
* Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
|
||||
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
|
||||
|
||||
|
||||
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
|
||||
|
||||
### Virtual storage providers
|
||||
|
||||
These backends adapt or modify other storage providers
|
||||
|
||||
* Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
|
||||
* Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
|
||||
* Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
|
||||
* Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
|
||||
* Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
|
||||
* Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
|
||||
* Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
|
||||
* Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)
|
||||
|
||||
## Features
|
||||
|
||||
* MD5/SHA-1 hashes checked at all times for file integrity
|
||||
@@ -89,7 +112,7 @@ Please see [the full list of all storage providers and their features](https://r
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Multi-threaded downloads to local disk
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDAV/FTP/SFTP/DLNA

## Installation & documentation

@@ -110,5 +133,5 @@ Please see the [rclone website](https://rclone.org/) for:
License
-------

This is free software under the terms of MIT the license (check the
This is free software under the terms of the MIT license (check the
[COPYING file](/COPYING) included in this package).
17 RELEASE.md
@@ -34,13 +34,24 @@ This file describes how to make the various kinds of releases
* make startdev # make startstable for stable branch
* # announce with forum post, twitter post, patreon post

## Update dependencies

Early in the next release cycle update the dependencies

* Review any pinned packages in go.mod and remove if possible
* make update
* git status
* git add new files
* make updatedirect
* make
* git commit -a -v
* make update
* make
* roll back any updates which didn't compile
* git commit -a -v --amend

Note that `make update` updates all direct and indirect dependencies
and there can occasionally be forwards compatibility problems with
doing that so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to
build.

## Making a point release
@@ -9,6 +9,7 @@ import (
_ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/chunker"
_ "github.com/rclone/rclone/backend/combine"
_ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
@@ -20,14 +21,17 @@ import (
_ "github.com/rclone/rclone/backend/googlephotos"
_ "github.com/rclone/rclone/backend/hasher"
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/hidrive"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic"
_ "github.com/rclone/rclone/backend/internetarchive"
_ "github.com/rclone/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/koofr"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/mailru"
_ "github.com/rclone/rclone/backend/mega"
_ "github.com/rclone/rclone/backend/memory"
_ "github.com/rclone/rclone/backend/netstorage"
_ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/pcloud"
@@ -39,9 +43,9 @@ import (
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sia"
_ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/tardigrade"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/uptobox"
_ "github.com/rclone/rclone/backend/webdav"
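The hunks above add backends such as combine, hidrive, internetarchive, netstorage and storj to the blank-import list in backend/all/all.go. As a rough illustration of why a blank import is enough, here is a minimal sketch of the registration pattern rclone backends follow; the backend name and RegInfo fields below are invented for the example, not taken from a real backend.

```go
// Sketch of the side-effect registration pattern: the package registers
// itself from init(), so importing it with `_` makes the backend available.
package mybackend

import "github.com/rclone/rclone/fs"

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "mybackend", // illustrative name only
		Description: "Example backend registered from init()",
		NewFs:       nil, // a real backend supplies its constructor here
	})
}
```

With that in place, adding `_ "github.com/rclone/rclone/backend/mybackend"` to backend/all/all.go is all that is needed to compile the backend in.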
@@ -14,6 +14,7 @@ we ignore assets completely!
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@@ -22,7 +23,6 @@ import (
"time"

acd "github.com/ncw/go-acd"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -259,7 +259,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, acdConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Amazon Drive")
return nil, fmt.Errorf("failed to configure Amazon Drive: %w", err)
}

c := acd.NewClient(oAuthClient)
@@ -292,13 +292,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get endpoints")
return nil, fmt.Errorf("failed to get endpoints: %w", err)
}

// Get rootID
rootInfo, err := f.getRootInfo(ctx)
if err != nil || rootInfo.Id == nil {
return nil, errors.Wrap(err, "failed to get root")
return nil, fmt.Errorf("failed to get root: %w", err)
}
f.trueRootID = *rootInfo.Id

@@ -435,7 +435,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, title string, directorie
query += " AND kind:" + folderKind
} else if filesOnly {
query += " AND kind:" + fileKind
} else {
//} else {
// FIXME none of these work
//query += " AND kind:(" + fileKind + " OR " + folderKind + ")"
//query += " AND (kind:" + fileKind + " OR kind:" + folderKind + ")"
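These backend changes replace wrapping via github.com/pkg/errors with the standard library's fmt.Errorf and the %w verb. A minimal, self-contained sketch of the pattern and of how callers can still inspect the wrapped error; the sentinel error and function below are invented for illustration, not taken from rclone.

```go
package main

import (
	"errors"
	"fmt"
)

// errNotFound is an illustrative sentinel error, not an rclone identifier.
var errNotFound = errors.New("not found")

func fetchRoot() error {
	// Previously: return errors.Wrap(errNotFound, "failed to get root")
	// With the standard library, %w keeps the original error in the chain.
	return fmt.Errorf("failed to get root: %w", errNotFound)
}

func main() {
	err := fetchRoot()
	fmt.Println(err)                         // failed to get root: not found
	fmt.Println(errors.Is(err, errNotFound)) // true - unwrapping still works
}
```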
@@ -10,6 +10,7 @@ import (
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -24,8 +25,8 @@ import (
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||
"github.com/Azure/go-autorest/autorest/adal"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/chunksize"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
@@ -43,15 +44,14 @@ import (
|
||||
const (
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 10 * time.Second
|
||||
decayConstant = 1 // bigger for slower decay, exponential
|
||||
maxListChunkSize = 5000 // number of items to read at once
|
||||
decayConstant = 1 // bigger for slower decay, exponential
|
||||
maxListChunkSize = 5000 // number of items to read at once
|
||||
maxUploadParts = 50000 // maximum allowed number of parts/blocks in a multi-part upload
|
||||
modTimeKey = "mtime"
|
||||
timeFormatIn = time.RFC3339
|
||||
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
|
||||
storageDefaultBaseURL = "blob.core.windows.net"
|
||||
defaultChunkSize = 4 * fs.Mebi
|
||||
maxChunkSize = 100 * fs.Mebi
|
||||
uploadConcurrency = 4
|
||||
defaultAccessTier = azblob.AccessTierNone
|
||||
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
|
||||
// Default storage account, key and blob endpoint for emulator support,
|
||||
@@ -134,12 +134,33 @@ msi_client_id, or msi_mi_res_id parameters.`,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Upload chunk size (<= 100 MiB).
Help: `Upload chunk size.

Note that this is stored in memory and there may be up to
"--transfers" chunks stored at once in memory.`,
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
in memory.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.

This is the number of chunks of the same file that are uploaded
concurrently.

If you are uploading small numbers of large files over high-speed
links and these uploads do not fully utilize your bandwidth, then
increasing this may help to speed up the transfers.

In tests, upload speed increases almost linearly with upload
concurrency. For example to fill a gigabit pipe it may be necessary to
raise this to 64. Note that this will use more memory.

Note that chunks are stored in memory and there may be up to
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
in memory.`,
Default: 16,
Advanced: true,
}, {
Name: "list_chunk",
Help: `Size of blob list.
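The updated help text says up to "--transfers" * "--azureblob-upload-concurrency" chunks may be buffered at once. A small worked example of that bound using the defaults shown above (4 MiB chunks, concurrency 16) and an assumed --transfers of 4; the figures are illustrative only.

```go
package main

import "fmt"

func main() {
	const (
		chunkSizeMiB      = 4  // defaultChunkSize from the diff above (4 MiB)
		uploadConcurrency = 16 // default for --azureblob-upload-concurrency
		transfers         = 4  // assumed value for --transfers
	)
	// Worst case per the help text: one chunk buffered per in-flight upload
	// slot, for every file being transferred at the same time.
	worstCaseMiB := chunkSizeMiB * uploadConcurrency * transfers
	fmt.Printf("up to %d MiB of chunks buffered in memory\n", worstCaseMiB) // 256 MiB
}
```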
@@ -257,6 +278,7 @@ type Options struct {
|
||||
Endpoint string `config:"endpoint"`
|
||||
SASURL string `config:"sas_url"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
ListChunkSize uint `config:"list_chunk"`
|
||||
AccessTier string `config:"access_tier"`
|
||||
ArchiveTierDelete bool `config:"archive_tier_delete"`
|
||||
@@ -351,15 +373,9 @@ func (o *Object) split() (container, containerPath string) {
|
||||
|
||||
// validateAccessTier checks if azureblob supports user supplied tier
|
||||
func validateAccessTier(tier string) bool {
|
||||
switch tier {
|
||||
case string(azblob.AccessTierHot),
|
||||
string(azblob.AccessTierCool),
|
||||
string(azblob.AccessTierArchive):
|
||||
// valid cases
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return strings.EqualFold(tier, string(azblob.AccessTierHot)) ||
|
||||
strings.EqualFold(tier, string(azblob.AccessTierCool)) ||
|
||||
strings.EqualFold(tier, string(azblob.AccessTierArchive))
|
||||
}
|
||||
|
||||
// validatePublicAccess checks if azureblob supports use supplied public access level
|
||||
@@ -414,10 +430,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||
const minChunkSize = fs.SizeSuffixBase
|
||||
if cs < minChunkSize {
|
||||
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
}
|
||||
if cs > maxChunkSize {
|
||||
return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
|
||||
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -459,11 +472,11 @@ const azureStorageEndpoint = "https://storage.azure.com/"
|
||||
func newServicePrincipalTokenRefresher(ctx context.Context, credentialsData []byte) (azblob.TokenRefresher, error) {
|
||||
var spCredentials servicePrincipalCredentials
|
||||
if err := json.Unmarshal(credentialsData, &spCredentials); err != nil {
|
||||
return nil, errors.Wrap(err, "error parsing credentials from JSON file")
|
||||
return nil, fmt.Errorf("error parsing credentials from JSON file: %w", err)
|
||||
}
|
||||
oauthConfig, err := adal.NewOAuthConfig(azureActiveDirectoryEndpoint, spCredentials.Tenant)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error creating oauth config")
|
||||
return nil, fmt.Errorf("error creating oauth config: %w", err)
|
||||
}
|
||||
|
||||
// Create service principal token for Azure Storage.
|
||||
@@ -473,7 +486,7 @@ func newServicePrincipalTokenRefresher(ctx context.Context, credentialsData []by
|
||||
spCredentials.Password,
|
||||
azureStorageEndpoint)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error creating service principal token")
|
||||
return nil, fmt.Errorf("error creating service principal token: %w", err)
|
||||
}
|
||||
|
||||
// Wrap token inside a refresher closure.
|
||||
@@ -526,10 +539,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
|
||||
err = checkUploadChunkSize(opt.ChunkSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "azure: chunk size")
|
||||
return nil, fmt.Errorf("chunk size: %w", err)
|
||||
}
|
||||
if opt.ListChunkSize > maxListChunkSize {
|
||||
return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
|
||||
return nil, fmt.Errorf("blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
|
||||
}
|
||||
if opt.Endpoint == "" {
|
||||
opt.Endpoint = storageDefaultBaseURL
|
||||
@@ -538,12 +551,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if opt.AccessTier == "" {
|
||||
opt.AccessTier = string(defaultAccessTier)
|
||||
} else if !validateAccessTier(opt.AccessTier) {
|
||||
return nil, errors.Errorf("Azure Blob: Supported access tiers are %s, %s and %s",
|
||||
return nil, fmt.Errorf("supported access tiers are %s, %s and %s",
|
||||
string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
|
||||
}
|
||||
|
||||
if !validatePublicAccess((opt.PublicAccess)) {
|
||||
return nil, errors.Errorf("Azure Blob: Supported public access level are %s and %s",
|
||||
return nil, fmt.Errorf("supported public access level are %s and %s",
|
||||
string(azblob.PublicAccessBlob), string(azblob.PublicAccessContainer))
|
||||
}
|
||||
|
||||
@@ -585,17 +598,21 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
case opt.UseEmulator:
|
||||
credential, err := azblob.NewSharedKeyCredential(emulatorAccount, emulatorAccountKey)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Failed to parse credentials")
|
||||
return nil, fmt.Errorf("failed to parse credentials: %w", err)
|
||||
}
|
||||
u, err = url.Parse(emulatorBlobEndpoint)
|
||||
var actualEmulatorEndpoint = emulatorBlobEndpoint
|
||||
if opt.Endpoint != "" {
|
||||
actualEmulatorEndpoint = opt.Endpoint
|
||||
}
|
||||
u, err = url.Parse(actualEmulatorEndpoint)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
|
||||
return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
|
||||
}
|
||||
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
|
||||
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
||||
case opt.UseMSI:
|
||||
var token adal.Token
|
||||
var userMSI *userMSI = &userMSI{}
|
||||
var userMSI = &userMSI{}
|
||||
if len(opt.MSIClientID) > 0 || len(opt.MSIObjectID) > 0 || len(opt.MSIResourceID) > 0 {
|
||||
// Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
|
||||
// Validate and ensure exactly one is set. (To do: better validation.)
|
||||
@@ -631,12 +648,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Failed to acquire MSI token")
|
||||
return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
|
||||
}
|
||||
|
||||
u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
|
||||
return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
|
||||
}
|
||||
credential := azblob.NewTokenCredential(token.AccessToken, func(credential azblob.TokenCredential) time.Duration {
|
||||
fs.Debugf(f, "Token refresher called.")
|
||||
@@ -666,19 +683,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
case opt.Account != "" && opt.Key != "":
|
||||
credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Failed to parse credentials")
|
||||
return nil, fmt.Errorf("failed to parse credentials: %w", err)
|
||||
}
|
||||
|
||||
u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
|
||||
return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
|
||||
}
|
||||
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
|
||||
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
||||
case opt.SASURL != "":
|
||||
u, err = url.Parse(opt.SASURL)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse SAS URL")
|
||||
return nil, fmt.Errorf("failed to parse SAS URL: %w", err)
|
||||
}
|
||||
// use anonymous credentials in case of sas url
|
||||
pipeline := f.newPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
|
||||
@@ -686,7 +703,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
parts := azblob.NewBlobURLParts(*u)
|
||||
if parts.ContainerName != "" {
|
||||
if f.rootContainer != "" && parts.ContainerName != f.rootContainer {
|
||||
return nil, errors.New("Container name in SAS URL and container provided in command do not match")
|
||||
return nil, errors.New("container name in SAS URL and container provided in command do not match")
|
||||
}
|
||||
containerURL := azblob.NewContainerURL(*u, pipeline)
|
||||
f.cntURLcache[parts.ContainerName] = &containerURL
|
||||
@@ -698,23 +715,23 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
// Create a standard URL.
|
||||
u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
|
||||
return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
|
||||
}
|
||||
// Try loading service principal credentials from file.
|
||||
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServicePrincipalFile))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error opening service principal credentials file")
|
||||
return nil, fmt.Errorf("error opening service principal credentials file: %w", err)
|
||||
}
|
||||
// Create a token refresher from service principal credentials.
|
||||
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, loadedCreds)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create a service principal token")
|
||||
return nil, fmt.Errorf("failed to create a service principal token: %w", err)
|
||||
}
|
||||
options := azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}}
|
||||
pipe := f.newPipeline(azblob.NewTokenCredential("", tokenRefresher), options)
|
||||
serviceURL = azblob.NewServiceURL(*u, pipe)
|
||||
default:
|
||||
return nil, errors.New("No authentication method configured")
|
||||
return nil, errors.New("no authentication method configured")
|
||||
}
|
||||
f.svcURL = &serviceURL
|
||||
|
||||
@@ -1280,19 +1297,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return f.NewObject(ctx, remote)
}

func (f *Fs) getMemoryPool(size int64) *pool.Pool {
if size == int64(f.opt.ChunkSize) {
return f.pool
}

return pool.New(
time.Duration(f.opt.MemoryPoolFlushTime),
int(size),
f.ci.Transfers,
f.opt.MemoryPoolUseMmap,
)
}

// ------------------------------------------------------------

// Fs returns the parent Fs
@@ -1324,7 +1328,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
}
data, err := base64.StdEncoding.DecodeString(o.md5)
if err != nil {
return "", errors.Wrapf(err, "Failed to decode Content-MD5: %q", o.md5)
return "", fmt.Errorf("failed to decode Content-MD5: %q: %w", o.md5, err)
}
return hex.EncodeToString(data), nil
}
@@ -1444,6 +1448,10 @@ func (o *Object) clearMetaData() {
// o.size
// o.md5
func (o *Object) readMetaData() (err error) {
container, _ := o.split()
if !o.fs.containerOK(container) {
return fs.ErrorObjectNotFound
}
if !o.modTime.IsZero() {
return nil
}
@@ -1510,7 +1518,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var offset int64
var count int64
if o.AccessTier() == azblob.AccessTierArchive {
return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
return nil, fmt.Errorf("blob in archive tier, you need to set tier to hot or cool first")
}
fs.FixRangeOption(options, o.size)
for _, option := range options {
@@ -1536,11 +1544,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return o.fs.shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to open for download")
return nil, fmt.Errorf("failed to open for download: %w", err)
}
err = o.decodeMetaDataFromDownloadResponse(downloadResponse)
if err != nil {
return nil, errors.Wrap(err, "failed to decode metadata for download")
return nil, fmt.Errorf("failed to decode metadata for download: %w", err)
}
in = downloadResponse.Body(azblob.RetryReaderOptions{})
return in, nil
@@ -1630,13 +1638,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "deleting archive tier blob before updating")
err = o.Remove(ctx)
if err != nil {
return errors.Wrap(err, "failed to delete archive blob before updating")
return fmt.Errorf("failed to delete archive blob before updating: %w", err)
}
} else {
return errCantUpdateArchiveTierBlobs
}
}
container, _ := o.split()
container, containerPath := o.split()
if container == "" || containerPath == "" {
return fmt.Errorf("can't upload to root - need a container")
}
err = o.fs.makeContainer(ctx, container)
if err != nil {
return err
@@ -1665,12 +1676,21 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
}

uploadParts := int64(maxUploadParts)
if uploadParts < 1 {
uploadParts = 1
} else if uploadParts > maxUploadParts {
uploadParts = maxUploadParts
}
// calculate size of parts/blocks
partSize := chunksize.Calculator(o, int(uploadParts), o.fs.opt.ChunkSize)

putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
BufferSize: int(o.fs.opt.ChunkSize),
MaxBuffers: uploadConcurrency,
BufferSize: int(partSize),
MaxBuffers: o.fs.opt.UploadConcurrency,
Metadata: o.meta,
BlobHTTPHeaders: httpHeaders,
TransferManager: o.fs.newPoolWrapper(uploadConcurrency),
TransferManager: o.fs.newPoolWrapper(o.fs.opt.UploadConcurrency),
}

// Don't retry, return a retry error instead
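The Update change above replaces the fixed buffer size with a part size from chunksize.Calculator, clamped to the block-blob part limit. A rough sketch of the underlying idea, under assumed names (calcChunkSize is illustrative, not the real rclone helper):

package main

import "fmt"

// calcChunkSize sketches what chunksize.Calculator is used for here:
// grow the chunk size until the object fits in at most maxParts chunks.
func calcChunkSize(objectSize, maxParts, defaultChunk int64) int64 {
	chunk := defaultChunk
	// (objectSize+chunk-1)/chunk is the ceiling of objectSize/chunk,
	// i.e. the number of parts needed at the current chunk size.
	for (objectSize+chunk-1)/chunk > maxParts {
		chunk *= 2
	}
	return chunk
}

func main() {
	// 1 TiB object, 50000 parts max, 4 MiB default chunk: grows to 32 MiB.
	fmt.Println(calcChunkSize(1<<40, 50000, 4<<20))
}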
@@ -1723,7 +1743,7 @@ func (o *Object) AccessTier() azblob.AccessTierType {
// SetTier performs changing object tier
func (o *Object) SetTier(tier string) error {
if !validateAccessTier(tier) {
return errors.Errorf("Tier %s not supported by Azure Blob Storage", tier)
return fmt.Errorf("tier %s not supported by Azure Blob Storage", tier)
}

// Check if current tier already matches with desired tier
@@ -1734,12 +1754,12 @@ func (o *Object) SetTier(tier string) error {
blob := o.getBlobReference()
ctx := context.Background()
err := o.fs.pacer.Call(func() (bool, error) {
_, err := blob.SetTier(ctx, desiredAccessTier, azblob.LeaseAccessConditions{})
_, err := blob.SetTier(ctx, desiredAccessTier, azblob.LeaseAccessConditions{}, azblob.RehydratePriorityStandard)
return o.fs.shouldRetry(ctx, err)
})

if err != nil {
return errors.Wrap(err, "Failed to set Blob Tier")
return fmt.Errorf("failed to set Blob Tier: %w", err)
}

// Set access tier on local object also, this typically
@@ -17,12 +17,10 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MaxChunkSize: maxChunkSize,
},
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{},
})
}

@@ -63,3 +61,25 @@ func TestServicePrincipalFileFailure(t *testing.T) {
assert.Error(t, err)
assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
}

func TestValidateAccessTier(t *testing.T) {
tests := map[string]struct {
accessTier string
want bool
}{
"hot": {"hot", true},
"HOT": {"HOT", true},
"Hot": {"Hot", true},
"cool": {"cool", true},
"archive": {"archive", true},
"empty": {"", false},
"unknown": {"unknown", false},
}

for name, test := range tests {
t.Run(name, func(t *testing.T) {
got := validateAccessTier(test.accessTier)
assert.Equal(t, test.want, got)
})
}
}
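The table test above fixes the set of accepted tier names. A hedged sketch of the behaviour it exercises (the real validateAccessTier lives in the backend and is not shown in this diff):

package main

import (
	"fmt"
	"strings"
)

// validTier illustrates the case-insensitive match against the supported
// Azure access tiers that the test table expects. Illustrative only.
func validTier(tier string) bool {
	switch strings.ToLower(tier) {
	case "hot", "cool", "archive":
		return true
	}
	return false
}

func main() {
	fmt.Println(validTier("HOT"), validTier(""), validTier("unknown")) // true false false
}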
@@ -13,7 +13,6 @@ import (
"net/http"

"github.com/Azure/go-autorest/autorest/adal"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
)
@@ -95,7 +94,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
httpClient := fshttp.NewClient(ctx)
resp, err := httpClient.Do(req)
if err != nil {
return result, errors.Wrap(err, "MSI is not enabled on this VM")
return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
}
defer func() { // resp and Body should not be nil
_, err = io.Copy(ioutil.Discard, resp.Body)
@@ -120,7 +119,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {

b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, errors.Wrap(err, "Couldn't read IMDS response")
return result, fmt.Errorf("couldn't read IMDS response: %w", err)
}
// Remove BOM, if any. azcopy does this so I'm following along.
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
@@ -131,7 +130,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
// storage API call.
err = json.Unmarshal(b, &result)
if err != nil {
return result, errors.Wrap(err, "Couldn't unmarshal IMDS response")
return result, fmt.Errorf("couldn't unmarshal IMDS response: %w", err)
}

return result, nil
@@ -9,6 +9,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"errors"
|
||||
"fmt"
|
||||
gohash "hash"
|
||||
"io"
|
||||
@@ -19,7 +20,6 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/backend/b2/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
@@ -64,7 +64,8 @@ const (
|
||||
|
||||
// Globals
|
||||
var (
|
||||
errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
|
||||
errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
|
||||
errNotWithVersionAt = errors.New("can't modify or delete files in --b2-version-at mode")
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -106,6 +107,11 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
|
||||
Help: "Include old versions in directory listings.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "version_at",
|
||||
Help: "Show file versions as they were at the specified time.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
|
||||
Default: fs.Time{},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "hard_delete",
|
||||
Help: "Permanently delete files on remote removal, otherwise hide files.",
|
||||
@@ -160,7 +166,15 @@ free egress for data downloaded through the Cloudflare network.
|
||||
Rclone works with private buckets by sending an "Authorization" header.
|
||||
If the custom endpoint rewrites the requests for authentication,
|
||||
e.g., in Cloudflare Workers, this header needs to be handled properly.
|
||||
Leave blank if you want to use the endpoint provided by Backblaze.`,
|
||||
Leave blank if you want to use the endpoint provided by Backblaze.
|
||||
|
||||
The URL provided here SHOULD have the protocol and SHOULD NOT have
|
||||
a trailing slash or specify the /file/bucket subpath as rclone will
|
||||
request files with "{download_url}/file/{bucket_name}/{path}".
|
||||
|
||||
Example:
|
||||
> https://mysubdomain.mydomain.tld
|
||||
(No trailing "/", "file" or "bucket")`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "download_auth_duration",
|
||||
@@ -203,6 +217,7 @@ type Options struct {
|
||||
Endpoint string `config:"endpoint"`
|
||||
TestMode string `config:"test_mode"`
|
||||
Versions bool `config:"versions"`
|
||||
VersionAt fs.Time `config:"version_at"`
|
||||
HardDelete bool `config:"hard_delete"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
|
||||
@@ -265,7 +280,7 @@ func (f *Fs) Root() string {
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
if f.rootBucket == "" {
|
||||
return fmt.Sprintf("B2 root")
|
||||
return "B2 root"
|
||||
}
|
||||
if f.rootDirectory == "" {
|
||||
return fmt.Sprintf("B2 bucket %s", f.rootBucket)
|
||||
@@ -366,7 +381,7 @@ func errorHandler(resp *http.Response) error {
|
||||
|
||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||
if cs < minChunkSize {
|
||||
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -381,7 +396,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
|
||||
|
||||
func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
|
||||
if cs < opt.ChunkSize {
|
||||
return errors.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
|
||||
return fmt.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -414,11 +429,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
err = checkUploadCutoff(opt, opt.UploadCutoff)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "b2: upload cutoff")
|
||||
return nil, fmt.Errorf("b2: upload cutoff: %w", err)
|
||||
}
|
||||
err = checkUploadChunkSize(opt.ChunkSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "b2: chunk size")
|
||||
return nil, fmt.Errorf("b2: chunk size: %w", err)
|
||||
}
|
||||
if opt.Account == "" {
|
||||
return nil, errors.New("account not found")
|
||||
@@ -463,7 +478,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
err = f.authorizeAccount(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to authorize account")
|
||||
return nil, fmt.Errorf("failed to authorize account: %w", err)
|
||||
}
|
||||
// If this is a key limited to a single bucket, it must exist already
|
||||
if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
|
||||
@@ -472,7 +487,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return nil, errors.New("bucket that application key is restricted to no longer exists")
|
||||
}
|
||||
if allowedBucket != f.rootBucket {
|
||||
return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
|
||||
return nil, fmt.Errorf("you must use bucket %q with this application key", allowedBucket)
|
||||
}
|
||||
f.cache.MarkOK(f.rootBucket)
|
||||
f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
|
||||
@@ -512,7 +527,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
|
||||
return f.shouldRetryNoReauth(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to authenticate")
|
||||
return fmt.Errorf("failed to authenticate: %w", err)
|
||||
}
|
||||
f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
|
||||
return nil
|
||||
@@ -558,7 +573,7 @@ func (f *Fs) getUploadURL(ctx context.Context, bucket string) (upload *api.GetUp
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get upload URL")
|
||||
return nil, fmt.Errorf("failed to get upload URL: %w", err)
|
||||
}
|
||||
return upload, nil
|
||||
}
|
||||
@@ -688,9 +703,12 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
Method: "POST",
Path: "/b2_list_file_names",
}
if hidden {
if hidden || f.opt.VersionAt.IsSet() {
opts.Path = "/b2_list_file_versions"
}

lastFileName := ""

for {
var response api.ListFileNamesResponse
err := f.pacer.Call(func() (bool, error) {
@@ -720,7 +738,21 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
if addBucket {
remote = path.Join(bucket, remote)
}

if f.opt.VersionAt.IsSet() {
if time.Time(file.UploadTimestamp).After(time.Time(f.opt.VersionAt)) {
// Ignore versions that were created after the specified time
continue
}

if file.Name == lastFileName {
// Ignore versions before the already returned version
continue
}
}

// Send object
lastFileName = file.Name
err = fn(remote, file, isDirectory)
if err != nil {
if err == errEndList {
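The listing change above implements --b2-version-at: versions newer than the cutoff are skipped, and only the first remaining version of each name is emitted because the versions listing returns them newest first. A small self-contained sketch of that selection (types and names are illustrative, not the b2 API):

package main

import (
	"fmt"
	"time"
)

// fileVersion stands in for the b2 file-version entries, ordered by name
// and then by newest-first upload time.
type fileVersion struct {
	Name     string
	Uploaded time.Time
}

// versionsAt keeps, for each name, the newest version uploaded at or before
// the cutoff, which is the same selection the new listing code makes.
func versionsAt(versions []fileVersion, cutoff time.Time) []fileVersion {
	var out []fileVersion
	last := ""
	for _, v := range versions {
		if v.Uploaded.After(cutoff) {
			continue // created after the requested point in time
		}
		if v.Name == last {
			continue // older version of a file already emitted
		}
		last = v.Name
		out = append(out, v)
	}
	return out
}

func main() {
	t0 := time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC)
	vs := []fileVersion{
		{"a.txt", t0.Add(48 * time.Hour)},
		{"a.txt", t0},
		{"a.txt", t0.Add(-24 * time.Hour)},
	}
	fmt.Println(versionsAt(vs, t0.Add(time.Hour))) // only the version uploaded at t0
}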
@@ -1048,7 +1080,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
|
||||
}
|
||||
}
|
||||
}
|
||||
return errors.Wrap(err, "failed to create bucket")
|
||||
return fmt.Errorf("failed to create bucket: %w", err)
|
||||
}
|
||||
f.setBucketID(bucket, response.ID)
|
||||
f.setBucketType(bucket, response.Type)
|
||||
@@ -1083,7 +1115,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to delete bucket")
|
||||
return fmt.Errorf("failed to delete bucket: %w", err)
|
||||
}
|
||||
f.clearBucketID(bucket)
|
||||
f.clearBucketType(bucket)
|
||||
@@ -1124,7 +1156,7 @@ func (f *Fs) hide(ctx context.Context, bucket, bucketPath string) error {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return errors.Wrapf(err, "failed to hide %q", bucketPath)
|
||||
return fmt.Errorf("failed to hide %q: %w", bucketPath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1145,7 +1177,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to delete %q", Name)
|
||||
return fmt.Errorf("failed to delete %q: %w", Name, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1173,10 +1205,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
|
||||
}
|
||||
}
|
||||
var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
|
||||
if time.Since(time.Time(timestamp)).Hours() > 24 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
return time.Since(time.Time(timestamp)).Hours() > 24
|
||||
}
|
||||
|
||||
// Delete Config.Transfers in parallel
|
||||
@@ -1364,7 +1393,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to get download authorization")
|
||||
return "", fmt.Errorf("failed to get download authorization: %w", err)
|
||||
}
|
||||
return response.AuthorizationToken, nil
|
||||
}
|
||||
@@ -1453,13 +1482,9 @@ func (o *Object) Size() int64 {
//
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (e.g. Cyberduck) use this
func cleanSHA1(sha1 string) (out string) {
out = strings.ToLower(sha1)
func cleanSHA1(sha1 string) string {
const unverified = "unverified:"
if strings.HasPrefix(out, unverified) {
out = out[len(unverified):]
}
return out
return strings.TrimPrefix(strings.ToLower(sha1), unverified)
}
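The rewritten cleanSHA1 leans on strings.TrimPrefix being a no-op when the prefix is absent, so one call handles both verified and unverified checksums, for example:

package main

import (
	"fmt"
	"strings"
)

func main() {
	const unverified = "unverified:"
	// TrimPrefix returns its input unchanged when the prefix is missing,
	// so both forms reduce to the lowercased hash.
	fmt.Println(strings.TrimPrefix(strings.ToLower("UNVERIFIED:ABC123"), unverified)) // abc123
	fmt.Println(strings.TrimPrefix(strings.ToLower("ABC123"), unverified))            // abc123
}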
// decodeMetaDataRaw sets the metadata from the data passed in
|
||||
@@ -1669,14 +1694,14 @@ func (file *openFile) Close() (err error) {
|
||||
|
||||
// Check to see we read the correct number of bytes
|
||||
if file.o.Size() != file.bytes {
|
||||
return errors.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
|
||||
return fmt.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
|
||||
}
|
||||
|
||||
// Check the SHA1
|
||||
receivedSHA1 := file.o.sha1
|
||||
calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
|
||||
if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
|
||||
return errors.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
|
||||
return fmt.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -1716,7 +1741,7 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
|
||||
if resp != nil && (resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusBadRequest) {
|
||||
return nil, nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
return nil, nil, errors.Wrapf(err, "failed to %s for download", method)
|
||||
return nil, nil, fmt.Errorf("failed to %s for download: %w", method, err)
|
||||
}
|
||||
|
||||
// NB resp may be Open here - don't return err != nil without closing
|
||||
@@ -1820,6 +1845,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if o.fs.opt.Versions {
|
||||
return errNotWithVersions
|
||||
}
|
||||
if o.fs.opt.VersionAt.IsSet() {
|
||||
return errNotWithVersionAt
|
||||
}
|
||||
size := src.Size()
|
||||
|
||||
bucket, bucketPath := o.split()
|
||||
@@ -1975,6 +2003,9 @@ func (o *Object) Remove(ctx context.Context) error {
|
||||
if o.fs.opt.Versions {
|
||||
return errNotWithVersions
|
||||
}
|
||||
if o.fs.opt.VersionAt.IsSet() {
|
||||
return errNotWithVersionAt
|
||||
}
|
||||
if o.fs.opt.HardDelete {
|
||||
return o.fs.deleteByID(ctx, o.id, bucketPath)
|
||||
}
|
||||
|
||||
@@ -15,10 +15,10 @@ import (
"strings"
"sync"

"github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/rest"
@@ -89,21 +89,19 @@ type largeUpload struct {
// newLargeUpload starts an upload of object o from in with metadata in src
//
// If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, chunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
remote := o.remote
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
size := src.Size()
parts := int64(0)
sha1SliceSize := int64(maxParts)
chunkSize := defaultChunkSize
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
chunkSize = chunksize.Calculator(src, maxParts, defaultChunkSize)
parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts++
}
if parts > maxParts {
return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
}
sha1SliceSize = parts
}
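In newLargeUpload above, a known size now picks its chunk size via chunksize.Calculator and derives the part count as a ceiling division, while an unknown size (-1) streams with the configured chunk size. A sketch of the part arithmetic only (illustrative names, not the rclone implementation):

package main

import "fmt"

// partCount mirrors the arithmetic above: a negative size means a streaming
// upload whose part count is not known up front, otherwise the count is the
// ceiling of size/chunkSize.
func partCount(size, chunkSize int64) int64 {
	if size < 0 {
		return 0 // streaming: parts are discovered as data arrives
	}
	parts := size / chunkSize
	if size%chunkSize != 0 {
		parts++
	}
	return parts
}

func main() {
	fmt.Println(partCount(250<<20, 100<<20)) // 3 parts for a 250 MiB object
}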
@@ -185,7 +183,7 @@ func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadP
|
||||
return up.f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get upload URL")
|
||||
return nil, fmt.Errorf("failed to get upload URL: %w", err)
|
||||
}
|
||||
} else {
|
||||
upload, up.uploads = up.uploads[0], up.uploads[1:]
|
||||
@@ -406,7 +404,7 @@ func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (e
|
||||
up.size += int64(n)
|
||||
if part > maxParts {
|
||||
up.f.putBuf(buf, false)
|
||||
return errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
|
||||
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
|
||||
}
|
||||
|
||||
part := part // for the closure
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"crypto/rsa"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -26,13 +27,6 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/env"
|
||||
"github.com/rclone/rclone/lib/jwtutil"
|
||||
|
||||
"github.com/youmark/pkcs8"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/backend/box/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
@@ -43,9 +37,13 @@ import (
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/dircache"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/env"
|
||||
"github.com/rclone/rclone/lib/jwtutil"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"github.com/youmark/pkcs8"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/jws"
|
||||
)
|
||||
@@ -93,7 +91,7 @@ func init() {
|
||||
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
|
||||
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to configure token with jwt authentication")
|
||||
return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err)
|
||||
}
|
||||
// Else, if not using an access token, use oauth2
|
||||
} else if boxAccessToken == "" || !boxAccessTokenOk {
|
||||
@@ -167,15 +165,15 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
|
||||
jsonFile = env.ShellExpand(jsonFile)
|
||||
boxConfig, err := getBoxConfig(jsonFile)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "get box config")
|
||||
return fmt.Errorf("get box config: %w", err)
|
||||
}
|
||||
privateKey, err := getDecryptedPrivateKey(boxConfig)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "get decrypted private key")
|
||||
return fmt.Errorf("get decrypted private key: %w", err)
|
||||
}
|
||||
claims, err := getClaims(boxConfig, boxSubType)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "get claims")
|
||||
return fmt.Errorf("get claims: %w", err)
|
||||
}
|
||||
signingHeaders := getSigningHeaders(boxConfig)
|
||||
queryParams := getQueryParams(boxConfig)
|
||||
@@ -187,11 +185,11 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
|
||||
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
|
||||
file, err := ioutil.ReadFile(configFile)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "box: failed to read Box config")
|
||||
return nil, fmt.Errorf("box: failed to read Box config: %w", err)
|
||||
}
|
||||
err = json.Unmarshal(file, &boxConfig)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "box: failed to parse Box config")
|
||||
return nil, fmt.Errorf("box: failed to parse Box config: %w", err)
|
||||
}
|
||||
return boxConfig, nil
|
||||
}
|
||||
@@ -199,7 +197,7 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
|
||||
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
|
||||
val, err := jwtutil.RandomHex(20)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "box: failed to generate random string for jti")
|
||||
return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
|
||||
}
|
||||
|
||||
claims = &jws.ClaimSet{
|
||||
@@ -240,12 +238,12 @@ func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err
|
||||
|
||||
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
|
||||
if len(rest) > 0 {
|
||||
return nil, errors.Wrap(err, "box: extra data included in private key")
|
||||
return nil, fmt.Errorf("box: extra data included in private key: %w", err)
|
||||
}
|
||||
|
||||
rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "box: failed to decrypt private key")
|
||||
return nil, fmt.Errorf("box: failed to decrypt private key: %w", err)
|
||||
}
|
||||
|
||||
return rsaKey.(*rsa.PrivateKey), nil
|
||||
@@ -403,7 +401,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
|
||||
if opt.UploadCutoff < minUploadCutoff {
|
||||
return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
|
||||
return nil, fmt.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
|
||||
}
|
||||
|
||||
root = parsePath(root)
|
||||
@@ -414,7 +412,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if opt.AccessToken == "" {
|
||||
client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to configure Box")
|
||||
return nil, fmt.Errorf("failed to configure Box: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -613,7 +611,7 @@ OUTER:
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return found, errors.Wrap(err, "couldn't list files")
|
||||
return found, fmt.Errorf("couldn't list files: %w", err)
|
||||
}
|
||||
for i := range result.Entries {
|
||||
item := &result.Entries[i]
|
||||
@@ -740,14 +738,14 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size
|
||||
var conflict api.PreUploadCheckConflict
|
||||
err = json.Unmarshal(apiErr.ContextInfo, &conflict)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "pre-upload check: JSON decode failed")
|
||||
return "", fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
|
||||
}
|
||||
if conflict.Conflicts.Type != api.ItemTypeFile {
|
||||
return "", errors.Wrap(err, "pre-upload check: can't overwrite non file with file")
|
||||
return "", fmt.Errorf("pre-upload check: can't overwrite non file with file: %w", err)
|
||||
}
|
||||
return conflict.Conflicts.ID, nil
|
||||
}
|
||||
return "", errors.Wrap(err, "pre-upload check")
|
||||
return "", fmt.Errorf("pre-upload check: %w", err)
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
@@ -856,7 +854,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "rmdir failed")
|
||||
return fmt.Errorf("rmdir failed: %w", err)
|
||||
}
|
||||
f.dirCache.FlushDir(dir)
|
||||
if err != nil {
|
||||
@@ -899,8 +897,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

srcPath := srcObj.fs.rootSlash() + srcObj.remote
dstPath := f.rootSlash() + remote
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
if strings.EqualFold(srcPath, dstPath) {
return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
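The Copy change above swaps the double strings.ToLower comparison for strings.EqualFold, which folds case as it compares instead of building two lowercased copies, for example:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// EqualFold compares case-insensitively in a single pass.
	fmt.Println(strings.EqualFold("/Dir/File.TXT", "/dir/file.txt")) // true
	// The old form gives the same answer here but allocates two new strings.
	fmt.Println(strings.ToLower("/Dir/File.TXT") == strings.ToLower("/dir/file.txt")) // true
}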
// Create temporary object
|
||||
@@ -984,7 +982,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read user info")
|
||||
return nil, fmt.Errorf("failed to read user info: %w", err)
|
||||
}
|
||||
// FIXME max upload size would be useful to use in Update
|
||||
usage = &fs.Usage{
|
||||
@@ -1145,7 +1143,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
||||
})
|
||||
wg.Wait()
|
||||
if deleteErrors != 0 {
|
||||
return errors.Errorf("failed to delete %d trash items", deleteErrors)
|
||||
return fmt.Errorf("failed to delete %d trash items", deleteErrors)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -1205,7 +1203,7 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
|
||||
return fs.ErrorIsDir
|
||||
}
|
||||
if info.Type != api.ItemTypeFile {
|
||||
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
|
||||
return fmt.Errorf("%q is %q: %w", o.remote, info.Type, fs.ErrorNotAFile)
|
||||
}
|
||||
o.hasMetaData = true
|
||||
o.size = int64(info.Size)
|
||||
@@ -1341,7 +1339,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
|
||||
return err
|
||||
}
|
||||
if result.TotalCount != 1 || len(result.Entries) != 1 {
|
||||
return errors.Errorf("failed to upload %v - not sure why", o)
|
||||
return fmt.Errorf("failed to upload %v - not sure why", o)
|
||||
}
|
||||
return o.setMetaData(&result.Entries[0])
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
@@ -15,7 +16,6 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/backend/box/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
@@ -140,7 +140,7 @@ outer:
|
||||
}
|
||||
}
|
||||
default:
|
||||
return nil, errors.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
|
||||
return nil, fmt.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
|
||||
}
|
||||
}
|
||||
fs.Debugf(o, "commit multipart upload failed %d/%d - trying again in %d seconds (%s)", tries+1, maxTries, delay, why)
|
||||
@@ -151,7 +151,7 @@ outer:
|
||||
}
|
||||
err = json.Unmarshal(body, &result)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "couldn't decode commit response: %q", body)
|
||||
return nil, fmt.Errorf("couldn't decode commit response: %q: %w", body, err)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
@@ -177,7 +177,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
|
||||
// Create upload session
|
||||
session, err := o.createUploadSession(ctx, leaf, directoryID, size)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "multipart upload create session failed")
|
||||
return fmt.Errorf("multipart upload create session failed: %w", err)
|
||||
}
|
||||
chunkSize := session.PartSize
|
||||
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))
|
||||
@@ -222,7 +222,7 @@ outer:
|
||||
// Read the chunk
|
||||
_, err = io.ReadFull(in, buf)
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, "multipart upload failed to read source")
|
||||
err = fmt.Errorf("multipart upload failed to read source: %w", err)
|
||||
break outer
|
||||
}
|
||||
|
||||
@@ -238,7 +238,7 @@ outer:
|
||||
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
|
||||
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...)
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, "multipart upload failed to upload part")
|
||||
err = fmt.Errorf("multipart upload failed to upload part: %w", err)
|
||||
select {
|
||||
case errs <- err:
|
||||
default:
|
||||
@@ -266,11 +266,11 @@ outer:
|
||||
// Finalise the upload session
|
||||
result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "multipart upload failed to finalize")
|
||||
return fmt.Errorf("multipart upload failed to finalize: %w", err)
|
||||
}
|
||||
|
||||
if result.TotalCount != 1 || len(result.Entries) != 1 {
|
||||
return errors.Errorf("multipart upload failed %v - not sure why", o)
|
||||
return fmt.Errorf("multipart upload failed %v - not sure why", o)
|
||||
}
|
||||
return o.setMetaData(&result.Entries[0])
|
||||
}
|
||||
|
||||
56
backend/cache/cache.go
vendored
@@ -5,6 +5,7 @@ package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
@@ -19,7 +20,6 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/backend/crypt"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
@@ -356,7 +356,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
|
||||
return nil, err
|
||||
}
|
||||
if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
|
||||
return nil, errors.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
|
||||
return nil, fmt.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
|
||||
opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
|
||||
}
|
||||
|
||||
@@ -366,13 +366,13 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
|
||||
|
||||
rpath, err := parseRootPath(rootPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
|
||||
return nil, fmt.Errorf("failed to clean root path %q: %w", rootPath, err)
|
||||
}
|
||||
|
||||
remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
|
||||
wrappedFs, wrapErr := cache.Get(ctx, remotePath)
|
||||
if wrapErr != nil && wrapErr != fs.ErrorIsFile {
|
||||
return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
|
||||
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remotePath, wrapErr)
|
||||
}
|
||||
var fsErr error
|
||||
fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
|
||||
@@ -394,14 +394,18 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
notifiedRemotes: make(map[string]bool),
}
cache.PinUntilFinalized(f.Fs, f)
f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
rps := rate.Inf
if opt.Rps > 0 {
rps = rate.Limit(float64(opt.Rps))
}
f.rateLimiter = rate.NewLimiter(rps, opt.TotalWorkers)
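The cache change above maps a non-positive rps setting to rate.Inf, so an unset limit no longer produces a zero-rate limiter that stops allowing calls once its burst is spent. A small sketch of the same construction (values are illustrative):

package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// Mirror the new behaviour: rps <= 0 means "no throttling" via rate.Inf.
	rps := 0
	limit := rate.Inf
	if rps > 0 {
		limit = rate.Limit(float64(rps))
	}
	limiter := rate.NewLimiter(limit, 4)
	fmt.Println(limiter.Allow()) // true: an infinite limiter never throttles
}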
|
||||
f.plexConnector = &plexConnector{}
|
||||
if opt.PlexURL != "" {
|
||||
if opt.PlexToken != "" {
|
||||
f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken, opt.PlexInsecure)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
|
||||
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
|
||||
}
|
||||
} else {
|
||||
if opt.PlexPassword != "" && opt.PlexUsername != "" {
|
||||
@@ -413,7 +417,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
|
||||
m.Set("plex_token", token)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
|
||||
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -434,11 +438,11 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
|
||||
}
|
||||
err = os.MkdirAll(dbPath, os.ModePerm)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create cache directory %v", dbPath)
|
||||
return nil, fmt.Errorf("failed to create cache directory %v: %w", dbPath, err)
|
||||
}
|
||||
err = os.MkdirAll(chunkPath, os.ModePerm)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create cache directory %v", chunkPath)
|
||||
return nil, fmt.Errorf("failed to create cache directory %v: %w", chunkPath, err)
|
||||
}
|
||||
|
||||
dbPath = filepath.Join(dbPath, name+".db")
|
||||
@@ -450,7 +454,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
|
||||
DbWaitTime: time.Duration(opt.DbWaitTime),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to start cache db")
|
||||
return nil, fmt.Errorf("failed to start cache db: %w", err)
|
||||
}
|
||||
// Trap SIGINT and SIGTERM to close the DB handle gracefully
|
||||
c := make(chan os.Signal, 1)
|
||||
@@ -484,12 +488,12 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
|
||||
if f.opt.TempWritePath != "" {
|
||||
err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
|
||||
return nil, fmt.Errorf("failed to create cache directory %v: %w", f.opt.TempWritePath, err)
|
||||
}
|
||||
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
|
||||
f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
|
||||
return nil, fmt.Errorf("failed to create temp fs: %w", err)
|
||||
}
|
||||
fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
|
||||
fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
|
||||
@@ -606,7 +610,7 @@ func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err er
|
||||
out = make(rc.Params)
|
||||
m, err := f.Stats()
|
||||
if err != nil {
|
||||
return out, errors.Errorf("error while getting cache stats")
|
||||
return out, fmt.Errorf("error while getting cache stats")
|
||||
}
|
||||
out["status"] = "ok"
|
||||
out["stats"] = m
|
||||
@@ -633,7 +637,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
|
||||
out = make(rc.Params)
|
||||
remoteInt, ok := in["remote"]
|
||||
if !ok {
|
||||
return out, errors.Errorf("remote is needed")
|
||||
return out, fmt.Errorf("remote is needed")
|
||||
}
|
||||
remote := remoteInt.(string)
|
||||
withData := false
|
||||
@@ -644,7 +648,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
|
||||
|
||||
remote = f.unwrapRemote(remote)
|
||||
if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
|
||||
return out, errors.Errorf("%s doesn't exist in cache", remote)
|
||||
return out, fmt.Errorf("%s doesn't exist in cache", remote)
|
||||
}
|
||||
|
||||
co := NewObject(f, remote)
|
||||
@@ -653,7 +657,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
|
||||
cd := NewDirectory(f, remote)
|
||||
err := f.cache.ExpireDir(cd)
|
||||
if err != nil {
|
||||
return out, errors.WithMessage(err, "error expiring directory")
|
||||
return out, fmt.Errorf("error expiring directory: %w", err)
|
||||
}
|
||||
// notify vfs too
|
||||
f.notifyChangeUpstream(cd.Remote(), fs.EntryDirectory)
|
||||
@@ -664,7 +668,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
|
||||
// expire the entry
|
||||
err = f.cache.ExpireObject(co, withData)
|
||||
if err != nil {
|
||||
return out, errors.WithMessage(err, "error expiring file")
|
||||
return out, fmt.Errorf("error expiring file: %w", err)
|
||||
}
|
||||
// notify vfs too
|
||||
f.notifyChangeUpstream(co.Remote(), fs.EntryObject)
|
||||
@@ -685,24 +689,24 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
case 1:
start, err = strconv.ParseInt(ints[0], 10, 64)
if err != nil {
return nil, errors.Errorf("invalid range: %q", part)
return nil, fmt.Errorf("invalid range: %q", part)
}
end = start + 1
case 2:
if ints[0] != "" {
start, err = strconv.ParseInt(ints[0], 10, 64)
if err != nil {
return nil, errors.Errorf("invalid range: %q", part)
return nil, fmt.Errorf("invalid range: %q", part)
}
}
if ints[1] != "" {
end, err = strconv.ParseInt(ints[1], 10, 64)
if err != nil {
return nil, errors.Errorf("invalid range: %q", part)
return nil, fmt.Errorf("invalid range: %q", part)
}
}
default:
return nil, errors.Errorf("invalid range: %q", part)
return nil, fmt.Errorf("invalid range: %q", part)
}
crs = append(crs, chunkRange{start: start, end: end})
}
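The rc fetch handler above parses chunk specifications such as "3", "2-5" or "-5". A self-contained sketch of that parsing, close to the code above but with illustrative names and without the open-ended defaults:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseRange sketches the chunk-range parsing: "3" means chunk 3, "2-5" a
// span, and an empty bound leaves start or end at its zero value.
func parseRange(part string) (start, end int64, err error) {
	ints := strings.Split(part, "-")
	switch len(ints) {
	case 1:
		start, err = strconv.ParseInt(ints[0], 10, 64)
		if err != nil {
			return 0, 0, fmt.Errorf("invalid range: %q", part)
		}
		end = start + 1
	case 2:
		if ints[0] != "" {
			if start, err = strconv.ParseInt(ints[0], 10, 64); err != nil {
				return 0, 0, fmt.Errorf("invalid range: %q", part)
			}
		}
		if ints[1] != "" {
			if end, err = strconv.ParseInt(ints[1], 10, 64); err != nil {
				return 0, 0, fmt.Errorf("invalid range: %q", part)
			}
		}
	default:
		return 0, 0, fmt.Errorf("invalid range: %q", part)
	}
	return start, end, nil
}

func main() {
	s, e, _ := parseRange("2-5")
	fmt.Println(s, e) // 2 5
}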
@@ -757,18 +761,18 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
|
||||
delete(in, "chunks")
|
||||
crs, err := parseChunks(s)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "invalid chunks parameter")
|
||||
return nil, fmt.Errorf("invalid chunks parameter: %w", err)
|
||||
}
|
||||
var files [][2]string
|
||||
for k, v := range in {
|
||||
if !strings.HasPrefix(k, "file") {
|
||||
return nil, errors.Errorf("invalid parameter %s=%s", k, v)
|
||||
return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
|
||||
}
|
||||
switch v := v.(type) {
|
||||
case string:
|
||||
files = append(files, [2]string{v, f.unwrapRemote(v)})
|
||||
default:
|
||||
return nil, errors.Errorf("invalid parameter %s=%s", k, v)
|
||||
return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
|
||||
}
|
||||
}
|
||||
type fileStatus struct {
|
||||
@@ -1124,7 +1128,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
case fs.Directory:
|
||||
_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
|
||||
default:
|
||||
return errors.Errorf("Unknown object type %T", entry)
|
||||
return fmt.Errorf("unknown object type %T", entry)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1743,7 +1747,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
do := f.Fs.Features().About
|
||||
if do == nil {
|
||||
return nil, errors.New("About not supported")
|
||||
return nil, errors.New("not supported by underlying remote")
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
|
||||
36
backend/cache/cache_internal_test.go
vendored
@@ -7,6 +7,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
goflag "flag"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -22,7 +23,6 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/backend/cache"
|
||||
"github.com/rclone/rclone/backend/crypt"
|
||||
_ "github.com/rclone/rclone/backend/drive"
|
||||
@@ -446,7 +446,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
|
||||
return err
|
||||
}
|
||||
if coSize != expectedSize {
|
||||
return errors.Errorf("%v <> %v", coSize, expectedSize)
|
||||
return fmt.Errorf("%v <> %v", coSize, expectedSize)
|
||||
}
|
||||
return nil
|
||||
}, 12, time.Second*10)
|
||||
@@ -502,7 +502,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
|
||||
}
|
||||
if len(li) != 2 {
|
||||
log.Printf("not expected listing /test: %v", li)
|
||||
return errors.Errorf("not expected listing /test: %v", li)
|
||||
return fmt.Errorf("not expected listing /test: %v", li)
|
||||
}
|
||||
|
||||
li, err = runInstance.list(t, rootFs, "test/one")
|
||||
@@ -512,7 +512,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
|
||||
}
|
||||
if len(li) != 0 {
|
||||
log.Printf("not expected listing /test/one: %v", li)
|
||||
return errors.Errorf("not expected listing /test/one: %v", li)
|
||||
return fmt.Errorf("not expected listing /test/one: %v", li)
|
||||
}
|
||||
|
||||
li, err = runInstance.list(t, rootFs, "test/second")
|
||||
@@ -522,21 +522,21 @@ func TestInternalMoveWithNotify(t *testing.T) {
|
||||
}
|
||||
if len(li) != 1 {
|
||||
log.Printf("not expected listing /test/second: %v", li)
|
||||
return errors.Errorf("not expected listing /test/second: %v", li)
|
||||
return fmt.Errorf("not expected listing /test/second: %v", li)
|
||||
}
|
||||
if fi, ok := li[0].(os.FileInfo); ok {
|
||||
if fi.Name() != "data.bin" {
|
||||
log.Printf("not expected name: %v", fi.Name())
|
||||
return errors.Errorf("not expected name: %v", fi.Name())
|
||||
return fmt.Errorf("not expected name: %v", fi.Name())
|
||||
}
|
||||
} else if di, ok := li[0].(fs.DirEntry); ok {
|
||||
if di.Remote() != "test/second/data.bin" {
|
||||
log.Printf("not expected remote: %v", di.Remote())
|
||||
return errors.Errorf("not expected remote: %v", di.Remote())
|
||||
return fmt.Errorf("not expected remote: %v", di.Remote())
|
||||
}
|
||||
} else {
|
||||
log.Printf("unexpected listing: %v", li)
|
||||
return errors.Errorf("unexpected listing: %v", li)
|
||||
return fmt.Errorf("unexpected listing: %v", li)
|
||||
}
|
||||
|
||||
log.Printf("complete listing: %v", li)
|
||||
@@ -591,17 +591,17 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
|
||||
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
|
||||
if !found {
|
||||
log.Printf("not found /test")
|
||||
return errors.Errorf("not found /test")
|
||||
return fmt.Errorf("not found /test")
|
||||
}
|
||||
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||
if !found {
|
||||
log.Printf("not found /test/one")
|
||||
return errors.Errorf("not found /test/one")
|
||||
return fmt.Errorf("not found /test/one")
|
||||
}
|
||||
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
|
||||
if !found {
|
||||
log.Printf("not found /test/one/test2")
|
||||
return errors.Errorf("not found /test/one/test2")
|
||||
return fmt.Errorf("not found /test/one/test2")
|
||||
}
|
||||
li, err := runInstance.list(t, rootFs, "test/one")
|
||||
if err != nil {
|
||||
@@ -610,21 +610,21 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
|
||||
}
|
||||
if len(li) != 1 {
|
||||
log.Printf("not expected listing /test/one: %v", li)
|
||||
return errors.Errorf("not expected listing /test/one: %v", li)
|
||||
return fmt.Errorf("not expected listing /test/one: %v", li)
|
||||
}
|
||||
if fi, ok := li[0].(os.FileInfo); ok {
|
||||
if fi.Name() != "test2" {
|
||||
log.Printf("not expected name: %v", fi.Name())
|
||||
return errors.Errorf("not expected name: %v", fi.Name())
|
||||
return fmt.Errorf("not expected name: %v", fi.Name())
|
||||
}
|
||||
} else if di, ok := li[0].(fs.DirEntry); ok {
|
||||
if di.Remote() != "test/one/test2" {
|
||||
log.Printf("not expected remote: %v", di.Remote())
|
||||
return errors.Errorf("not expected remote: %v", di.Remote())
|
||||
return fmt.Errorf("not expected remote: %v", di.Remote())
|
||||
}
|
||||
} else {
|
||||
log.Printf("unexpected listing: %v", li)
|
||||
return errors.Errorf("unexpected listing: %v", li)
|
||||
return fmt.Errorf("unexpected listing: %v", li)
|
||||
}
|
||||
log.Printf("complete listing /test/one/test2")
|
||||
return nil
|
||||
@@ -1062,7 +1062,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
|
||||
checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
|
||||
|
||||
if !noLengthCheck && size != int64(len(checkSample)) {
|
||||
return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
|
||||
return checkSample, fmt.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
|
||||
}
|
||||
return checkSample, nil
|
||||
}
|
||||
@@ -1257,7 +1257,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
|
||||
case state = <-buCh:
|
||||
// continue
|
||||
case <-time.After(maxDuration):
|
||||
waitCh <- errors.Errorf("Timed out waiting for background upload: %v", remote)
|
||||
waitCh <- fmt.Errorf("Timed out waiting for background upload: %v", remote)
|
||||
return
|
||||
}
|
||||
checkRemote := state.Remote
|
||||
@@ -1274,7 +1274,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
|
||||
return
|
||||
}
|
||||
}
|
||||
waitCh <- errors.Errorf("Too many attempts to wait for the background upload: %v", remote)
|
||||
waitCh <- fmt.Errorf("Too many attempts to wait for the background upload: %v", remote)
|
||||
}()
|
||||
return waitCh
|
||||
}
|
||||
|
||||
2
backend/cache/cache_test.go
vendored
@@ -19,7 +19,7 @@ func TestIntegration(t *testing.T) {
|
||||
RemoteName: "TestCache:",
|
||||
NilObject: (*cache.Object)(nil),
|
||||
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
|
||||
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
|
||||
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
|
||||
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
|
||||
})
|
||||
}
|
||||
|
||||
6
backend/cache/handle.go
vendored
@@ -5,6 +5,7 @@ package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
@@ -13,7 +14,6 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
)
|
||||
@@ -243,7 +243,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
return nil, errors.Errorf("chunk not found %v", chunkStart)
|
||||
return nil, fmt.Errorf("chunk not found %v", chunkStart)
|
||||
}
|
||||
|
||||
// first chunk will be aligned with the start
|
||||
@@ -323,7 +323,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
|
||||
fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
|
||||
r.offset = r.cachedObject.Size() + offset
|
||||
default:
|
||||
err = errors.Errorf("cache: unimplemented seek whence %v", whence)
|
||||
err = fmt.Errorf("cache: unimplemented seek whence %v", whence)
|
||||
}
|
||||
|
||||
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
|
||||
|
||||
14
backend/cache/object.go
vendored
@@ -5,12 +5,12 @@ package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
@@ -178,10 +178,14 @@ func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
|
||||
}
|
||||
if o.isTempFile() {
|
||||
liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
|
||||
err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("in parent fs %v: %w", o.ParentFs, err)
|
||||
}
|
||||
} else {
|
||||
liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
|
||||
err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("in cache fs %v: %w", o.CacheFs.Fs, err)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
fs.Errorf(o, "error refreshing object in : %v", err)
|
||||
@@ -253,7 +257,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
defer o.CacheFs.backgroundRunner.play()
|
||||
// don't allow started uploads
|
||||
if o.isTempFile() && o.tempFileStartedUpload() {
|
||||
return errors.Errorf("%v is currently uploading, can't update", o)
|
||||
return fmt.Errorf("%v is currently uploading, can't update", o)
|
||||
}
|
||||
}
|
||||
fs.Debugf(o, "updating object contents with size %v", src.Size())
|
||||
@@ -292,7 +296,7 @@ func (o *Object) Remove(ctx context.Context) error {
|
||||
defer o.CacheFs.backgroundRunner.play()
|
||||
// don't allow started uploads
|
||||
if o.isTempFile() && o.tempFileStartedUpload() {
|
||||
return errors.Errorf("%v is currently uploading, can't delete", o)
|
||||
return fmt.Errorf("%v is currently uploading, can't delete", o)
|
||||
}
|
||||
}
|
||||
err := o.Object.Remove(ctx)
|
||||
|
||||
2
backend/cache/plex.go
vendored
@@ -213,7 +213,7 @@ func (p *plexConnector) authenticate() error {
|
||||
var data map[string]interface{}
|
||||
err = json.NewDecoder(resp.Body).Decode(&data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to obtain token: %v", err)
|
||||
return fmt.Errorf("failed to obtain token: %w", err)
|
||||
}
|
||||
tokenGen, ok := get(data, "user", "authToken")
|
||||
if !ok {
|
||||
|
||||
9
backend/cache/storage_memory.go
vendored
@@ -4,12 +4,12 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
cache "github.com/patrickmn/go-cache"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
@@ -53,7 +53,7 @@ func (m *Memory) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
return nil, errors.Errorf("couldn't get cached object data at offset %v", offset)
|
||||
return nil, fmt.Errorf("couldn't get cached object data at offset %v", offset)
|
||||
}
|
||||
|
||||
// AddChunk adds a new chunk of a cached object
|
||||
@@ -76,10 +76,7 @@ func (m *Memory) CleanChunksByAge(chunkAge time.Duration) {
|
||||
|
||||
// CleanChunksByNeed will cleanup chunks after the FS passes a specific chunk
|
||||
func (m *Memory) CleanChunksByNeed(offset int64) {
|
||||
var items map[string]cache.Item
|
||||
|
||||
items = m.db.Items()
|
||||
for key := range items {
|
||||
for key := range m.db.Items() {
|
||||
sepIdx := strings.LastIndex(key, "-")
|
||||
keyOffset, err := strconv.ParseInt(key[sepIdx+1:], 10, 64)
|
||||
if err != nil {
|
||||
|
||||
88
backend/cache/storage_persistent.go
vendored
@@ -17,7 +17,6 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
@@ -120,11 +119,11 @@ func (b *Persistent) connect() error {
|
||||
|
||||
err = os.MkdirAll(b.dataPath, os.ModePerm)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
|
||||
return fmt.Errorf("failed to create a data directory %q: %w", b.dataPath, err)
|
||||
}
|
||||
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
|
||||
return fmt.Errorf("failed to open a cache connection to %q: %w", b.dbPath, err)
|
||||
}
|
||||
if b.features.PurgeDb {
|
||||
b.Purge()
|
||||
@@ -176,7 +175,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
|
||||
err := b.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := b.getBucket(remote, false, tx)
|
||||
if bucket == nil {
|
||||
return errors.Errorf("couldn't open bucket (%v)", remote)
|
||||
return fmt.Errorf("couldn't open bucket (%v)", remote)
|
||||
}
|
||||
|
||||
data := bucket.Get([]byte("."))
|
||||
@@ -184,7 +183,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
|
||||
return json.Unmarshal(data, cd)
|
||||
}
|
||||
|
||||
return errors.Errorf("%v not found", remote)
|
||||
return fmt.Errorf("%v not found", remote)
|
||||
})
|
||||
|
||||
return cd, err
|
||||
@@ -209,7 +208,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
|
||||
bucket = b.getBucket(cachedDirs[0].Dir, true, tx)
|
||||
}
|
||||
if bucket == nil {
|
||||
return errors.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
|
||||
return fmt.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
|
||||
}
|
||||
|
||||
for _, cachedDir := range cachedDirs {
|
||||
@@ -226,7 +225,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
|
||||
|
||||
encoded, err := json.Marshal(cachedDir)
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
|
||||
return fmt.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
|
||||
}
|
||||
err = b.Put([]byte("."), encoded)
|
||||
if err != nil {
|
||||
@@ -244,17 +243,17 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
|
||||
err := b.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := b.getBucket(cachedDir.abs(), false, tx)
|
||||
if bucket == nil {
|
||||
return errors.Errorf("couldn't open bucket (%v)", cachedDir.abs())
|
||||
return fmt.Errorf("couldn't open bucket (%v)", cachedDir.abs())
|
||||
}
|
||||
|
||||
val := bucket.Get([]byte("."))
|
||||
if val != nil {
|
||||
err := json.Unmarshal(val, cachedDir)
|
||||
if err != nil {
|
||||
return errors.Errorf("error during unmarshalling obj: %v", err)
|
||||
return fmt.Errorf("error during unmarshalling obj: %w", err)
|
||||
}
|
||||
} else {
|
||||
return errors.Errorf("missing cached dir: %v", cachedDir)
|
||||
return fmt.Errorf("missing cached dir: %v", cachedDir)
|
||||
}
|
||||
|
||||
c := bucket.Cursor()
|
||||
@@ -269,7 +268,7 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
|
||||
// we try to find a cached meta for the dir
|
||||
currentBucket := c.Bucket().Bucket(k)
|
||||
if currentBucket == nil {
|
||||
return errors.Errorf("couldn't open bucket (%v)", string(k))
|
||||
return fmt.Errorf("couldn't open bucket (%v)", string(k))
|
||||
}
|
||||
|
||||
metaKey := currentBucket.Get([]byte("."))
|
||||
@@ -318,7 +317,7 @@ func (b *Persistent) RemoveDir(fp string) error {
|
||||
err = b.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := b.getBucket(cleanPath(parentDir), false, tx)
|
||||
if bucket == nil {
|
||||
return errors.Errorf("couldn't open bucket (%v)", fp)
|
||||
return fmt.Errorf("couldn't open bucket (%v)", fp)
|
||||
}
|
||||
// delete the cached dir
|
||||
err := bucket.DeleteBucket([]byte(cleanPath(dirName)))
|
||||
@@ -378,13 +377,13 @@ func (b *Persistent) GetObject(cachedObject *Object) (err error) {
|
||||
return b.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := b.getBucket(cachedObject.Dir, false, tx)
|
||||
if bucket == nil {
|
||||
return errors.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
|
||||
return fmt.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
|
||||
}
|
||||
val := bucket.Get([]byte(cachedObject.Name))
|
||||
if val != nil {
|
||||
return json.Unmarshal(val, cachedObject)
|
||||
}
|
||||
return errors.Errorf("couldn't find object (%v)", cachedObject.Name)
|
||||
return fmt.Errorf("couldn't find object (%v)", cachedObject.Name)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -393,16 +392,16 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
|
||||
return b.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := b.getBucket(cachedObject.Dir, true, tx)
|
||||
if bucket == nil {
|
||||
return errors.Errorf("couldn't open parent bucket for %v", cachedObject)
|
||||
return fmt.Errorf("couldn't open parent bucket for %v", cachedObject)
|
||||
}
|
||||
// cache Object Info
|
||||
encoded, err := json.Marshal(cachedObject)
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
|
||||
return fmt.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
|
||||
}
|
||||
err = bucket.Put([]byte(cachedObject.Name), encoded)
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
|
||||
return fmt.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
@@ -414,7 +413,7 @@ func (b *Persistent) RemoveObject(fp string) error {
|
||||
return b.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := b.getBucket(cleanPath(parentDir), false, tx)
|
||||
if bucket == nil {
|
||||
return errors.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
|
||||
return fmt.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
|
||||
}
|
||||
err := bucket.Delete([]byte(cleanPath(objName)))
|
||||
if err != nil {
|
||||
@@ -446,7 +445,7 @@ func (b *Persistent) HasEntry(remote string) bool {
|
||||
err := b.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := b.getBucket(dir, false, tx)
|
||||
if bucket == nil {
|
||||
return errors.Errorf("couldn't open parent bucket for %v", remote)
|
||||
return fmt.Errorf("couldn't open parent bucket for %v", remote)
|
||||
}
|
||||
if f := bucket.Bucket([]byte(name)); f != nil {
|
||||
return nil
|
||||
@@ -455,12 +454,9 @@ func (b *Persistent) HasEntry(remote string) bool {
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.Errorf("couldn't find object (%v)", remote)
|
||||
return fmt.Errorf("couldn't find object (%v)", remote)
|
||||
})
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// HasChunk confirms the existence of a single chunk of an object
|
||||
@@ -555,7 +551,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
|
||||
err := b.db.Update(func(tx *bolt.Tx) error {
|
||||
dataTsBucket := tx.Bucket([]byte(DataTsBucket))
|
||||
if dataTsBucket == nil {
|
||||
return errors.Errorf("Couldn't open (%v) bucket", DataTsBucket)
|
||||
return fmt.Errorf("couldn't open (%v) bucket", DataTsBucket)
|
||||
}
|
||||
// iterate through ts
|
||||
c := dataTsBucket.Cursor()
|
||||
@@ -733,7 +729,7 @@ func (b *Persistent) GetChunkTs(path string, offset int64) (time.Time, error) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return errors.Errorf("not found %v-%v", path, offset)
|
||||
return fmt.Errorf("not found %v-%v", path, offset)
|
||||
})
|
||||
|
||||
return t, err
|
||||
@@ -773,7 +769,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
|
||||
return b.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't bucket for %v", tempBucket)
|
||||
return fmt.Errorf("couldn't bucket for %v", tempBucket)
|
||||
}
|
||||
tempObj := &tempUploadInfo{
|
||||
DestPath: destPath,
|
||||
@@ -784,11 +780,11 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
|
||||
// cache Object Info
|
||||
encoded, err := json.Marshal(tempObj)
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
|
||||
return fmt.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
|
||||
}
|
||||
err = bucket.Put([]byte(destPath), encoded)
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
|
||||
return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -803,7 +799,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
|
||||
err = b.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't bucket for %v", tempBucket)
|
||||
return fmt.Errorf("couldn't bucket for %v", tempBucket)
|
||||
}
|
||||
|
||||
c := bucket.Cursor()
|
||||
@@ -836,7 +832,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.Errorf("no pending upload found")
|
||||
return fmt.Errorf("no pending upload found")
|
||||
})
|
||||
|
||||
return destPath, err
|
||||
@@ -847,14 +843,14 @@ func (b *Persistent) SearchPendingUpload(remote string) (started bool, err error
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte(tempBucket))
|
||||
if bucket == nil {
|
||||
return errors.Errorf("couldn't bucket for %v", tempBucket)
|
||||
return fmt.Errorf("couldn't bucket for %v", tempBucket)
|
||||
}
|
||||
|
||||
var tempObj = &tempUploadInfo{}
|
||||
v := bucket.Get([]byte(remote))
|
||||
err = json.Unmarshal(v, tempObj)
|
||||
if err != nil {
|
||||
return errors.Errorf("pending upload (%v) not found %v", remote, err)
|
||||
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
|
||||
}
|
||||
|
||||
started = tempObj.Started
|
||||
@@ -869,7 +865,7 @@ func (b *Persistent) searchPendingUploadFromDir(dir string) (remotes []string, e
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte(tempBucket))
|
||||
if bucket == nil {
|
||||
return errors.Errorf("couldn't bucket for %v", tempBucket)
|
||||
return fmt.Errorf("couldn't bucket for %v", tempBucket)
|
||||
}
|
||||
|
||||
c := bucket.Cursor()
|
||||
@@ -899,22 +895,22 @@ func (b *Persistent) rollbackPendingUpload(remote string) error {
|
||||
return b.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't bucket for %v", tempBucket)
|
||||
return fmt.Errorf("couldn't bucket for %v", tempBucket)
|
||||
}
|
||||
var tempObj = &tempUploadInfo{}
|
||||
v := bucket.Get([]byte(remote))
|
||||
err = json.Unmarshal(v, tempObj)
|
||||
if err != nil {
|
||||
return errors.Errorf("pending upload (%v) not found %v", remote, err)
|
||||
return fmt.Errorf("pending upload (%v) not found: %w", remote, err)
|
||||
}
|
||||
tempObj.Started = false
|
||||
v2, err := json.Marshal(tempObj)
|
||||
if err != nil {
|
||||
return errors.Errorf("pending upload not updated %v", err)
|
||||
return fmt.Errorf("pending upload not updated: %w", err)
|
||||
}
|
||||
err = bucket.Put([]byte(tempObj.DestPath), v2)
|
||||
if err != nil {
|
||||
return errors.Errorf("pending upload not updated %v", err)
|
||||
return fmt.Errorf("pending upload not updated: %w", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
@@ -927,7 +923,7 @@ func (b *Persistent) removePendingUpload(remote string) error {
|
||||
return b.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't bucket for %v", tempBucket)
|
||||
return fmt.Errorf("couldn't bucket for %v", tempBucket)
|
||||
}
|
||||
return bucket.Delete([]byte(remote))
|
||||
})
|
||||
@@ -942,17 +938,17 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
|
||||
return b.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't bucket for %v", tempBucket)
|
||||
return fmt.Errorf("couldn't bucket for %v", tempBucket)
|
||||
}
|
||||
|
||||
var tempObj = &tempUploadInfo{}
|
||||
v := bucket.Get([]byte(remote))
|
||||
err = json.Unmarshal(v, tempObj)
|
||||
if err != nil {
|
||||
return errors.Errorf("pending upload (%v) not found %v", remote, err)
|
||||
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
|
||||
}
|
||||
if tempObj.Started {
|
||||
return errors.Errorf("pending upload already started %v", remote)
|
||||
return fmt.Errorf("pending upload already started %v", remote)
|
||||
}
|
||||
err = fn(tempObj)
|
||||
if err != nil {
|
||||
@@ -970,11 +966,11 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
|
||||
}
|
||||
v2, err := json.Marshal(tempObj)
|
||||
if err != nil {
|
||||
return errors.Errorf("pending upload not updated %v", err)
|
||||
return fmt.Errorf("pending upload not updated: %w", err)
|
||||
}
|
||||
err = bucket.Put([]byte(tempObj.DestPath), v2)
|
||||
if err != nil {
|
||||
return errors.Errorf("pending upload not updated %v", err)
|
||||
return fmt.Errorf("pending upload not updated: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -1015,11 +1011,11 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
|
||||
// cache Object Info
|
||||
encoded, err := json.Marshal(tempObj)
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
|
||||
return fmt.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
|
||||
}
|
||||
err = bucket.Put([]byte(destPath), encoded)
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
|
||||
return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
|
||||
}
|
||||
fs.Debugf(cacheFs, "reconciled temporary upload: %v", destPath)
|
||||
}
|
||||
|
||||
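The storage_persistent.go changes above all sit inside bbolt transactions: reads go through db.View, writes through db.Update, and a missing bucket is reported by returning an error from the callback. A minimal, self-contained sketch of that pattern, with a made-up database path and bucket name:

	package main

	import (
		"fmt"
		"log"

		bolt "go.etcd.io/bbolt"
	)

	func main() {
		db, err := bolt.Open("example.db", 0644, nil)
		if err != nil {
			log.Fatal(err)
		}
		defer db.Close()

		// Writes happen inside an Update transaction.
		err = db.Update(func(tx *bolt.Tx) error {
			bucket, err := tx.CreateBucketIfNotExists([]byte("demo"))
			if err != nil {
				return fmt.Errorf("couldn't create bucket: %w", err)
			}
			return bucket.Put([]byte("key"), []byte("value"))
		})
		if err != nil {
			log.Fatal(err)
		}

		// Reads happen inside a read-only View transaction.
		err = db.View(func(tx *bolt.Tx) error {
			bucket := tx.Bucket([]byte("demo"))
			if bucket == nil {
				return fmt.Errorf("couldn't open bucket (%v)", "demo")
			}
			fmt.Printf("%s\n", bucket.Get([]byte("key")))
			return nil
		})
		if err != nil {
			log.Fatal(err)
		}
	}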
@@ -8,6 +8,7 @@ import (
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
gohash "hash"
|
||||
"io"
|
||||
@@ -21,7 +22,6 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
@@ -290,13 +290,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
|
||||
baseName, basePath, err := fspath.SplitFs(remote)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
|
||||
return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err)
|
||||
}
|
||||
// Look for a file first
|
||||
remotePath := fspath.JoinRootPath(basePath, rpath)
|
||||
baseFs, err := cache.Get(ctx, baseName+remotePath)
|
||||
if err != fs.ErrorIsFile && err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", baseName+remotePath)
|
||||
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", baseName+remotePath, err)
|
||||
}
|
||||
if !operations.CanServerSideMove(baseFs) {
|
||||
return nil, errors.New("can't use chunker on a backend which doesn't support server-side move or copy")
|
||||
@@ -386,7 +386,7 @@ type Fs struct {
|
||||
// configure must be called only from NewFs or by unit tests.
|
||||
func (f *Fs) configure(nameFormat, metaFormat, hashType, transactionMode string) error {
|
||||
if err := f.setChunkNameFormat(nameFormat); err != nil {
|
||||
return errors.Wrapf(err, "invalid name format '%s'", nameFormat)
|
||||
return fmt.Errorf("invalid name format '%s': %w", nameFormat, err)
|
||||
}
|
||||
if err := f.setMetaFormat(metaFormat); err != nil {
|
||||
return err
|
||||
@@ -515,7 +515,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
|
||||
|
||||
strRegex := regexp.QuoteMeta(pattern)
|
||||
strRegex = reHashes.ReplaceAllLiteralString(strRegex, reDataOrCtrl)
|
||||
strRegex = strings.Replace(strRegex, "\\*", mainNameRegStr, -1)
|
||||
strRegex = strings.ReplaceAll(strRegex, "\\*", mainNameRegStr)
|
||||
strRegex = fmt.Sprintf("^%s(?:%s|%s)?$", strRegex, tempSuffixRegStr, tempSuffixRegOld)
|
||||
f.nameRegexp = regexp.MustCompile(strRegex)
|
||||
|
||||
@@ -524,7 +524,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
|
||||
if numDigits > 1 {
|
||||
fmtDigits = fmt.Sprintf("%%0%dd", numDigits)
|
||||
}
|
||||
strFmt := strings.Replace(pattern, "%", "%%", -1)
|
||||
strFmt := strings.ReplaceAll(pattern, "%", "%%")
|
||||
strFmt = strings.Replace(strFmt, "*", "%s", 1)
|
||||
f.dataNameFmt = reHashes.ReplaceAllLiteralString(strFmt, fmtDigits)
|
||||
f.ctrlNameFmt = reHashes.ReplaceAllLiteralString(strFmt, "_%s")
|
||||
@@ -878,7 +878,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
// ignores non-chunked objects and skips chunk size checks.
|
||||
func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.Object, error) {
|
||||
if err := f.forbidChunk(false, remote); err != nil {
|
||||
return nil, errors.Wrap(err, "can't access")
|
||||
return nil, fmt.Errorf("can't access: %w", err)
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -927,7 +927,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
|
||||
case fs.ErrorDirNotFound:
|
||||
entries = nil
|
||||
default:
|
||||
return nil, errors.Wrap(err, "can't detect composite file")
|
||||
return nil, fmt.Errorf("can't detect composite file: %w", err)
|
||||
}
|
||||
|
||||
if f.useNoRename {
|
||||
@@ -1067,7 +1067,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
|
||||
case ErrMetaTooBig, ErrMetaUnknown:
|
||||
return err // return these errors unwrapped for unit tests
|
||||
default:
|
||||
return errors.Wrap(err, "invalid metadata")
|
||||
return fmt.Errorf("invalid metadata: %w", err)
|
||||
}
|
||||
if o.size != metaInfo.Size() || len(o.chunks) != metaInfo.nChunks {
|
||||
return errors.New("metadata doesn't match file size")
|
||||
@@ -1132,7 +1132,7 @@ func (f *Fs) put(
|
||||
|
||||
// Perform consistency checks
|
||||
if err := f.forbidChunk(src, remote); err != nil {
|
||||
return nil, errors.Wrap(err, action+" refused")
|
||||
return nil, fmt.Errorf("%s refused: %w", action, err)
|
||||
}
|
||||
if target == nil {
|
||||
// Get target object with a quick directory scan
|
||||
@@ -1146,7 +1146,7 @@ func (f *Fs) put(
|
||||
obj := target.(*Object)
|
||||
if err := obj.readMetadata(ctx); err == ErrMetaUnknown {
|
||||
// refuse to update a file of unsupported format
|
||||
return nil, errors.Wrap(err, "refusing to "+action)
|
||||
return nil, fmt.Errorf("refusing to %s: %w", action, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1564,7 +1564,7 @@ func (f *Fs) Hashes() hash.Set {
|
||||
// Shouldn't return an error if it already exists
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
if err := f.forbidChunk(dir, dir); err != nil {
|
||||
return errors.Wrap(err, "can't mkdir")
|
||||
return fmt.Errorf("can't mkdir: %w", err)
|
||||
}
|
||||
return f.base.Mkdir(ctx, dir)
|
||||
}
|
||||
@@ -1633,7 +1633,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
if err := o.f.forbidChunk(o, o.Remote()); err != nil {
|
||||
// operations.Move can still call Remove if chunker's Move refuses
|
||||
// to corrupt file in hard mode. Hence, refuse to Remove, too.
|
||||
return errors.Wrap(err, "refuse to corrupt")
|
||||
return fmt.Errorf("refuse to corrupt: %w", err)
|
||||
}
|
||||
if err := o.readMetadata(ctx); err == ErrMetaUnknown {
|
||||
// Proceed but warn user that unexpected things can happen.
|
||||
@@ -1661,12 +1661,12 @@ func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
// copyOrMove implements copy or move
|
||||
func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMoveFn, md5, sha1, opName string) (fs.Object, error) {
|
||||
if err := f.forbidChunk(o, remote); err != nil {
|
||||
return nil, errors.Wrapf(err, "can't %s", opName)
|
||||
return nil, fmt.Errorf("can't %s: %w", opName, err)
|
||||
}
|
||||
if err := o.readMetadata(ctx); err != nil {
|
||||
// Refuse to copy/move composite files with invalid or future
|
||||
// metadata format which might involve unsupported chunk types.
|
||||
return nil, errors.Wrapf(err, "can't %s this file", opName)
|
||||
return nil, fmt.Errorf("can't %s this file: %w", opName, err)
|
||||
}
|
||||
if !o.isComposite() {
|
||||
fs.Debugf(o, "%s non-chunked object...", opName)
|
||||
@@ -1895,7 +1895,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
do := f.base.Features().CleanUp
|
||||
if do == nil {
|
||||
return errors.New("can't CleanUp")
|
||||
return errors.New("not supported by underlying remote")
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
@@ -1904,7 +1904,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
do := f.base.Features().About
|
||||
if do == nil {
|
||||
return nil, errors.New("About not supported")
|
||||
return nil, errors.New("not supported by underlying remote")
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
@@ -2163,7 +2163,7 @@ func (o *Object) UnWrap() fs.Object {
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
||||
if err := o.readMetadata(ctx); err != nil {
|
||||
// refuse to open unsupported format
|
||||
return nil, errors.Wrap(err, "can't open")
|
||||
return nil, fmt.Errorf("can't open: %w", err)
|
||||
}
|
||||
if !o.isComposite() {
|
||||
return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
|
||||
|
||||
@@ -59,7 +59,7 @@ var mtime1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||
|
||||
func testPutFile(ctx context.Context, t *testing.T, f fs.Fs, name, contents, message string, check bool) fs.Object {
|
||||
item := fstest.Item{Path: name, ModTime: mtime1}
|
||||
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
|
||||
obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
|
||||
assert.NotNil(t, obj, message)
|
||||
return obj
|
||||
}
|
||||
@@ -440,7 +440,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
|
||||
checkSmallFile := func(name, contents string) {
|
||||
filename := path.Join(dir, name)
|
||||
item := fstest.Item{Path: filename, ModTime: modTime}
|
||||
_, put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
|
||||
put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
|
||||
assert.NotNil(t, put)
|
||||
checkSmallFileInternals(put)
|
||||
checkContents(put, contents)
|
||||
@@ -489,7 +489,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
|
||||
|
||||
newFile := func(name string) fs.Object {
|
||||
item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
|
||||
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||
require.NotNil(t, obj)
|
||||
return obj
|
||||
}
|
||||
@@ -599,7 +599,7 @@ func testChunkNumberOverflow(t *testing.T, f *Fs) {
|
||||
newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) {
|
||||
filename = path.Join(dir, name)
|
||||
item := fstest.Item{Path: filename, ModTime: modTime}
|
||||
_, obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||
obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||
require.NotNil(t, obj)
|
||||
if chunkObj, isChunkObj := obj.(*Object); isChunkObj {
|
||||
txnID = chunkObj.xactID
|
||||
@@ -716,7 +716,7 @@ func testFutureProof(t *testing.T, f *Fs) {
|
||||
name = f.makeChunkName(name, part-1, "", "")
|
||||
}
|
||||
item := fstest.Item{Path: name, ModTime: modTime}
|
||||
_, obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
|
||||
obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
|
||||
assert.NotNil(t, obj, msg)
|
||||
}
|
||||
|
||||
@@ -790,7 +790,7 @@ func testBackwardsCompatibility(t *testing.T, f *Fs) {
|
||||
newFile := func(f fs.Fs, name string) (fs.Object, string) {
|
||||
filename := path.Join(dir, name)
|
||||
item := fstest.Item{Path: filename, ModTime: modTime}
|
||||
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||
require.NotNil(t, obj)
|
||||
return obj, filename
|
||||
}
|
||||
@@ -844,7 +844,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
|
||||
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||
item := fstest.Item{Path: "movefile", ModTime: modTime}
|
||||
contents := "abcdef"
|
||||
_, file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
|
||||
file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
|
||||
|
||||
dstOverwritten, _ := fs2.NewObject(ctx, "movefile")
|
||||
dstFile, err := operations.Move(ctx, fs2, dstOverwritten, "movefile", file)
|
||||
|
||||
@@ -35,6 +35,7 @@ func TestIntegration(t *testing.T) {
|
||||
"MimeType",
|
||||
"GetTier",
|
||||
"SetTier",
|
||||
"Metadata",
|
||||
},
|
||||
UnimplementableFsMethods: []string{
|
||||
"PublicLink",
|
||||
@@ -53,6 +54,7 @@ func TestIntegration(t *testing.T) {
|
||||
{Name: name, Key: "type", Value: "chunker"},
|
||||
{Name: name, Key: "remote", Value: tempDir},
|
||||
}
|
||||
opt.QuickTestOK = true
|
||||
}
|
||||
fstests.Run(t, &opt)
|
||||
}
|
||||
|
||||
backend/combine/combine.go (new file): 991 lines
@@ -0,0 +1,991 @@
|
||||
// Package combine implements a backend to combine multiple remotes in a directory tree
|
||||
package combine
|
||||
|
||||
/*
|
||||
Have API to add/remove branches in the combine
|
||||
*/
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fsi := &fs.RegInfo{
|
||||
Name: "combine",
|
||||
Description: "Combine several remotes into one",
|
||||
NewFs: NewFs,
|
||||
MetadataInfo: &fs.MetadataInfo{
|
||||
Help: `Any metadata supported by the underlying remote is read and written.`,
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: "upstreams",
|
||||
Help: `Upstreams for combining
|
||||
|
||||
These should be in the form
|
||||
|
||||
dir=remote:path dir2=remote2:path
|
||||
|
||||
Where before the = is specified the root directory and after is the remote to
|
||||
put there.
|
||||
|
||||
Embedded spaces can be added using quotes
|
||||
|
||||
"dir=remote:path with space" "dir2=remote2:path with space"
|
||||
|
||||
`,
|
||||
Required: true,
|
||||
Default: fs.SpaceSepList(nil),
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
}
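// Illustration only (not part of this file): given the upstreams help text
// above, a combine remote might be configured in rclone.conf roughly like
// this; the remote and directory names below are made-up examples:
//
//	[combined]
//	type = combine
//	upstreams = docs=drive:Documents "media=nas:shared media"
//
// Each dir=remote:path pair becomes a top-level directory of the combined
// remote, and quoting keeps embedded spaces inside one pair.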
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Upstreams fs.SpaceSepList `config:"upstreams"`
|
||||
}
|
||||
|
||||
// Fs represents a combine of upstreams
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
features *fs.Features // optional features
|
||||
opt Options // options for this Fs
|
||||
root string // the path we are working on
|
||||
hashSet hash.Set // common hashes
|
||||
when time.Time // directory times
|
||||
upstreams map[string]*upstream // map of upstreams
|
||||
}
|
||||
|
||||
// adjustment stores the info to add a prefix to a path or chop characters off
|
||||
type adjustment struct {
|
||||
root string
|
||||
rootSlash string
|
||||
mountpoint string
|
||||
mountpointSlash string
|
||||
}
|
||||
|
||||
// newAdjustment makes a new path adjustment adjusting between mountpoint and root
|
||||
//
|
||||
// mountpoint is the point the upstream is mounted and root is the combine root
|
||||
func newAdjustment(root, mountpoint string) (a adjustment) {
|
||||
return adjustment{
|
||||
root: root,
|
||||
rootSlash: root + "/",
|
||||
mountpoint: mountpoint,
|
||||
mountpointSlash: mountpoint + "/",
|
||||
}
|
||||
}
|
||||
|
||||
var errNotUnderRoot = errors.New("file not under root")
|
||||
|
||||
// do makes the adjustment on s, mapping an upstream path into a combine path
|
||||
func (a *adjustment) do(s string) (string, error) {
|
||||
absPath := join(a.mountpoint, s)
|
||||
if a.root == "" {
|
||||
return absPath, nil
|
||||
}
|
||||
if absPath == a.root {
|
||||
return "", nil
|
||||
}
|
||||
if !strings.HasPrefix(absPath, a.rootSlash) {
|
||||
return "", errNotUnderRoot
|
||||
}
|
||||
return absPath[len(a.rootSlash):], nil
|
||||
}
|
||||
|
||||
// undo makes the adjustment on s, mapping a combine path into an upstream path
|
||||
func (a *adjustment) undo(s string) (string, error) {
|
||||
absPath := join(a.root, s)
|
||||
if absPath == a.mountpoint {
|
||||
return "", nil
|
||||
}
|
||||
if !strings.HasPrefix(absPath, a.mountpointSlash) {
|
||||
return "", errNotUnderRoot
|
||||
}
|
||||
return absPath[len(a.mountpointSlash):], nil
|
||||
}
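// Worked example (illustration only, mirroring the cases exercised in
// combine_internal_test.go further down): with root "mountpoint/path" and
// mountpoint "mountpoint",
//
//	do("path/to/file.txt") -> "to/file.txt"      (upstream path -> combine path)
//	undo("to/file.txt")    -> "path/to/file.txt" (combine path -> upstream path)
//
// and paths that fall outside the root map to errNotUnderRoot.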
|
||||
|
||||
// upstream represents an upstream Fs
|
||||
type upstream struct {
|
||||
f fs.Fs
|
||||
parent *Fs
|
||||
dir string // directory the upstream is mounted
|
||||
pathAdjustment adjustment // how to fiddle with the path
|
||||
}
|
||||
|
||||
// Create an upstream from the directory it is mounted on and the remote
|
||||
func (f *Fs) newUpstream(ctx context.Context, dir, remote string) (*upstream, error) {
|
||||
uFs, err := cache.Get(ctx, remote)
|
||||
if err == fs.ErrorIsFile {
|
||||
return nil, fmt.Errorf("can't combine files yet, only directories %q: %w", remote, err)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create upstream %q: %w", remote, err)
|
||||
}
|
||||
u := &upstream{
|
||||
f: uFs,
|
||||
parent: f,
|
||||
dir: dir,
|
||||
pathAdjustment: newAdjustment(f.root, dir),
|
||||
}
|
||||
return u, nil
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path.
|
||||
//
|
||||
// The returned Fs is the actual Fs, referenced by remote in the config
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
|
||||
// defer log.Trace(nil, "name=%q, root=%q, m=%v", name, root, m)("f=%+v, err=%v", &outFs, &err)
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err = configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Backward compatible to old config
|
||||
if len(opt.Upstreams) == 0 {
|
||||
return nil, errors.New("combine can't point to an empty upstream - check the value of the upstreams setting")
|
||||
}
|
||||
for _, u := range opt.Upstreams {
|
||||
if strings.HasPrefix(u, name+":") {
|
||||
return nil, errors.New("can't point combine remote at itself - check the value of the upstreams setting")
|
||||
}
|
||||
}
|
||||
isDir := false
|
||||
for strings.HasSuffix(root, "/") {
|
||||
root = root[:len(root)-1]
|
||||
isDir = true
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
upstreams: make(map[string]*upstream, len(opt.Upstreams)),
|
||||
when: time.Now(),
|
||||
}
|
||||
|
||||
g, gCtx := errgroup.WithContext(ctx)
|
||||
var mu sync.Mutex
|
||||
for _, upstream := range opt.Upstreams {
|
||||
upstream := upstream
|
||||
g.Go(func() (err error) {
|
||||
equal := strings.IndexRune(upstream, '=')
|
||||
if equal < 0 {
|
||||
return fmt.Errorf("no \"=\" in upstream definition %q", upstream)
|
||||
}
|
||||
dir, remote := upstream[:equal], upstream[equal+1:]
|
||||
if dir == "" {
|
||||
return fmt.Errorf("empty dir in upstream definition %q", upstream)
|
||||
}
|
||||
if remote == "" {
|
||||
return fmt.Errorf("empty remote in upstream definition %q", upstream)
|
||||
}
|
||||
if strings.ContainsRune(dir, '/') {
|
||||
return fmt.Errorf("dirs can't contain / (yet): %q", dir)
|
||||
}
|
||||
u, err := f.newUpstream(gCtx, dir, remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mu.Lock()
|
||||
if _, found := f.upstreams[dir]; found {
|
||||
err = fmt.Errorf("duplicate directory name %q", dir)
|
||||
} else {
|
||||
f.upstreams[dir] = u
|
||||
}
|
||||
mu.Unlock()
|
||||
return err
|
||||
})
|
||||
}
|
||||
err = g.Wait()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// check features
|
||||
var features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
DuplicateFiles: false,
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
BucketBased: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
}).Fill(ctx, f)
|
||||
canMove := true
|
||||
for _, u := range f.upstreams {
|
||||
features = features.Mask(ctx, u.f) // Mask all upstream fs
|
||||
if !operations.CanServerSideMove(u.f) {
|
||||
canMove = false
|
||||
}
|
||||
}
|
||||
// We can move if all remotes support Move or Copy
|
||||
if canMove {
|
||||
features.Move = f.Move
|
||||
}
|
||||
|
||||
// Enable ListR when upstreams either support ListR or is local
|
||||
// But not when all upstreams are local
|
||||
if features.ListR == nil {
|
||||
for _, u := range f.upstreams {
|
||||
if u.f.Features().ListR != nil {
|
||||
features.ListR = f.ListR
|
||||
} else if !u.f.Features().IsLocal {
|
||||
features.ListR = nil
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Enable Purge when any upstreams support it
|
||||
if features.Purge == nil {
|
||||
for _, u := range f.upstreams {
|
||||
if u.f.Features().Purge != nil {
|
||||
features.Purge = f.Purge
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Enable Shutdown when any upstreams support it
|
||||
if features.Shutdown == nil {
|
||||
for _, u := range f.upstreams {
|
||||
if u.f.Features().Shutdown != nil {
|
||||
features.Shutdown = f.Shutdown
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Enable DirCacheFlush when any upstreams support it
|
||||
if features.DirCacheFlush == nil {
|
||||
for _, u := range f.upstreams {
|
||||
if u.f.Features().DirCacheFlush != nil {
|
||||
features.DirCacheFlush = f.DirCacheFlush
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Enable ChangeNotify when any upstreams support it
|
||||
if features.ChangeNotify == nil {
|
||||
for _, u := range f.upstreams {
|
||||
if u.f.Features().ChangeNotify != nil {
|
||||
features.ChangeNotify = f.ChangeNotify
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
f.features = features
|
||||
|
||||
// Get common intersection of hashes
|
||||
var hashSet hash.Set
|
||||
var first = true
|
||||
for _, u := range f.upstreams {
|
||||
if first {
|
||||
hashSet = u.f.Hashes()
|
||||
first = false
|
||||
} else {
|
||||
hashSet = hashSet.Overlap(u.f.Hashes())
|
||||
}
|
||||
}
|
||||
f.hashSet = hashSet
|
||||
|
||||
// Check to see if the root is actually a file
|
||||
if f.root != "" && !isDir {
|
||||
_, err := f.NewObject(ctx, "")
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile || err == fs.ErrorIsDir {
|
||||
// File doesn't exist or is a directory so return old f
|
||||
return f, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check to see if the root path is actually an existing file
|
||||
f.root = path.Dir(f.root)
|
||||
if f.root == "." {
|
||||
f.root = ""
|
||||
}
|
||||
// Adjust path adjustment to remove leaf
|
||||
for _, u := range f.upstreams {
|
||||
u.pathAdjustment = newAdjustment(f.root, u.dir)
|
||||
}
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Run a function over all the upstreams in parallel
|
||||
func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
|
||||
g, gCtx := errgroup.WithContext(ctx)
|
||||
for _, u := range f.upstreams {
|
||||
u := u
|
||||
g.Go(func() (err error) {
|
||||
return fn(gCtx, u)
|
||||
})
|
||||
}
|
||||
return g.Wait()
|
||||
}
|
||||
|
||||
// join the elements together but unlike path.Join return empty string
|
||||
func join(elem ...string) string {
|
||||
result := path.Join(elem...)
|
||||
if result == "." {
|
||||
return ""
|
||||
}
|
||||
if len(result) > 0 && result[0] == '/' {
|
||||
result = result[1:]
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// find the upstream for the remote passed in, returning the upstream and the adjusted path
|
||||
func (f *Fs) findUpstream(remote string) (u *upstream, uRemote string, err error) {
|
||||
// defer log.Trace(remote, "")("f=%v, uRemote=%q, err=%v", &u, &uRemote, &err)
|
||||
for _, u := range f.upstreams {
|
||||
uRemote, err = u.pathAdjustment.undo(remote)
|
||||
if err == nil {
|
||||
return u, uRemote, nil
|
||||
}
|
||||
}
|
||||
return nil, "", fmt.Errorf("combine for remote %q: %w", remote, fs.ErrorDirNotFound)
|
||||
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("combine root '%s'", f.root)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Rmdir removes the root directory of the Fs object
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
// The root always exists
|
||||
if f.root == "" && dir == "" {
|
||||
return nil
|
||||
}
|
||||
u, uRemote, err := f.findUpstream(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return u.f.Rmdir(ctx, uRemote)
|
||||
}
|
||||
|
||||
// Hashes returns the hash types common to all the upstreams
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return f.hashSet
|
||||
}
|
||||
|
||||
// Mkdir makes the root directory of the Fs object
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
// The root always exists
|
||||
if f.root == "" && dir == "" {
|
||||
return nil
|
||||
}
|
||||
u, uRemote, err := f.findUpstream(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return u.f.Mkdir(ctx, uRemote)
|
||||
}
|
||||
|
||||
// purge the upstream or fallback to a slow way
|
||||
func (u *upstream) purge(ctx context.Context, dir string) (err error) {
|
||||
if do := u.f.Features().Purge; do != nil {
|
||||
err = do(ctx, dir)
|
||||
} else {
|
||||
err = operations.Purge(ctx, u.f, dir)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Purge all files in the directory
|
||||
//
|
||||
// Implement this if you have a way of deleting all the files
|
||||
// quicker than just running Remove() on the result of List()
|
||||
//
|
||||
// Return an error if it doesn't exist
|
||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
if f.root == "" && dir == "" {
|
||||
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
|
||||
return u.purge(ctx, "")
|
||||
})
|
||||
}
|
||||
u, uRemote, err := f.findUpstream(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return u.purge(ctx, uRemote)
|
||||
}
|
||||
|
||||
// Copy src to this remote using server-side copy operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
|
||||
dstU, dstRemote, err := f.findUpstream(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
do := dstU.f.Features().Copy
|
||||
if do == nil {
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
|
||||
o, err := do(ctx, srcObj.Object, dstRemote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dstU.newObject(o), nil
|
||||
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't move - not same remote type")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
dstU, dstRemote, err := f.findUpstream(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
do := dstU.f.Features().Move
|
||||
useCopy := false
|
||||
if do == nil {
|
||||
do = dstU.f.Features().Copy
|
||||
if do == nil {
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
useCopy = true
|
||||
}
|
||||
|
||||
o, err := do(ctx, srcObj.Object, dstRemote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If did Copy then remove the source object
|
||||
if useCopy {
|
||||
err = srcObj.Remove(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return dstU.newObject(o), nil
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server-side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantDirMove
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||
// defer log.Trace(f, "src=%v, srcRemote=%q, dstRemote=%q", src, srcRemote, dstRemote)("err=%v", &err)
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
|
||||
dstU, dstURemote, err := f.findUpstream(dstRemote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
srcU, srcURemote, err := srcFs.findUpstream(srcRemote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
do := dstU.f.Features().DirMove
|
||||
if do == nil {
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
|
||||
fs.Logf(dstU.f, "srcU.f=%v, srcURemote=%q, dstURemote=%q", srcU.f, srcURemote, dstURemote)
|
||||
return do(ctx, srcU.f, srcURemote, dstURemote)
|
||||
}
|
||||
|
||||
// ChangeNotify calls the passed function with a path
|
||||
// that has had changes. If the implementation
|
||||
// uses polling, it should adhere to the given interval.
|
||||
// At least one value will be written to the channel,
|
||||
// specifying the initial value and updated values might
|
||||
// follow. A 0 Duration should pause the polling.
|
||||
// The ChangeNotify implementation must empty the channel
|
||||
// regularly. When the channel gets closed, the implementation
|
||||
// should stop polling and release resources.
|
||||
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), ch <-chan time.Duration) {
|
||||
var uChans []chan time.Duration
|
||||
|
||||
for _, u := range f.upstreams {
|
||||
u := u
|
||||
if do := u.f.Features().ChangeNotify; do != nil {
|
||||
ch := make(chan time.Duration)
|
||||
uChans = append(uChans, ch)
|
||||
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
|
||||
newPath, err := u.pathAdjustment.do(path)
|
||||
if err != nil {
|
||||
fs.Logf(f, "ChangeNotify: unable to process %q: %s", path, err)
|
||||
return
|
||||
}
|
||||
fs.Debugf(f, "ChangeNotify: path %q entryType %d", newPath, entryType)
|
||||
notifyFunc(newPath, entryType)
|
||||
}
|
||||
do(ctx, wrappedNotifyFunc, ch)
|
||||
}
|
||||
}
|
||||
|
||||
go func() {
|
||||
for i := range ch {
|
||||
for _, c := range uChans {
|
||||
c <- i
|
||||
}
|
||||
}
|
||||
for _, c := range uChans {
|
||||
close(c)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// DirCacheFlush resets the directory cache - used in testing
|
||||
// as an optional interface
|
||||
func (f *Fs) DirCacheFlush() {
|
||||
ctx := context.Background()
|
||||
_ = f.multithread(ctx, func(ctx context.Context, u *upstream) error {
|
||||
if do := u.f.Features().DirCacheFlush; do != nil {
|
||||
do()
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
|
||||
srcPath := src.Remote()
|
||||
u, uRemote, err := f.findUpstream(srcPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
uSrc := operations.NewOverrideRemote(src, uRemote)
|
||||
var o fs.Object
|
||||
if stream {
|
||||
o, err = u.f.Features().PutStream(ctx, in, uSrc, options...)
|
||||
} else {
|
||||
o, err = u.f.Put(ctx, in, uSrc, options...)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return u.newObject(o), nil
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
o, err := f.NewObject(ctx, src.Remote())
|
||||
switch err {
|
||||
case nil:
|
||||
return o, o.Update(ctx, in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
return f.put(ctx, in, src, false, options...)
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
o, err := f.NewObject(ctx, src.Remote())
|
||||
switch err {
|
||||
case nil:
|
||||
return o, o.Update(ctx, in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
return f.put(ctx, in, src, true, options...)
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// About gets quota information from the Fs
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
usage := &fs.Usage{
|
||||
Total: new(int64),
|
||||
Used: new(int64),
|
||||
Trashed: new(int64),
|
||||
Other: new(int64),
|
||||
Free: new(int64),
|
||||
Objects: new(int64),
|
||||
}
|
||||
for _, u := range f.upstreams {
|
||||
doAbout := u.f.Features().About
|
||||
if doAbout == nil {
|
||||
continue
|
||||
}
|
||||
usg, err := doAbout(ctx)
|
||||
if errors.Is(err, fs.ErrorDirNotFound) {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if usg.Total != nil && usage.Total != nil {
|
||||
*usage.Total += *usg.Total
|
||||
} else {
|
||||
usage.Total = nil
|
||||
}
|
||||
if usg.Used != nil && usage.Used != nil {
|
||||
*usage.Used += *usg.Used
|
||||
} else {
|
||||
usage.Used = nil
|
||||
}
|
||||
if usg.Trashed != nil && usage.Trashed != nil {
|
||||
*usage.Trashed += *usg.Trashed
|
||||
} else {
|
||||
usage.Trashed = nil
|
||||
}
|
||||
if usg.Other != nil && usage.Other != nil {
|
||||
*usage.Other += *usg.Other
|
||||
} else {
|
||||
usage.Other = nil
|
||||
}
|
||||
if usg.Free != nil && usage.Free != nil {
|
||||
*usage.Free += *usg.Free
|
||||
} else {
|
||||
usage.Free = nil
|
||||
}
|
||||
if usg.Objects != nil && usage.Objects != nil {
|
||||
*usage.Objects += *usg.Objects
|
||||
} else {
|
||||
usage.Objects = nil
|
||||
}
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// Wraps entries for this upstream
|
||||
func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.DirEntries, error) {
|
||||
for i, entry := range entries {
|
||||
switch x := entry.(type) {
|
||||
case fs.Object:
|
||||
entries[i] = u.newObject(x)
|
||||
case fs.Directory:
|
||||
newDir := fs.NewDirCopy(ctx, x)
|
||||
newPath, err := u.pathAdjustment.do(newDir.Remote())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newDir.SetRemote(newPath)
|
||||
entries[i] = newDir
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown entry type %T", entry)
|
||||
}
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
|
||||
if f.root == "" && dir == "" {
|
||||
entries = make(fs.DirEntries, 0, len(f.upstreams))
|
||||
for combineDir := range f.upstreams {
|
||||
d := fs.NewDir(combineDir, f.when)
|
||||
entries = append(entries, d)
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
u, uRemote, err := f.findUpstream(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entries, err = u.f.List(ctx, uRemote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return u.wrapEntries(ctx, entries)
|
||||
}
|
||||
|
||||
// ListR lists the objects and directories of the Fs starting
|
||||
// from dir recursively into out.
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
//
|
||||
// It should call callback for each tranche of entries read.
|
||||
// These need not be returned in any particular order. If
|
||||
// callback returns an error then the listing will stop
|
||||
// immediately.
|
||||
//
|
||||
// Don't implement this unless you have a more efficient way
|
||||
// of listing recursively than doing a directory traversal.
|
||||
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
||||
// defer log.Trace(f, "dir=%q, callback=%v", dir, callback)("err=%v", &err)
|
||||
if f.root == "" && dir == "" {
|
||||
rootEntries, err := f.List(ctx, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = callback(rootEntries)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var mu sync.Mutex
|
||||
syncCallback := func(entries fs.DirEntries) error {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return callback(entries)
|
||||
}
|
||||
err = f.multithread(ctx, func(ctx context.Context, u *upstream) error {
|
||||
return f.ListR(ctx, u.dir, syncCallback)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
u, uRemote, err := f.findUpstream(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
wrapCallback := func(entries fs.DirEntries) error {
|
||||
entries, err := u.wrapEntries(ctx, entries)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return callback(entries)
|
||||
}
|
||||
if do := u.f.Features().ListR; do != nil {
|
||||
err = do(ctx, uRemote, wrapCallback)
|
||||
} else {
|
||||
err = walk.ListR(ctx, u.f, uRemote, true, -1, walk.ListAll, wrapCallback)
|
||||
}
|
||||
if err == fs.ErrorDirNotFound {
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// NewObject creates a new remote combine file object
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
u, uRemote, err := f.findUpstream(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if uRemote == "" || strings.HasSuffix(uRemote, "/") {
|
||||
return nil, fs.ErrorIsDir
|
||||
}
|
||||
o, err := u.f.NewObject(ctx, uRemote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return u.newObject(o), nil
|
||||
}
|
||||
|
||||
// Precision is the greatest Precision of all upstreams
func (f *Fs) Precision() time.Duration {
    var greatestPrecision time.Duration
    for _, u := range f.upstreams {
        uPrecision := u.f.Precision()
        if uPrecision > greatestPrecision {
            greatestPrecision = uPrecision
        }
    }
    return greatestPrecision
}

// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
    return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
        if do := u.f.Features().Shutdown; do != nil {
            return do(ctx)
        }
        return nil
    })
}

// Object describes a wrapped Object
//
// This is a wrapped Object which knows its path prefix
type Object struct {
    fs.Object
    u *upstream
}

func (u *upstream) newObject(o fs.Object) *Object {
    return &Object{
        Object: o,
        u:      u,
    }
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
    return o.u.parent
}

// String returns the remote path
func (o *Object) String() string {
    return o.Remote()
}

// Remote returns the remote path
func (o *Object) Remote() string {
    newPath, err := o.u.pathAdjustment.do(o.Object.String())
    if err != nil {
        fs.Errorf(o, "Bad object: %v", err)
        return err.Error()
    }
    return newPath
}

// MimeType returns the content type of the Object if known
func (o *Object) MimeType(ctx context.Context) (mimeType string) {
    if do, ok := o.Object.(fs.MimeTyper); ok {
        mimeType = do.MimeType(ctx)
    }
    return mimeType
}

// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
    return o.Object
}

// GetTier returns storage tier or class of the Object
func (o *Object) GetTier() string {
    do, ok := o.Object.(fs.GetTierer)
    if !ok {
        return ""
    }
    return do.GetTier()
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
    do, ok := o.Object.(fs.IDer)
    if !ok {
        return ""
    }
    return do.ID()
}

// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
    do, ok := o.Object.(fs.Metadataer)
    if !ok {
        return nil, nil
    }
    return do.Metadata(ctx)
}

// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *Object) SetTier(tier string) error {
    do, ok := o.Object.(fs.SetTierer)
    if !ok {
        return errors.New("underlying remote does not support SetTier")
    }
    return do.SetTier(tier)
}

// Check the interfaces are satisfied
var (
    _ fs.Fs              = (*Fs)(nil)
    _ fs.Purger          = (*Fs)(nil)
    _ fs.PutStreamer     = (*Fs)(nil)
    _ fs.Copier          = (*Fs)(nil)
    _ fs.Mover           = (*Fs)(nil)
    _ fs.DirMover        = (*Fs)(nil)
    _ fs.DirCacheFlusher = (*Fs)(nil)
    _ fs.ChangeNotifier  = (*Fs)(nil)
    _ fs.Abouter         = (*Fs)(nil)
    _ fs.ListRer         = (*Fs)(nil)
    _ fs.Shutdowner      = (*Fs)(nil)
    _ fs.FullObject      = (*Object)(nil)
)

94 backend/combine/combine_internal_test.go Normal file
@@ -0,0 +1,94 @@
package combine

import (
    "fmt"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestAdjustmentDo(t *testing.T) {
    for _, test := range []struct {
        root       string
        mountpoint string
        in         string
        want       string
        wantErr    error
    }{
        {
            root:       "",
            mountpoint: "mountpoint",
            in:         "path/to/file.txt",
            want:       "mountpoint/path/to/file.txt",
        },
        {
            root:       "mountpoint",
            mountpoint: "mountpoint",
            in:         "path/to/file.txt",
            want:       "path/to/file.txt",
        },
        {
            root:       "mountpoint/path",
            mountpoint: "mountpoint",
            in:         "path/to/file.txt",
            want:       "to/file.txt",
        },
        {
            root:       "mountpoint/path",
            mountpoint: "mountpoint",
            in:         "wrongpath/to/file.txt",
            want:       "",
            wantErr:    errNotUnderRoot,
        },
    } {
        what := fmt.Sprintf("%+v", test)
        a := newAdjustment(test.root, test.mountpoint)
        got, gotErr := a.do(test.in)
        assert.Equal(t, test.wantErr, gotErr)
        assert.Equal(t, test.want, got, what)
    }
}

func TestAdjustmentUndo(t *testing.T) {
    for _, test := range []struct {
        root       string
        mountpoint string
        in         string
        want       string
        wantErr    error
    }{
        {
            root:       "",
            mountpoint: "mountpoint",
            in:         "mountpoint/path/to/file.txt",
            want:       "path/to/file.txt",
        },
        {
            root:       "mountpoint",
            mountpoint: "mountpoint",
            in:         "path/to/file.txt",
            want:       "path/to/file.txt",
        },
        {
            root:       "mountpoint/path",
            mountpoint: "mountpoint",
            in:         "to/file.txt",
            want:       "path/to/file.txt",
        },
        {
            root:       "wrongmountpoint/path",
            mountpoint: "mountpoint",
            in:         "to/file.txt",
            want:       "",
            wantErr:    errNotUnderRoot,
        },
    } {
        what := fmt.Sprintf("%+v", test)
        a := newAdjustment(test.root, test.mountpoint)
        got, gotErr := a.undo(test.in)
        assert.Equal(t, test.wantErr, gotErr)
        assert.Equal(t, test.want, got, what)
    }
}
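The test cases above pin down the intended behaviour of the path adjustment helper: do() maps a path as seen by an upstream into the combined remote rooted at root, and undo() reverses that mapping. A minimal usage sketch, assuming the newAdjustment/do/undo names from combine.go and reusing values from the third test case above:

    // Hypothetical illustration only - mirrors the third test case.
    a := newAdjustment("mountpoint/path", "mountpoint")
    combined, _ := a.do("path/to/file.txt") // "to/file.txt"
    original, _ := a.undo("to/file.txt")    // "path/to/file.txt"
    fmt.Println(combined, original)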
81 backend/combine/combine_test.go Normal file
@@ -0,0 +1,81 @@
// Test Combine filesystem interface
package combine_test

import (
    "testing"

    _ "github.com/rclone/rclone/backend/local"
    _ "github.com/rclone/rclone/backend/memory"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    if *fstest.RemoteName == "" {
        t.Skip("Skipping as -remote not set")
    }
    fstests.Run(t, &fstests.Opt{
        RemoteName:                   *fstest.RemoteName,
        UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
        UnimplementableObjectMethods: []string{"MimeType"},
    })
}

func TestLocal(t *testing.T) {
    if *fstest.RemoteName != "" {
        t.Skip("Skipping as -remote set")
    }
    dirs := MakeTestDirs(t, 3)
    upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=" + dirs[2]
    name := "TestCombineLocal"
    fstests.Run(t, &fstests.Opt{
        RemoteName: name + ":dir1",
        ExtraConfig: []fstests.ExtraConfigItem{
            {Name: name, Key: "type", Value: "combine"},
            {Name: name, Key: "upstreams", Value: upstreams},
        },
        QuickTestOK: true,
    })
}

func TestMemory(t *testing.T) {
    if *fstest.RemoteName != "" {
        t.Skip("Skipping as -remote set")
    }
    upstreams := "dir1=:memory:dir1 dir2=:memory:dir2 dir3=:memory:dir3"
    name := "TestCombineMemory"
    fstests.Run(t, &fstests.Opt{
        RemoteName: name + ":dir1",
        ExtraConfig: []fstests.ExtraConfigItem{
            {Name: name, Key: "type", Value: "combine"},
            {Name: name, Key: "upstreams", Value: upstreams},
        },
        QuickTestOK: true,
    })
}

func TestMixed(t *testing.T) {
    if *fstest.RemoteName != "" {
        t.Skip("Skipping as -remote set")
    }
    dirs := MakeTestDirs(t, 2)
    upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=:memory:dir3"
    name := "TestCombineMixed"
    fstests.Run(t, &fstests.Opt{
        RemoteName: name + ":dir1",
        ExtraConfig: []fstests.ExtraConfigItem{
            {Name: name, Key: "type", Value: "combine"},
            {Name: name, Key: "upstreams", Value: upstreams},
        },
    })
}

// MakeTestDirs makes directories in /tmp for testing
func MakeTestDirs(t *testing.T, n int) (dirs []string) {
    for i := 1; i <= n; i++ {
        dir := t.TempDir()
        dirs = append(dirs, dir)
    }
    return dirs
}
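The ExtraConfig items used in these tests correspond to the keys a user would set by hand; a hypothetical rclone.conf entry for a combine remote built from the same kind of upstreams string might look like this (section name and paths are illustrative only):

    [TestCombineLocal]
    type = combine
    upstreams = dir1=/tmp/dir1 dir2=/tmp/dir2 dir3=/tmp/dir3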
@@ -10,6 +10,7 @@ import (
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -21,7 +22,6 @@ import (
|
||||
"github.com/buengese/sgzip"
|
||||
"github.com/gabriel-vasile/mimetype"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/chunkedreader"
|
||||
@@ -53,7 +53,7 @@ const (
|
||||
Gzip = 2
|
||||
)
|
||||
|
||||
var nameRegexp = regexp.MustCompile("^(.+?)\\.([A-Za-z0-9-_]{11})$")
|
||||
var nameRegexp = regexp.MustCompile(`^(.+?)\.([A-Za-z0-9-_]{11})$`)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
@@ -70,6 +70,9 @@ func init() {
|
||||
Name: "compress",
|
||||
Description: "Compress a remote",
|
||||
NewFs: NewFs,
|
||||
MetadataInfo: &fs.MetadataInfo{
|
||||
Help: `Any metadata supported by the underlying remote is read and written.`,
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: "remote",
|
||||
Help: "Remote to compress.",
|
||||
@@ -143,7 +146,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
|
||||
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
|
||||
return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err)
|
||||
}
|
||||
|
||||
// Strip trailing slashes if they exist in rpath
|
||||
@@ -158,7 +161,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
wrappedFs, err = wInfo.NewFs(ctx, wName, remotePath, wConfig)
|
||||
}
|
||||
if err != nil && err != fs.ErrorIsFile {
|
||||
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
|
||||
return nil, fmt.Errorf("failed to make remote %s:%q to wrap: %w", wName, remotePath, err)
|
||||
}
|
||||
|
||||
// Create the wrapping fs
|
||||
@@ -180,6 +183,9 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
SetTier: true,
|
||||
BucketBased: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||
// We support reading MIME types no matter the wrapped fs
|
||||
f.features.ReadMimeType = true
|
||||
@@ -222,7 +228,7 @@ func processFileName(compressedFileName string) (origFileName string, extension
|
||||
// Separate the filename and size from the extension
|
||||
extensionPos := strings.LastIndex(compressedFileName, ".")
|
||||
if extensionPos == -1 {
|
||||
return "", "", 0, errors.New("File name has no extension")
|
||||
return "", "", 0, errors.New("file name has no extension")
|
||||
}
|
||||
extension = compressedFileName[extensionPos:]
|
||||
nameWithSize := compressedFileName[:extensionPos]
|
||||
@@ -231,11 +237,11 @@ func processFileName(compressedFileName string) (origFileName string, extension
|
||||
}
|
||||
match := nameRegexp.FindStringSubmatch(nameWithSize)
|
||||
if match == nil || len(match) != 3 {
|
||||
return "", "", 0, errors.New("Invalid filename")
|
||||
return "", "", 0, errors.New("invalid filename")
|
||||
}
|
||||
size, err := base64ToInt64(match[2])
|
||||
if err != nil {
|
||||
return "", "", 0, errors.New("Could not decode size")
|
||||
return "", "", 0, errors.New("could not decode size")
|
||||
}
|
||||
return match[1], gzFileExt, size, nil
|
||||
}
|
||||
@@ -304,7 +310,7 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
|
||||
case fs.Directory:
|
||||
f.addDir(&newEntries, x)
|
||||
default:
|
||||
return nil, errors.Errorf("Unknown object type %T", entry)
|
||||
return nil, fmt.Errorf("unknown object type %T", entry)
|
||||
}
|
||||
}
|
||||
return newEntries, nil
|
||||
@@ -401,6 +407,10 @@ func isCompressible(r io.Reader) (bool, error) {
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
err = w.Close()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
ratio := float64(n) / float64(b.Len())
|
||||
return ratio > minCompressionRatio, nil
|
||||
}
|
||||
@@ -410,7 +420,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
|
||||
srcHash := hasher.Sums()[ht]
|
||||
dstHash, err := o.Hash(ctx, ht)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to read destination hash")
|
||||
return fmt.Errorf("failed to read destination hash: %w", err)
|
||||
}
|
||||
if srcHash != "" && dstHash != "" && srcHash != dstHash {
|
||||
// remove object
|
||||
@@ -418,7 +428,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
||||
}
|
||||
return errors.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
|
||||
return fmt.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -462,10 +472,10 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
|
||||
_ = os.Remove(tempFile.Name())
|
||||
}()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Failed to create temporary local FS to spool file")
|
||||
return nil, fmt.Errorf("failed to create temporary local FS to spool file: %w", err)
|
||||
}
|
||||
if _, err = io.Copy(tempFile, in); err != nil {
|
||||
return nil, errors.Wrap(err, "Failed to write temporary local file")
|
||||
return nil, fmt.Errorf("failed to write temporary local file: %w", err)
|
||||
}
|
||||
if _, err = tempFile.Seek(0, 0); err != nil {
|
||||
return nil, err
|
||||
@@ -626,9 +636,11 @@ func (f *Fs) putMetadata(ctx context.Context, meta *ObjectMetadata, src fs.Objec
|
||||
// Put the data
|
||||
mo, err = put(ctx, metaReader, f.wrapInfo(src, makeMetadataName(src.Remote()), int64(len(data))), options...)
|
||||
if err != nil {
|
||||
removeErr := mo.Remove(ctx)
|
||||
if removeErr != nil {
|
||||
fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
|
||||
if mo != nil {
|
||||
removeErr := mo.Remove(ctx)
|
||||
if removeErr != nil {
|
||||
fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
@@ -714,7 +726,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
||||
if found && (oldObj.(*Object).meta.Mode != Uncompressed || compressible) {
|
||||
err = oldObj.(*Object).Object.Remove(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Could remove original object")
|
||||
return nil, fmt.Errorf("couldn't remove original object: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -723,7 +735,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
||||
if compressible {
|
||||
wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Couldn't rename streamed Object.")
|
||||
return nil, fmt.Errorf("couldn't rename streamed object: %w", err)
|
||||
}
|
||||
newObj.Object = wrapObj
|
||||
}
|
||||
@@ -900,7 +912,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
do := f.Fs.Features().CleanUp
|
||||
if do == nil {
|
||||
return errors.New("can't CleanUp: not supported by underlying remote")
|
||||
return errors.New("not supported by underlying remote")
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
@@ -909,7 +921,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
do := f.Fs.Features().About
|
||||
if do == nil {
|
||||
return nil, errors.New("can't About: not supported by underlying remote")
|
||||
return nil, errors.New("not supported by underlying remote")
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
@@ -1208,6 +1220,21 @@ func (o *Object) MimeType(ctx context.Context) string {
|
||||
return o.meta.MimeType
|
||||
}
|
||||
|
||||
// Metadata returns metadata for an object
|
||||
//
|
||||
// It should return nil if there is no Metadata
|
||||
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
|
||||
err := o.loadMetadataIfNotLoaded(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
do, ok := o.mo.(fs.Metadataer)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
return do.Metadata(ctx)
|
||||
}
|
||||
|
||||
// Hash returns the selected checksum of the file
|
||||
// If no checksum is available it returns ""
|
||||
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
||||
@@ -1354,6 +1381,51 @@ func (o *ObjectInfo) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
||||
return "", nil // cannot know the checksum
|
||||
}
|
||||
|
||||
// ID returns the ID of the Object if known, or "" if not
|
||||
func (o *ObjectInfo) ID() string {
|
||||
do, ok := o.src.(fs.IDer)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return do.ID()
|
||||
}
|
||||
|
||||
// MimeType returns the content type of the Object if
|
||||
// known, or "" if not
|
||||
func (o *ObjectInfo) MimeType(ctx context.Context) string {
|
||||
do, ok := o.src.(fs.MimeTyper)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return do.MimeType(ctx)
|
||||
}
|
||||
|
||||
// UnWrap returns the Object that this Object is wrapping or
|
||||
// nil if it isn't wrapping anything
|
||||
func (o *ObjectInfo) UnWrap() fs.Object {
|
||||
return fs.UnWrapObjectInfo(o.src)
|
||||
}
|
||||
|
||||
// Metadata returns metadata for an object
|
||||
//
|
||||
// It should return nil if there is no Metadata
|
||||
func (o *ObjectInfo) Metadata(ctx context.Context) (fs.Metadata, error) {
|
||||
do, ok := o.src.(fs.Metadataer)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
return do.Metadata(ctx)
|
||||
}
|
||||
|
||||
// GetTier returns storage tier or class of the Object
|
||||
func (o *ObjectInfo) GetTier() string {
|
||||
do, ok := o.src.(fs.GetTierer)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return do.GetTier()
|
||||
}
|
||||
|
||||
// ID returns the ID of the Object if known, or "" if not
|
||||
func (o *Object) ID() string {
|
||||
do, ok := o.Object.(fs.IDer)
|
||||
@@ -1406,11 +1478,6 @@ var (
|
||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
_ fs.ObjectInfo = (*ObjectInfo)(nil)
|
||||
_ fs.GetTierer = (*Object)(nil)
|
||||
_ fs.SetTierer = (*Object)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.ObjectUnWrapper = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
_ fs.MimeTyper = (*Object)(nil)
|
||||
_ fs.FullObjectInfo = (*ObjectInfo)(nil)
|
||||
_ fs.FullObject = (*Object)(nil)
|
||||
)
|
||||
|
||||
@@ -61,5 +61,6 @@ func TestRemoteGzip(t *testing.T) {
|
||||
{Name: name, Key: "remote", Value: tempdir},
|
||||
{Name: name, Key: "compression_mode", Value: "gzip"},
|
||||
},
|
||||
QuickTestOK: true,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -7,6 +7,8 @@ import (
|
||||
gocipher "crypto/cipher"
|
||||
"crypto/rand"
|
||||
"encoding/base32"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
@@ -15,7 +17,7 @@ import (
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/Max-Sum/base32768"
|
||||
"github.com/rclone/rclone/backend/crypt/pkcs7"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
@@ -94,12 +96,12 @@ func NewNameEncryptionMode(s string) (mode NameEncryptionMode, err error) {
|
||||
case "obfuscate":
|
||||
mode = NameEncryptionObfuscated
|
||||
default:
|
||||
err = errors.Errorf("Unknown file name encryption mode %q", s)
|
||||
err = fmt.Errorf("unknown file name encryption mode %q", s)
|
||||
}
|
||||
return mode, err
|
||||
}
|
||||
|
||||
// String turns mode into a human readable string
|
||||
// String turns mode into a human-readable string
|
||||
func (mode NameEncryptionMode) String() (out string) {
|
||||
switch mode {
|
||||
case NameEncryptionOff:
|
||||
@@ -114,6 +116,57 @@ func (mode NameEncryptionMode) String() (out string) {
|
||||
return out
|
||||
}
|
||||
|
||||
// fileNameEncoding are the encoding methods dealing with encrypted file names
|
||||
type fileNameEncoding interface {
|
||||
EncodeToString(src []byte) string
|
||||
DecodeString(s string) ([]byte, error)
|
||||
}
|
||||
|
||||
// caseInsensitiveBase32Encoding defines a file name encoding
|
||||
// using a modified version of standard base32 as described in
|
||||
// RFC4648
|
||||
//
|
||||
// The standard encoding is modified in two ways
|
||||
// * it becomes lower case (no-one likes upper case filenames!)
|
||||
// * we strip the padding character `=`
|
||||
type caseInsensitiveBase32Encoding struct{}
|
||||
|
||||
// EncodeToString encodes a string using the modified version of
|
||||
// base32 encoding.
|
||||
func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
|
||||
encoded := base32.HexEncoding.EncodeToString(src)
|
||||
encoded = strings.TrimRight(encoded, "=")
|
||||
return strings.ToLower(encoded)
|
||||
}
|
||||
|
||||
// DecodeString decodes a string as encoded by EncodeToString
|
||||
func (caseInsensitiveBase32Encoding) DecodeString(s string) ([]byte, error) {
|
||||
if strings.HasSuffix(s, "=") {
|
||||
return nil, ErrorBadBase32Encoding
|
||||
}
|
||||
// First figure out how many padding characters to add
|
||||
roundUpToMultipleOf8 := (len(s) + 7) &^ 7
|
||||
equals := roundUpToMultipleOf8 - len(s)
|
||||
s = strings.ToUpper(s) + "========"[:equals]
|
||||
return base32.HexEncoding.DecodeString(s)
|
||||
}
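// Worked example of the padding arithmetic above (added note, not part of
// the original change): for s = "64p0", len(s) = 4, so roundUpToMultipleOf8
// = (4+7)&^7 = 8 and equals = 4; four '=' characters are appended before the
// upper-cased string is passed to base32.HexEncoding.DecodeString.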
|
||||
|
||||
// NewNameEncoding creates a NameEncoding from a string
|
||||
func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
|
||||
s = strings.ToLower(s)
|
||||
switch s {
|
||||
case "base32":
|
||||
enc = caseInsensitiveBase32Encoding{}
|
||||
case "base64":
|
||||
enc = base64.RawURLEncoding
|
||||
case "base32768":
|
||||
enc = base32768.SafeEncoding
|
||||
default:
|
||||
err = fmt.Errorf("unknown file name encoding mode %q", s)
|
||||
}
|
||||
return enc, err
|
||||
}
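// A minimal usage sketch for the encoding selection introduced here
// (illustrative only, not part of the change):
//
//	enc, err := NewNameEncoding("base64")
//	if err != nil {
//		return err
//	}
//	name := enc.EncodeToString([]byte("hello")) // "aGVsbG8"
//	raw, err := enc.DecodeString(name)          // []byte("hello"), nil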
|
||||
|
||||
// Cipher defines an encoding and decoding cipher for the crypt backend
|
||||
type Cipher struct {
|
||||
dataKey [32]byte // Key for secretbox
|
||||
@@ -121,15 +174,17 @@ type Cipher struct {
|
||||
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
|
||||
block gocipher.Block
|
||||
mode NameEncryptionMode
|
||||
fileNameEnc fileNameEncoding
|
||||
buffers sync.Pool // encrypt/decrypt buffers
|
||||
cryptoRand io.Reader // read crypto random numbers from here
|
||||
dirNameEncrypt bool
|
||||
}
|
||||
|
||||
// newCipher initialises the cipher. If salt is "" then it uses a built-in salt value
|
||||
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) {
|
||||
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
|
||||
c := &Cipher{
|
||||
mode: mode,
|
||||
fileNameEnc: enc,
|
||||
cryptoRand: rand.Reader,
|
||||
dirNameEncrypt: dirNameEncrypt,
|
||||
}
|
||||
@@ -187,30 +242,6 @@ func (c *Cipher) putBlock(buf []byte) {
|
||||
c.buffers.Put(buf)
|
||||
}
|
||||
|
||||
// encodeFileName encodes a filename using a modified version of
|
||||
// standard base32 as described in RFC4648
|
||||
//
|
||||
// The standard encoding is modified in two ways
|
||||
// * it becomes lower case (no-one likes upper case filenames!)
|
||||
// * we strip the padding character `=`
|
||||
func encodeFileName(in []byte) string {
|
||||
encoded := base32.HexEncoding.EncodeToString(in)
|
||||
encoded = strings.TrimRight(encoded, "=")
|
||||
return strings.ToLower(encoded)
|
||||
}
|
||||
|
||||
// decodeFileName decodes a filename as encoded by encodeFileName
|
||||
func decodeFileName(in string) ([]byte, error) {
|
||||
if strings.HasSuffix(in, "=") {
|
||||
return nil, ErrorBadBase32Encoding
|
||||
}
|
||||
// First figure out how many padding characters to add
|
||||
roundUpToMultipleOf8 := (len(in) + 7) &^ 7
|
||||
equals := roundUpToMultipleOf8 - len(in)
|
||||
in = strings.ToUpper(in) + "========"[:equals]
|
||||
return base32.HexEncoding.DecodeString(in)
|
||||
}
|
||||
|
||||
// encryptSegment encrypts a path segment
|
||||
//
|
||||
// This uses EME with AES
|
||||
@@ -231,7 +262,7 @@ func (c *Cipher) encryptSegment(plaintext string) string {
|
||||
}
|
||||
paddedPlaintext := pkcs7.Pad(nameCipherBlockSize, []byte(plaintext))
|
||||
ciphertext := eme.Transform(c.block, c.nameTweak[:], paddedPlaintext, eme.DirectionEncrypt)
|
||||
return encodeFileName(ciphertext)
|
||||
return c.fileNameEnc.EncodeToString(ciphertext)
|
||||
}
|
||||
|
||||
// decryptSegment decrypts a path segment
|
||||
@@ -239,7 +270,7 @@ func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
|
||||
if ciphertext == "" {
|
||||
return "", nil
|
||||
}
|
||||
rawCiphertext, err := decodeFileName(ciphertext)
|
||||
rawCiphertext, err := c.fileNameEnc.DecodeString(ciphertext)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -580,7 +611,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
|
||||
func (n *nonce) fromReader(in io.Reader) error {
|
||||
read, err := io.ReadFull(in, (*n)[:])
|
||||
if read != fileNonceSize {
|
||||
return errors.Wrap(err, "short read of nonce")
|
||||
return fmt.Errorf("short read of nonce: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -956,7 +987,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
|
||||
// Re-open the underlying object with the offset given
|
||||
rc, err := fh.open(ctx, underlyingOffset, underlyingLimit)
|
||||
if err != nil {
|
||||
return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
|
||||
return 0, fh.finish(fmt.Errorf("couldn't reopen file with offset and limit: %w", err))
|
||||
}
|
||||
|
||||
// Set the file handle
|
||||
|
||||
@@ -4,13 +4,15 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base32"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/Max-Sum/base32768"
|
||||
"github.com/rclone/rclone/backend/crypt/pkcs7"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -45,11 +47,31 @@ func TestNewNameEncryptionModeString(t *testing.T) {
|
||||
assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3")
|
||||
}
|
||||
|
||||
func TestEncodeFileName(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expected string
|
||||
}{
|
||||
type EncodingTestCase struct {
|
||||
in string
|
||||
expected string
|
||||
}
|
||||
|
||||
func testEncodeFileName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
|
||||
for _, test := range testCases {
|
||||
enc, err := NewNameEncoding(encoding)
|
||||
assert.NoError(t, err, "There should be no error creating name encoder for "+encoding+".")
|
||||
actual := enc.EncodeToString([]byte(test.in))
|
||||
assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
|
||||
recovered, err := enc.DecodeString(test.expected)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", test.expected))
|
||||
if caseInsensitive {
|
||||
in := strings.ToUpper(test.expected)
|
||||
recovered, err = enc.DecodeString(in)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", in))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeFileNameBase32(t *testing.T) {
|
||||
testEncodeFileName(t, "base32", []EncodingTestCase{
|
||||
{"", ""},
|
||||
{"1", "64"},
|
||||
{"12", "64p0"},
|
||||
@@ -67,20 +89,56 @@ func TestEncodeFileName(t *testing.T) {
|
||||
{"12345678901234", "64p36d1l6orjge9g64p36d0"},
|
||||
{"123456789012345", "64p36d1l6orjge9g64p36d1l"},
|
||||
{"1234567890123456", "64p36d1l6orjge9g64p36d1l6o"},
|
||||
} {
|
||||
actual := encodeFileName([]byte(test.in))
|
||||
assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
|
||||
recovered, err := decodeFileName(test.expected)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", test.expected))
|
||||
in := strings.ToUpper(test.expected)
|
||||
recovered, err = decodeFileName(in)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", in))
|
||||
}
|
||||
}, true)
|
||||
}
|
||||
|
||||
func TestDecodeFileName(t *testing.T) {
|
||||
func TestEncodeFileNameBase64(t *testing.T) {
|
||||
testEncodeFileName(t, "base64", []EncodingTestCase{
|
||||
{"", ""},
|
||||
{"1", "MQ"},
|
||||
{"12", "MTI"},
|
||||
{"123", "MTIz"},
|
||||
{"1234", "MTIzNA"},
|
||||
{"12345", "MTIzNDU"},
|
||||
{"123456", "MTIzNDU2"},
|
||||
{"1234567", "MTIzNDU2Nw"},
|
||||
{"12345678", "MTIzNDU2Nzg"},
|
||||
{"123456789", "MTIzNDU2Nzg5"},
|
||||
{"1234567890", "MTIzNDU2Nzg5MA"},
|
||||
{"12345678901", "MTIzNDU2Nzg5MDE"},
|
||||
{"123456789012", "MTIzNDU2Nzg5MDEy"},
|
||||
{"1234567890123", "MTIzNDU2Nzg5MDEyMw"},
|
||||
{"12345678901234", "MTIzNDU2Nzg5MDEyMzQ"},
|
||||
{"123456789012345", "MTIzNDU2Nzg5MDEyMzQ1"},
|
||||
{"1234567890123456", "MTIzNDU2Nzg5MDEyMzQ1Ng"},
|
||||
}, false)
|
||||
}
|
||||
|
||||
func TestEncodeFileNameBase32768(t *testing.T) {
|
||||
testEncodeFileName(t, "base32768", []EncodingTestCase{
|
||||
{"", ""},
|
||||
{"1", "㼿"},
|
||||
{"12", "㻙ɟ"},
|
||||
{"123", "㻙ⲿ"},
|
||||
{"1234", "㻙ⲍƟ"},
|
||||
{"12345", "㻙ⲍ⍟"},
|
||||
{"123456", "㻙ⲍ⍆ʏ"},
|
||||
{"1234567", "㻙ⲍ⍆觟"},
|
||||
{"12345678", "㻙ⲍ⍆觓ɧ"},
|
||||
{"123456789", "㻙ⲍ⍆觓栯"},
|
||||
{"1234567890", "㻙ⲍ⍆觓栩ɣ"},
|
||||
{"12345678901", "㻙ⲍ⍆觓栩朧"},
|
||||
{"123456789012", "㻙ⲍ⍆觓栩朤ʅ"},
|
||||
{"1234567890123", "㻙ⲍ⍆觓栩朤談"},
|
||||
{"12345678901234", "㻙ⲍ⍆觓栩朤諆ɔ"},
|
||||
{"123456789012345", "㻙ⲍ⍆觓栩朤諆媕"},
|
||||
{"1234567890123456", "㻙ⲍ⍆觓栩朤諆媕䆿"},
|
||||
}, false)
|
||||
}
|
||||
|
||||
func TestDecodeFileNameBase32(t *testing.T) {
|
||||
enc, err := NewNameEncoding("base32")
|
||||
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
|
||||
// We've tested decoding the valid ones above, now concentrate on the invalid ones
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
@@ -90,17 +148,65 @@ func TestDecodeFileName(t *testing.T) {
|
||||
{"!", base32.CorruptInputError(0)},
|
||||
{"hello=hello", base32.CorruptInputError(5)},
|
||||
} {
|
||||
actual, actualErr := decodeFileName(test.in)
|
||||
actual, actualErr := enc.DecodeString(test.in)
|
||||
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncryptSegment(t *testing.T) {
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true)
|
||||
func TestDecodeFileNameBase64(t *testing.T) {
|
||||
enc, err := NewNameEncoding("base64")
|
||||
assert.NoError(t, err, "There should be no error creating name encoder for base64.")
|
||||
// We've tested decoding the valid ones above, now concentrate on the invalid ones
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expected string
|
||||
in string
|
||||
expectedErr error
|
||||
}{
|
||||
{"64=", base64.CorruptInputError(2)},
|
||||
{"!", base64.CorruptInputError(0)},
|
||||
{"Hello=Hello", base64.CorruptInputError(5)},
|
||||
} {
|
||||
actual, actualErr := enc.DecodeString(test.in)
|
||||
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeFileNameBase32768(t *testing.T) {
|
||||
enc, err := NewNameEncoding("base32768")
|
||||
assert.NoError(t, err, "There should be no error creating name encoder for base32768.")
|
||||
// We've tested decoding the valid ones above, now concentrate on the invalid ones
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expectedErr error
|
||||
}{
|
||||
{"㼿c", base32768.CorruptInputError(1)},
|
||||
{"!", base32768.CorruptInputError(0)},
|
||||
{"㻙ⲿ=㻙ⲿ", base32768.CorruptInputError(2)},
|
||||
} {
|
||||
actual, actualErr := enc.DecodeString(test.in)
|
||||
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
|
||||
}
|
||||
}
|
||||
|
||||
func testEncryptSegment(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
|
||||
enc, _ := NewNameEncoding(encoding)
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
for _, test := range testCases {
|
||||
actual := c.encryptSegment(test.in)
|
||||
assert.Equal(t, test.expected, actual, fmt.Sprintf("Testing %q", test.in))
|
||||
recovered, err := c.decryptSegment(test.expected)
|
||||
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", test.expected))
|
||||
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", test.expected))
|
||||
if caseInsensitive {
|
||||
in := strings.ToUpper(test.expected)
|
||||
recovered, err = c.decryptSegment(in)
|
||||
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", in))
|
||||
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", in))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncryptSegmentBase32(t *testing.T) {
|
||||
testEncryptSegment(t, "base32", []EncodingTestCase{
|
||||
{"", ""},
|
||||
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
|
||||
{"12", "l42g6771hnv3an9cgc8cr2n1ng"},
|
||||
@@ -118,26 +224,61 @@ func TestEncryptSegment(t *testing.T) {
|
||||
{"12345678901234", "moq0uqdlqrblrc5pa5u5c7hq9g"},
|
||||
{"123456789012345", "eeam3li4rnommi3a762h5n7meg"},
|
||||
{"1234567890123456", "mijbj0frqf6ms7frcr6bd9h0env53jv96pjaaoirk7forcgpt70g"},
|
||||
} {
|
||||
actual := c.encryptSegment(test.in)
|
||||
assert.Equal(t, test.expected, actual, fmt.Sprintf("Testing %q", test.in))
|
||||
recovered, err := c.decryptSegment(test.expected)
|
||||
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", test.expected))
|
||||
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", test.expected))
|
||||
in := strings.ToUpper(test.expected)
|
||||
recovered, err = c.decryptSegment(in)
|
||||
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", in))
|
||||
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", in))
|
||||
}
|
||||
}, true)
|
||||
}
|
||||
|
||||
func TestDecryptSegment(t *testing.T) {
|
||||
func TestEncryptSegmentBase64(t *testing.T) {
|
||||
testEncryptSegment(t, "base64", []EncodingTestCase{
|
||||
{"", ""},
|
||||
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
|
||||
{"12", "qQUDHOGN_jVdLIMQzYrhvA"},
|
||||
{"123", "1CxFf2Mti1xIPYlGruDh-A"},
|
||||
{"1234", "RL-xOTmsxsG7kuTy2XJUxw"},
|
||||
{"12345", "3FP_GHoeBJdq0yLgaED8IQ"},
|
||||
{"123456", "Xc4T1Gqrs3OVYnrE6dpEWQ"},
|
||||
{"1234567", "uZeEzssOnDWHEOzLqjwpog"},
|
||||
{"12345678", "8noiTP5WkkbEuijsPhOpxQ"},
|
||||
{"123456789", "GeNxgLA0wiaGAKU3U7qL4Q"},
|
||||
{"1234567890", "x1DUhdmqoVWYVBLD3dha-A"},
|
||||
{"12345678901", "iEyP_3BZR6vvv_2WM6NbZw"},
|
||||
{"123456789012", "4OPGvS4SZdjvS568APUaFw"},
|
||||
{"1234567890123", "Y8c5Wr8OhYYUo7fPwdojdg"},
|
||||
{"12345678901234", "tjQPabXW112wuVF8Vh46TA"},
|
||||
{"123456789012345", "c5Vh1kTd8WtIajmFEtz2dA"},
|
||||
{"1234567890123456", "tKa5gfvTzW4d-2bMtqYgdf5Rz-k2ZqViW6HfjbIZ6cE"},
|
||||
}, false)
|
||||
}
|
||||
|
||||
func TestEncryptSegmentBase32768(t *testing.T) {
|
||||
testEncryptSegment(t, "base32768", []EncodingTestCase{
|
||||
{"", ""},
|
||||
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
|
||||
{"12", "竢朧䉱虃光塬䟛⣡蓟"},
|
||||
{"123", "遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
|
||||
{"1234", "䢟銮䵵狌㐜燳谒颴詟"},
|
||||
{"12345", "钉Ꞇ㖃蚩憶狫朰杜㜿"},
|
||||
{"123456", "啇ᚵⵕ憗䋫➫➓肤卟"},
|
||||
{"1234567", "茫螓翁連劘樓㶔抉矟"},
|
||||
{"12345678", "龝☳䘊辄岅較络㧩襟"},
|
||||
{"123456789", "ⲱ苀㱆犂媐Ꮤ锇惫靟"},
|
||||
{"1234567890", "計宁憕偵匢皫╛纺ꌟ"},
|
||||
{"12345678901", "檆䨿鑫㪺藝ꡖ勇䦛婟"},
|
||||
{"123456789012", "雑頏䰂䲝淚哚鹡魺⪟"},
|
||||
{"1234567890123", "塃璶繁躸圅㔟䗃肃懟"},
|
||||
{"12345678901234", "腺ᕚ崚鏕鏥讥鼌䑺䲿"},
|
||||
{"123456789012345", "怪绕滻蕶肣但⠥荖惟"},
|
||||
{"1234567890123456", "肳哀旚挶靏鏻㾭䱠慟㪳ꏆ賊兲铧敻塹魀ʟ"},
|
||||
}, false)
|
||||
}
|
||||
|
||||
func TestDecryptSegmentBase32(t *testing.T) {
|
||||
// We've tested the forwards above, now concentrate on the errors
|
||||
longName := make([]byte, 3328)
|
||||
for i := range longName {
|
||||
longName[i] = 'a'
|
||||
}
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true)
|
||||
enc, _ := NewNameEncoding("base32")
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expectedErr error
|
||||
@@ -145,118 +286,371 @@ func TestDecryptSegment(t *testing.T) {
|
||||
{"64=", ErrorBadBase32Encoding},
|
||||
{"!", base32.CorruptInputError(0)},
|
||||
{string(longName), ErrorTooLongAfterDecode},
|
||||
{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
|
||||
{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
|
||||
{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
|
||||
{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
|
||||
{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
|
||||
{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
|
||||
} {
|
||||
actual, actualErr := c.decryptSegment(test.in)
|
||||
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncryptFileName(t *testing.T) {
|
||||
func TestDecryptSegmentBase64(t *testing.T) {
|
||||
// We've tested the forwards above, now concentrate on the errors
|
||||
longName := make([]byte, 2816)
|
||||
for i := range longName {
|
||||
longName[i] = 'a'
|
||||
}
|
||||
enc, _ := NewNameEncoding("base64")
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expectedErr error
|
||||
}{
|
||||
{"6H=", base64.CorruptInputError(2)},
|
||||
{"!", base64.CorruptInputError(0)},
|
||||
{string(longName), ErrorTooLongAfterDecode},
|
||||
{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
|
||||
{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
|
||||
{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
|
||||
} {
|
||||
actual, actualErr := c.decryptSegment(test.in)
|
||||
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecryptSegmentBase32768(t *testing.T) {
|
||||
// We've tested the forwards above, now concentrate on the errors
|
||||
longName := strings.Repeat("怪", 1280)
|
||||
enc, _ := NewNameEncoding("base32768")
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expectedErr error
|
||||
}{
|
||||
{"怪=", base32768.CorruptInputError(1)},
|
||||
{"!", base32768.CorruptInputError(0)},
|
||||
{longName, ErrorTooLongAfterDecode},
|
||||
{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
|
||||
{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
|
||||
{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
|
||||
} {
|
||||
actual, actualErr := c.decryptSegment(test.in)
|
||||
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
|
||||
}
|
||||
}
|
||||
|
||||
func testStandardEncryptFileName(t *testing.T, encoding string, testCasesEncryptDir []EncodingTestCase, testCasesNoEncryptDir []EncodingTestCase) {
|
||||
// First standard mode
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true)
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
|
||||
enc, _ := NewNameEncoding(encoding)
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
for _, test := range testCasesEncryptDir {
|
||||
assert.Equal(t, test.expected, c.EncryptFileName(test.in))
|
||||
}
|
||||
// Standard mode with directory name encryption off
|
||||
c, _ = newCipher(NameEncryptionStandard, "", "", false)
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
|
||||
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
|
||||
assert.Equal(t, "1/12/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
|
||||
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
|
||||
// Now off mode
|
||||
c, _ = newCipher(NameEncryptionOff, "", "", true)
|
||||
c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
|
||||
for _, test := range testCasesNoEncryptDir {
|
||||
assert.Equal(t, test.expected, c.EncryptFileName(test.in))
|
||||
}
|
||||
}
|
||||
|
||||
func TestStandardEncryptFileNameBase32(t *testing.T) {
|
||||
testStandardEncryptFileName(t, "base32", []EncodingTestCase{
|
||||
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
|
||||
{"1/12", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng"},
|
||||
{"1/12/123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0"},
|
||||
{"1-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123"},
|
||||
{"1/12-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123"},
|
||||
}, []EncodingTestCase{
|
||||
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
|
||||
{"1/12", "1/l42g6771hnv3an9cgc8cr2n1ng"},
|
||||
{"1/12/123", "1/12/qgm4avr35m5loi1th53ato71v0"},
|
||||
{"1-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123"},
|
||||
{"1/12-v2001-02-03-040506-123", "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestStandardEncryptFileNameBase64(t *testing.T) {
|
||||
testStandardEncryptFileName(t, "base64", []EncodingTestCase{
|
||||
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
|
||||
{"1/12", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA"},
|
||||
{"1/12/123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A"},
|
||||
{"1-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw-v2001-02-03-040506-123"},
|
||||
{"1/12-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA-v2001-02-03-040506-123"},
|
||||
}, []EncodingTestCase{
|
||||
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
|
||||
{"1/12", "1/qQUDHOGN_jVdLIMQzYrhvA"},
|
||||
{"1/12/123", "1/12/1CxFf2Mti1xIPYlGruDh-A"},
|
||||
{"1-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw-v2001-02-03-040506-123"},
|
||||
{"1/12-v2001-02-03-040506-123", "1/qQUDHOGN_jVdLIMQzYrhvA-v2001-02-03-040506-123"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestStandardEncryptFileNameBase32768(t *testing.T) {
|
||||
testStandardEncryptFileName(t, "base32768", []EncodingTestCase{
|
||||
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
|
||||
{"1/12", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟"},
|
||||
{"1/12/123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
|
||||
{"1-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟-v2001-02-03-040506-123"},
|
||||
{"1/12-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟-v2001-02-03-040506-123"},
|
||||
}, []EncodingTestCase{
|
||||
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
|
||||
{"1/12", "1/竢朧䉱虃光塬䟛⣡蓟"},
|
||||
{"1/12/123", "1/12/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
|
||||
{"1-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟-v2001-02-03-040506-123"},
|
||||
{"1/12-v2001-02-03-040506-123", "1/竢朧䉱虃光塬䟛⣡蓟-v2001-02-03-040506-123"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestNonStandardEncryptFileName(t *testing.T) {
|
||||
// Off mode
|
||||
c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
|
||||
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
|
||||
// Obfuscation mode
|
||||
c, _ = newCipher(NameEncryptionObfuscated, "", "", true)
|
||||
c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
|
||||
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
|
||||
assert.Equal(t, "49.6/99.23/150.890/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
|
||||
assert.Equal(t, "49.6/99.23/150.890/162.uryyB-v2001-02-03-040506-123.GKG", c.EncryptFileName("1/12/123/hello-v2001-02-03-040506-123.txt"))
|
||||
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
|
||||
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
|
||||
// Obfuscation mode with directory name encryption off
|
||||
c, _ = newCipher(NameEncryptionObfuscated, "", "", false)
|
||||
c, _ = newCipher(NameEncryptionObfuscated, "", "", false, nil)
|
||||
assert.Equal(t, "1/12/123/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
|
||||
assert.Equal(t, "1/12/123/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
|
||||
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
|
||||
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
|
||||
}
|
||||
|
||||
func TestDecryptFileName(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
mode NameEncryptionMode
|
||||
dirNameEncrypt bool
|
||||
in string
|
||||
expected string
|
||||
expectedErr error
|
||||
}{
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
|
||||
{NameEncryptionStandard, false, "1/12/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", "1-v2001-02-03-040506-123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
|
||||
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
|
||||
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
|
||||
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
|
||||
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
|
||||
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
|
||||
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
|
||||
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
|
||||
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
|
||||
} {
|
||||
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
|
||||
func testStandardDecryptFileName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
|
||||
enc, _ := NewNameEncoding(encoding)
|
||||
for _, test := range testCases {
|
||||
// Test when dirNameEncrypt=true
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
actual, actualErr := c.DecryptFileName(test.in)
|
||||
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
|
||||
assert.Equal(t, test.expected, actual, what)
|
||||
assert.Equal(t, test.expectedErr, actualErr, what)
|
||||
assert.NoError(t, actualErr)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
if caseInsensitive {
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
actual, actualErr := c.DecryptFileName(strings.ToUpper(test.in))
|
||||
assert.NoError(t, actualErr)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
}
|
||||
// Add a character should raise ErrorNotAMultipleOfBlocksize
|
||||
actual, actualErr = c.DecryptFileName(enc.EncodeToString([]byte("1")) + test.in)
|
||||
assert.Equal(t, ErrorNotAMultipleOfBlocksize, actualErr)
|
||||
assert.Equal(t, "", actual)
|
||||
// Test when dirNameEncrypt=false
|
||||
noDirEncryptIn := test.in
|
||||
if strings.LastIndex(test.expected, "/") != -1 {
|
||||
noDirEncryptIn = test.expected[:strings.LastIndex(test.expected, "/")] + test.in[strings.LastIndex(test.in, "/"):]
|
||||
}
|
||||
c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
|
||||
actual, actualErr = c.DecryptFileName(noDirEncryptIn)
|
||||
assert.NoError(t, actualErr)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStandardDecryptFileNameBase32(t *testing.T) {
|
||||
testStandardDecryptFileName(t, "base32", []EncodingTestCase{
|
||||
{"p0e52nreeaj0a5ea7s64m4j72s", "1"},
|
||||
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12"},
|
||||
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123"},
|
||||
}, true)
|
||||
}
|
||||
|
||||
func TestStandardDecryptFileNameBase64(t *testing.T) {
|
||||
testStandardDecryptFileName(t, "base64", []EncodingTestCase{
|
||||
{"yBxRX25ypgUVyj8MSxJnFw", "1"},
|
||||
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA", "1/12"},
|
||||
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A", "1/12/123"},
|
||||
}, false)
|
||||
}
|
||||
|
||||
func TestStandardDecryptFileNameBase32768(t *testing.T) {
|
||||
testStandardDecryptFileName(t, "base32768", []EncodingTestCase{
|
||||
{"詮㪗鐮僀伎作㻖㢧⪟", "1"},
|
||||
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟", "1/12"},
|
||||
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ", "1/12/123"},
|
||||
}, false)
|
||||
}
|
||||
|
||||
func TestNonStandardDecryptFileName(t *testing.T) {
|
||||
for _, encoding := range []string{"base32", "base64", "base32768"} {
|
||||
enc, _ := NewNameEncoding(encoding)
|
||||
for _, test := range []struct {
|
||||
mode NameEncryptionMode
|
||||
dirNameEncrypt bool
|
||||
in string
|
||||
expected string
|
||||
expectedErr error
|
||||
}{
|
||||
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
|
||||
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
|
||||
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
|
||||
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
|
||||
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
|
||||
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
|
||||
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
|
||||
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
|
||||
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
|
||||
} {
|
||||
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
|
||||
actual, actualErr := c.DecryptFileName(test.in)
|
||||
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
|
||||
assert.Equal(t, test.expected, actual, what)
|
||||
assert.Equal(t, test.expectedErr, actualErr, what)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncDecMatches(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
mode NameEncryptionMode
|
||||
in string
|
||||
}{
|
||||
{NameEncryptionStandard, "1/2/3/4"},
|
||||
{NameEncryptionOff, "1/2/3/4"},
|
||||
{NameEncryptionObfuscated, "1/2/3/4/!hello\u03a0"},
|
||||
{NameEncryptionObfuscated, "Avatar The Last Airbender"},
|
||||
} {
|
||||
c, _ := newCipher(test.mode, "", "", true)
|
||||
out, err := c.DecryptFileName(c.EncryptFileName(test.in))
|
||||
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
|
||||
assert.Equal(t, out, test.in, what)
|
||||
assert.Equal(t, err, nil, what)
|
||||
for _, encoding := range []string{"base32", "base64", "base32768"} {
|
||||
enc, _ := NewNameEncoding(encoding)
|
||||
for _, test := range []struct {
|
||||
mode NameEncryptionMode
|
||||
in string
|
||||
}{
|
||||
{NameEncryptionStandard, "1/2/3/4"},
|
||||
{NameEncryptionOff, "1/2/3/4"},
|
||||
{NameEncryptionObfuscated, "1/2/3/4/!hello\u03a0"},
|
||||
{NameEncryptionObfuscated, "Avatar The Last Airbender"},
|
||||
} {
|
||||
c, _ := newCipher(test.mode, "", "", true, enc)
|
||||
out, err := c.DecryptFileName(c.EncryptFileName(test.in))
|
||||
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
|
||||
assert.Equal(t, out, test.in, what)
|
||||
assert.Equal(t, err, nil, what)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncryptDirName(t *testing.T) {
|
||||
func testStandardEncryptDirName(t *testing.T, encoding string, testCases []EncodingTestCase) {
|
||||
enc, _ := NewNameEncoding(encoding)
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
// First standard mode
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true)
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptDirName("1"))
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptDirName("1/12"))
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptDirName("1/12/123"))
|
||||
// Standard mode with dir name encryption off
|
||||
c, _ = newCipher(NameEncryptionStandard, "", "", false)
|
||||
assert.Equal(t, "1/12", c.EncryptDirName("1/12"))
|
||||
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
|
||||
// Now off mode
|
||||
c, _ = newCipher(NameEncryptionOff, "", "", true)
|
||||
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
|
||||
for _, test := range testCases {
|
||||
assert.Equal(t, test.expected, c.EncryptDirName(test.in))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecryptDirName(t *testing.T) {
|
||||
func TestStandardEncryptDirNameBase32(t *testing.T) {
|
||||
testStandardEncryptDirName(t, "base32", []EncodingTestCase{
|
||||
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
|
||||
{"1/12", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng"},
|
||||
{"1/12/123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestStandardEncryptDirNameBase64(t *testing.T) {
|
||||
testStandardEncryptDirName(t, "base64", []EncodingTestCase{
|
||||
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
|
||||
{"1/12", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA"},
|
||||
{"1/12/123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestStandardEncryptDirNameBase32768(t *testing.T) {
|
||||
testStandardEncryptDirName(t, "base32768", []EncodingTestCase{
|
||||
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
|
||||
{"1/12", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟"},
|
||||
{"1/12/123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestNonStandardEncryptDirName(t *testing.T) {
|
||||
for _, encoding := range []string{"base32", "base64", "base32768"} {
|
||||
enc, _ := NewNameEncoding(encoding)
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", false, enc)
|
||||
assert.Equal(t, "1/12", c.EncryptDirName("1/12"))
|
||||
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
|
||||
// Now off mode
|
||||
c, _ = newCipher(NameEncryptionOff, "", "", true, enc)
|
||||
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
|
||||
}
|
||||
}
|
||||
|
||||
func testStandardDecryptDirName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
|
||||
enc, _ := NewNameEncoding(encoding)
|
||||
for _, test := range testCases {
|
||||
// Test dirNameEncrypt=true
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
actual, actualErr := c.DecryptDirName(test.in)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
assert.NoError(t, actualErr)
|
||||
if caseInsensitive {
|
||||
actual, actualErr := c.DecryptDirName(strings.ToUpper(test.in))
|
||||
assert.Equal(t, actual, test.expected)
|
||||
assert.NoError(t, actualErr)
|
||||
}
|
||||
actual, actualErr = c.DecryptDirName(enc.EncodeToString([]byte("1")) + test.in)
|
||||
assert.Equal(t, "", actual)
|
||||
assert.Equal(t, ErrorNotAMultipleOfBlocksize, actualErr)
|
||||
// Test dirNameEncrypt=false
|
||||
c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
|
||||
actual, actualErr = c.DecryptDirName(test.in)
|
||||
assert.Equal(t, test.in, actual)
|
||||
assert.NoError(t, actualErr)
|
||||
actual, actualErr = c.DecryptDirName(test.expected)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
assert.NoError(t, actualErr)
|
||||
// Test dirNameEncrypt=false
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
enc, _ := NewNameEncoding(encoding)
|
||||
for _, test := range []struct {
|
||||
mode NameEncryptionMode
|
||||
dirNameEncrypt bool
|
||||
in string
|
||||
expected string
|
||||
expectedErr error
|
||||
}{
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
|
||||
{NameEncryptionStandard, false, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", nil},
|
||||
{NameEncryptionStandard, false, "1/12/123", "1/12/123", nil},
|
||||
} {
|
||||
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
|
||||
actual, actualErr := c.DecryptDirName(test.in)
|
||||
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
|
||||
assert.Equal(t, test.expected, actual, what)
|
||||
assert.Equal(t, test.expectedErr, actualErr, what)
|
||||
}
|
||||
*/
|
||||
|
||||
func TestStandardDecryptDirNameBase32(t *testing.T) {
|
||||
testStandardDecryptDirName(t, "base32", []EncodingTestCase{
|
||||
{"p0e52nreeaj0a5ea7s64m4j72s", "1"},
|
||||
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12"},
|
||||
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123"},
|
||||
}, true)
|
||||
}
|
||||
|
||||
func TestStandardDecryptDirNameBase64(t *testing.T) {
|
||||
testStandardDecryptDirName(t, "base64", []EncodingTestCase{
|
||||
{"yBxRX25ypgUVyj8MSxJnFw", "1"},
|
||||
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA", "1/12"},
|
||||
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A", "1/12/123"},
|
||||
}, false)
|
||||
}
|
||||
|
||||
func TestStandardDecryptDirNameBase32768(t *testing.T) {
|
||||
testStandardDecryptDirName(t, "base32768", []EncodingTestCase{
|
||||
{"詮㪗鐮僀伎作㻖㢧⪟", "1"},
|
||||
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟", "1/12"},
|
||||
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ", "1/12/123"},
|
||||
}, false)
|
||||
}
|
||||
|
||||
func TestNonStandardDecryptDirName(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
mode NameEncryptionMode
|
||||
dirNameEncrypt bool
|
||||
@@ -264,18 +658,11 @@ func TestDecryptDirName(t *testing.T) {
|
||||
expected string
|
||||
expectedErr error
|
||||
}{
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
|
||||
{NameEncryptionStandard, false, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", nil},
|
||||
{NameEncryptionStandard, false, "1/12/123", "1/12/123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123.bin", nil},
|
||||
{NameEncryptionOff, true, "1/12/123", "1/12/123", nil},
|
||||
{NameEncryptionOff, true, ".bin", ".bin", nil},
|
||||
} {
|
||||
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
|
||||
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, nil)
|
||||
actual, actualErr := c.DecryptDirName(test.in)
|
||||
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
|
||||
assert.Equal(t, test.expected, actual, what)
|
||||
@@ -284,7 +671,7 @@ func TestDecryptDirName(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestEncryptedSize(t *testing.T) {
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true)
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
for _, test := range []struct {
|
||||
in int64
|
||||
expected int64
|
||||
@@ -308,7 +695,7 @@ func TestEncryptedSize(t *testing.T) {
|
||||
|
||||
func TestDecryptedSize(t *testing.T) {
|
||||
// Test the errors since we tested the reverse above
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true)
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
for _, test := range []struct {
|
||||
in int64
|
||||
expectedErr error
|
||||
@@ -637,7 +1024,7 @@ func (r *randomSource) Read(p []byte) (n int, err error) {
|
||||
func (r *randomSource) Write(p []byte) (n int, err error) {
|
||||
for i := range p {
|
||||
if p[i] != r.next() {
|
||||
return 0, errors.Errorf("Error in stream at %d", r.counter)
|
||||
return 0, fmt.Errorf("Error in stream at %d", r.counter)
|
||||
}
|
||||
}
|
||||
return len(p), nil
|
||||
@@ -679,7 +1066,7 @@ func (z *zeroes) Read(p []byte) (n int, err error) {
|
||||
|
||||
// Test encrypt decrypt with different buffer sizes
|
||||
func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
assert.NoError(t, err)
|
||||
c.cryptoRand = &zeroes{} // zero out the nonce
|
||||
buf := make([]byte, bufSize)
|
||||
@@ -749,7 +1136,7 @@ func TestEncryptData(t *testing.T) {
|
||||
{[]byte{1}, file1},
|
||||
{[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, file16},
|
||||
} {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
assert.NoError(t, err)
|
||||
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
|
||||
|
||||
@@ -772,7 +1159,7 @@ func TestEncryptData(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNewEncrypter(t *testing.T) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
assert.NoError(t, err)
|
||||
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
|
||||
|
||||
@@ -788,13 +1175,12 @@ func TestNewEncrypter(t *testing.T) {
|
||||
fh, err = c.newEncrypter(z, nil)
|
||||
assert.Nil(t, fh)
|
||||
assert.Error(t, err, "short read of nonce")
|
||||
|
||||
}
|
||||
|
||||
// Test the stream returning 0, io.ErrUnexpectedEOF - this used to
|
||||
// cause a fatal loop
|
||||
func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
in := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
|
||||
@@ -823,7 +1209,7 @@ func (c *closeDetector) Close() error {
|
||||
}
|
||||
|
||||
func TestNewDecrypter(t *testing.T) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
assert.NoError(t, err)
|
||||
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
|
||||
|
||||
@@ -866,7 +1252,7 @@ func TestNewDecrypter(t *testing.T) {
|
||||
|
||||
// Test the stream returning 0, io.ErrUnexpectedEOF
|
||||
func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
|
||||
@@ -882,7 +1268,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNewDecrypterSeekLimit(t *testing.T) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
assert.NoError(t, err)
|
||||
c.cryptoRand = &zeroes{} // nodge the crypto rand generator
|
||||
|
||||
@@ -1088,7 +1474,7 @@ func TestDecrypterCalculateUnderlying(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDecrypterRead(t *testing.T) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Test truncating the file at each possible point
|
||||
@@ -1152,7 +1538,7 @@ func TestDecrypterRead(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDecrypterClose(t *testing.T) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
cd := newCloseDetector(bytes.NewBuffer(file16))
|
||||
@@ -1190,7 +1576,7 @@ func TestDecrypterClose(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPutGetBlock(t *testing.T) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
block := c.getBlock()
|
||||
@@ -1201,7 +1587,7 @@ func TestPutGetBlock(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestKey(t *testing.T) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Check zero keys OK
|
||||
|
||||
@@ -3,13 +3,13 @@ package crypt
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
@@ -28,6 +28,9 @@ func init() {
|
||||
Description: "Encrypt/Decrypt a remote",
|
||||
NewFs: NewFs,
|
||||
CommandHelp: commandHelp,
|
||||
MetadataInfo: &fs.MetadataInfo{
|
||||
Help: `Any metadata supported by the underlying remote is read and written.`,
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: "remote",
|
||||
Help: "Remote to encrypt/decrypt.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
||||
@@ -116,6 +119,29 @@ names, or for debugging purposes.`,
|
||||
Help: "Encrypt file data.",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "filename_encoding",
|
||||
Help: `How to encode the encrypted filename to text string.

This option can help with shortening the encrypted filename. The
suitable option depends on how your remote counts the filename
length and whether it is case sensitive.`,
|
||||
Default: "base32",
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "base32",
|
||||
Help: "Encode using base32. Suitable for all remote.",
|
||||
},
|
||||
{
|
||||
Value: "base64",
|
||||
Help: "Encode using base64. Suitable for case sensitive remote.",
|
||||
},
|
||||
{
|
||||
Value: "base32768",
|
||||
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)",
|
||||
},
|
||||
},
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
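
As a rough illustration of why the encoding choice matters for name-length limits, here is a standalone sketch using only the standard library. The 32-byte ciphertext size is an illustrative assumption, and base32768 (not in the standard library) is only described in a comment, with its per-character density hedged as approximate.

package main

import (
	"encoding/base32"
	"encoding/base64"
	"fmt"
)

func main() {
	// Take 32 bytes as an illustrative encrypted-name ciphertext length.
	n := 32
	b32 := base32.StdEncoding.WithPadding(base32.NoPadding).EncodedLen(n)
	b64 := base64.RawURLEncoding.EncodedLen(n)
	fmt.Println("base32 chars:", b32) // 52
	fmt.Println("base64 chars:", b64) // 43
	// base32768 packs roughly 15 bits per character, so the same 32 bytes
	// would need about 18 characters, which helps when the remote counts
	// UTF-16 code units (e.g. OneDrive) rather than UTF-8 bytes.
}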
|
||||
@@ -131,18 +157,22 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
|
||||
}
|
||||
password, err := obscure.Reveal(opt.Password)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to decrypt password")
|
||||
return nil, fmt.Errorf("failed to decrypt password: %w", err)
|
||||
}
|
||||
var salt string
|
||||
if opt.Password2 != "" {
|
||||
salt, err = obscure.Reveal(opt.Password2)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to decrypt password2")
|
||||
return nil, fmt.Errorf("failed to decrypt password2: %w", err)
|
||||
}
|
||||
}
|
||||
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
|
||||
enc, err := NewNameEncoding(opt.FilenameEncoding)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make cipher")
|
||||
return nil, err
|
||||
}
|
||||
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption, enc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to make cipher: %w", err)
|
||||
}
|
||||
return cipher, nil
|
||||
}
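
The newCipherForConfig hunk above is one instance of the repo-wide move from github.com/pkg/errors to standard-library wrapping with %w. A minimal sketch of the equivalent pattern, with hypothetical names rather than rclone code:

package main

import (
	"errors"
	"fmt"
)

var errBadPassword = errors.New("bad password") // stand-in error for the sketch

func reveal(pw string) (string, error) {
	if pw == "" {
		return "", errBadPassword
	}
	return pw, nil
}

func main() {
	_, err := reveal("")
	// fmt.Errorf with %w replaces errors.Wrap: the cause is preserved
	// in the wrap chain and can still be matched later.
	wrapped := fmt.Errorf("failed to decrypt password: %w", err)
	fmt.Println(errors.Is(wrapped, errBadPassword)) // true
}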
|
||||
@@ -192,7 +222,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
}
|
||||
}
|
||||
if err != fs.ErrorIsFile && err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote)
|
||||
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
|
||||
}
|
||||
f := &Fs{
|
||||
Fs: wrappedFs,
|
||||
@@ -214,6 +244,9 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||
|
||||
return f, err
|
||||
@@ -229,6 +262,7 @@ type Options struct {
|
||||
Password2 string `config:"password2"`
|
||||
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
|
||||
ShowMapping bool `config:"show_mapping"`
|
||||
FilenameEncoding string `config:"filename_encoding"`
|
||||
}
|
||||
|
||||
// Fs represents a wrapped fs.Fs
|
||||
@@ -300,7 +334,7 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
|
||||
case fs.Directory:
|
||||
f.addDir(ctx, &newEntries, x)
|
||||
default:
|
||||
return nil, errors.Errorf("Unknown object type %T", entry)
|
||||
return nil, fmt.Errorf("unknown object type %T", entry)
|
||||
}
|
||||
}
|
||||
return newEntries, nil
|
||||
@@ -406,7 +440,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
|
||||
var dstHash string
|
||||
dstHash, err = o.Hash(ctx, ht)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read destination hash")
|
||||
return nil, fmt.Errorf("failed to read destination hash: %w", err)
|
||||
}
|
||||
if srcHash != "" && dstHash != "" {
|
||||
if srcHash != dstHash {
|
||||
@@ -415,7 +449,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
||||
}
|
||||
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
|
||||
return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
|
||||
}
|
||||
fs.Debugf(src, "%v = %s OK", ht, srcHash)
|
||||
}
|
||||
@@ -569,7 +603,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
|
||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
do := f.Fs.Features().CleanUp
|
||||
if do == nil {
|
||||
return errors.New("can't CleanUp")
|
||||
return errors.New("not supported by underlying remote")
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
@@ -578,7 +612,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
do := f.Fs.Features().About
|
||||
if do == nil {
|
||||
return nil, errors.New("About not supported")
|
||||
return nil, errors.New("not supported by underlying remote")
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
@@ -616,24 +650,24 @@ func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Objec
|
||||
// Open the src for input
|
||||
in, err := src.Open(ctx)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to open src")
|
||||
return "", fmt.Errorf("failed to open src: %w", err)
|
||||
}
|
||||
defer fs.CheckClose(in, &err)
|
||||
|
||||
// Now encrypt the src with the nonce
|
||||
out, err := f.cipher.newEncrypter(in, &nonce)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to make encrypter")
|
||||
return "", fmt.Errorf("failed to make encrypter: %w", err)
|
||||
}
|
||||
|
||||
// pipe into hash
|
||||
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to make hasher")
|
||||
return "", fmt.Errorf("failed to make hasher: %w", err)
|
||||
}
|
||||
_, err = io.Copy(m, out)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to hash data")
|
||||
return "", fmt.Errorf("failed to hash data: %w", err)
|
||||
}
|
||||
|
||||
return m.Sums()[hashType], nil
|
||||
@@ -652,12 +686,12 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
|
||||
// use a limited read so we only read the header
|
||||
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to open object to read nonce")
|
||||
return "", fmt.Errorf("failed to open object to read nonce: %w", err)
|
||||
}
|
||||
d, err := f.cipher.newDecrypter(in)
|
||||
if err != nil {
|
||||
_ = in.Close()
|
||||
return "", errors.Wrap(err, "failed to open object to read nonce")
|
||||
return "", fmt.Errorf("failed to open object to read nonce: %w", err)
|
||||
}
|
||||
nonce := d.nonce
|
||||
// fs.Debugf(o, "Read nonce % 2x", nonce)
|
||||
@@ -676,7 +710,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
|
||||
// Close d (and hence in) once we have read the nonce
|
||||
err = d.Close()
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to close nonce read")
|
||||
return "", fmt.Errorf("failed to close nonce read: %w", err)
|
||||
}
|
||||
|
||||
return f.computeHashWithNonce(ctx, nonce, src, hashType)
|
||||
@@ -795,7 +829,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
||||
for _, encryptedFileName := range arg {
|
||||
fileName, err := f.DecryptFileName(encryptedFileName)
|
||||
if err != nil {
|
||||
return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName))
|
||||
return out, fmt.Errorf("failed to decrypt: %s: %w", encryptedFileName, err)
|
||||
}
|
||||
out = append(out, fileName)
|
||||
}
|
||||
@@ -1028,6 +1062,50 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// GetTier returns storage tier or class of the Object
|
||||
func (o *ObjectInfo) GetTier() string {
|
||||
do, ok := o.ObjectInfo.(fs.GetTierer)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return do.GetTier()
|
||||
}
|
||||
|
||||
// ID returns the ID of the Object if known, or "" if not
|
||||
func (o *ObjectInfo) ID() string {
|
||||
do, ok := o.ObjectInfo.(fs.IDer)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return do.ID()
|
||||
}
|
||||
|
||||
// Metadata returns metadata for an object
|
||||
//
|
||||
// It should return nil if there is no Metadata
|
||||
func (o *ObjectInfo) Metadata(ctx context.Context) (fs.Metadata, error) {
|
||||
do, ok := o.ObjectInfo.(fs.Metadataer)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
return do.Metadata(ctx)
|
||||
}
|
||||
|
||||
// MimeType returns the content type of the Object if
|
||||
// known, or "" if not
|
||||
//
|
||||
// This is deliberately unsupported so we don't leak mime type info by
|
||||
// default.
|
||||
func (o *ObjectInfo) MimeType(ctx context.Context) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// UnWrap returns the Object that this Object is wrapping or
|
||||
// nil if it isn't wrapping anything
|
||||
func (o *ObjectInfo) UnWrap() fs.Object {
|
||||
return fs.UnWrapObjectInfo(o.ObjectInfo)
|
||||
}
|
||||
|
||||
// ID returns the ID of the Object if known, or "" if not
|
||||
func (o *Object) ID() string {
|
||||
do, ok := o.Object.(fs.IDer)
|
||||
@@ -1056,6 +1134,26 @@ func (o *Object) GetTier() string {
|
||||
return do.GetTier()
|
||||
}
|
||||
|
||||
// Metadata returns metadata for an object
|
||||
//
|
||||
// It should return nil if there is no Metadata
|
||||
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
|
||||
do, ok := o.Object.(fs.Metadataer)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
return do.Metadata(ctx)
|
||||
}
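
The Metadata and GetTier methods added here all follow the same optional-interface delegation pattern: assert whether the wrapped object supports the capability and forward the call only if it does. A small self-contained sketch of that pattern, with hypothetical types standing in for fs.Metadataer and the crypt Object:

package main

import "fmt"

// Metadataer is a stand-in for an optional interface such as fs.Metadataer.
type Metadataer interface {
	Metadata() map[string]string
}

type base struct{}                      // supports no metadata
type rich struct{ m map[string]string } // supports metadata

func (r rich) Metadata() map[string]string { return r.m }

// wrapper forwards the optional interface only if the wrapped value
// supports it, mirroring the type assertion used above.
type wrapper struct{ inner interface{} }

func (w wrapper) Metadata() map[string]string {
	do, ok := w.inner.(Metadataer)
	if !ok {
		return nil // underlying object doesn't support metadata
	}
	return do.Metadata()
}

func main() {
	fmt.Println(wrapper{base{}}.Metadata())                                 // map[]
	fmt.Println(wrapper{rich{map[string]string{"tier": "hot"}}}.Metadata()) // map[tier:hot]
}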
|
||||
|
||||
// MimeType returns the content type of the Object if
|
||||
// known, or "" if not
|
||||
//
|
||||
// This is deliberately unsupported so we don't leak mime type info by
|
||||
// default.
|
||||
func (o *Object) MimeType(ctx context.Context) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
@@ -1078,10 +1176,6 @@ var (
|
||||
_ fs.UserInfoer = (*Fs)(nil)
|
||||
_ fs.Disconnecter = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
_ fs.ObjectInfo = (*ObjectInfo)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.ObjectUnWrapper = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
_ fs.SetTierer = (*Object)(nil)
|
||||
_ fs.GetTierer = (*Object)(nil)
|
||||
_ fs.FullObjectInfo = (*ObjectInfo)(nil)
|
||||
_ fs.FullObject = (*Object)(nil)
|
||||
)
|
||||
|
||||
@@ -91,7 +91,9 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
|
||||
src := f.newObjectInfo(oi, nonce)
|
||||
|
||||
// Test ObjectInfo methods
|
||||
assert.Equal(t, int64(outBuf.Len()), src.Size())
|
||||
if !f.opt.NoDataEncryption {
|
||||
assert.Equal(t, int64(outBuf.Len()), src.Size())
|
||||
}
|
||||
assert.Equal(t, f, src.Fs())
|
||||
assert.NotEqual(t, path, src.Remote())
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ package crypt_test
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/crypt"
|
||||
@@ -29,7 +30,7 @@ func TestIntegration(t *testing.T) {
|
||||
}
|
||||
|
||||
// TestStandard runs integration tests against the remote
|
||||
func TestStandard(t *testing.T) {
|
||||
func TestStandardBase32(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
@@ -46,6 +47,51 @@ func TestStandard(t *testing.T) {
|
||||
},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
QuickTestOK: true,
|
||||
})
|
||||
}
|
||||
|
||||
func TestStandardBase64(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
|
||||
name := "TestCrypt"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
NilObject: (*crypt.Object)(nil),
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "crypt"},
|
||||
{Name: name, Key: "remote", Value: tempdir},
|
||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
|
||||
{Name: name, Key: "filename_encryption", Value: "standard"},
|
||||
{Name: name, Key: "filename_encoding", Value: "base64"},
|
||||
},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
QuickTestOK: true,
|
||||
})
|
||||
}
|
||||
|
||||
func TestStandardBase32768(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
|
||||
name := "TestCrypt"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
NilObject: (*crypt.Object)(nil),
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "crypt"},
|
||||
{Name: name, Key: "remote", Value: tempdir},
|
||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
|
||||
{Name: name, Key: "filename_encryption", Value: "standard"},
|
||||
{Name: name, Key: "filename_encoding", Value: "base32768"},
|
||||
},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
QuickTestOK: true,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -67,6 +113,7 @@ func TestOff(t *testing.T) {
|
||||
},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
QuickTestOK: true,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -75,6 +122,9 @@ func TestObfuscate(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
if runtime.GOOS == "darwin" {
|
||||
t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with")
|
||||
}
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
|
||||
name := "TestCrypt3"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
@@ -89,6 +139,7 @@ func TestObfuscate(t *testing.T) {
|
||||
SkipBadWindowsCharacters: true,
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
QuickTestOK: true,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -97,6 +148,9 @@ func TestNoDataObfuscate(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
if runtime.GOOS == "darwin" {
|
||||
t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with")
|
||||
}
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
|
||||
name := "TestCrypt4"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
@@ -112,5 +166,6 @@ func TestNoDataObfuscate(t *testing.T) {
|
||||
SkipBadWindowsCharacters: true,
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
QuickTestOK: true,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -4,15 +4,15 @@
|
||||
// buffers which are a multiple of an underlying crypto block size.
|
||||
package pkcs7
|
||||
|
||||
import "github.com/pkg/errors"
|
||||
import "errors"
|
||||
|
||||
// Errors Unpad can return
|
||||
var (
|
||||
ErrorPaddingNotFound = errors.New("Bad PKCS#7 padding - not padded")
|
||||
ErrorPaddingNotAMultiple = errors.New("Bad PKCS#7 padding - not a multiple of blocksize")
|
||||
ErrorPaddingTooLong = errors.New("Bad PKCS#7 padding - too long")
|
||||
ErrorPaddingTooShort = errors.New("Bad PKCS#7 padding - too short")
|
||||
ErrorPaddingNotAllTheSame = errors.New("Bad PKCS#7 padding - not all the same")
|
||||
ErrorPaddingNotFound = errors.New("bad PKCS#7 padding - not padded")
|
||||
ErrorPaddingNotAMultiple = errors.New("bad PKCS#7 padding - not a multiple of blocksize")
|
||||
ErrorPaddingTooLong = errors.New("bad PKCS#7 padding - too long")
|
||||
ErrorPaddingTooShort = errors.New("bad PKCS#7 padding - too short")
|
||||
ErrorPaddingNotAllTheSame = errors.New("bad PKCS#7 padding - not all the same")
|
||||
)
|
||||
|
||||
// Pad buf using PKCS#7 to a multiple of n.
|
||||
|
||||
backend/drive/drive.go: 335 changed lines (Executable file → Normal file)
@@ -11,12 +11,14 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"net/http"
|
||||
"path"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -25,7 +27,6 @@ import (
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
@@ -50,6 +51,7 @@ import (
|
||||
drive_v2 "google.golang.org/api/drive/v2"
|
||||
drive "google.golang.org/api/drive/v3"
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/api/option"
|
||||
)
|
||||
|
||||
// Constants
|
||||
@@ -70,7 +72,7 @@ const (
|
||||
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
|
||||
minChunkSize = fs.SizeSuffix(googleapi.MinUploadChunkSize)
|
||||
defaultChunkSize = 8 * fs.Mebi
|
||||
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
|
||||
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks,resourceKey"
|
||||
listRGrouping = 50 // number of IDs to search at once when using ListR
|
||||
listRInputBuffer = 1000 // size of input buffer when using ListR
|
||||
defaultXDGIcon = "text-html"
|
||||
@@ -84,7 +86,7 @@ var (
|
||||
Endpoint: google.Endpoint,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.TitleBarRedirectURL,
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
}
|
||||
_mimeTypeToExtensionDuplicates = map[string]string{
|
||||
"application/x-vnd.oasis.opendocument.presentation": ".odp",
|
||||
@@ -188,7 +190,7 @@ func init() {
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't parse config into struct")
|
||||
return nil, fmt.Errorf("couldn't parse config into struct: %w", err)
|
||||
}
|
||||
|
||||
switch config.State {
|
||||
@@ -226,7 +228,7 @@ func init() {
|
||||
case "teamdrive_config":
|
||||
f, err := newFs(ctx, name, "", m)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make Fs to list Shared Drives")
|
||||
return nil, fmt.Errorf("failed to make Fs to list Shared Drives: %w", err)
|
||||
}
|
||||
teamDrives, err := f.listTeamDrives(ctx)
|
||||
if err != nil {
|
||||
@@ -276,6 +278,7 @@ Leave blank normally.
|
||||
Fill in to access "Computers" folders (see docs), or for rclone to use
|
||||
a non root folder as its starting point.
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "service_account_file",
|
||||
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
|
||||
@@ -299,6 +302,17 @@ a non root folder as its starting point.
|
||||
Default: true,
|
||||
Help: "Send files to the trash instead of deleting permanently.\n\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "copy_shortcut_content",
|
||||
Default: false,
|
||||
Help: `Server side copy contents of shortcuts instead of the shortcut.
|
||||
|
||||
When doing server side copies, normally rclone will copy shortcuts as
|
||||
shortcuts.
|
||||
|
||||
If this flag is used then rclone will copy the contents of shortcuts
|
||||
rather than shortcuts themselves when doing server side copies.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "skip_gdocs",
|
||||
Default: false,
|
||||
@@ -545,6 +559,35 @@ If this flag is set then rclone will ignore shortcut files completely.
|
||||
`,
|
||||
Advanced: true,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "skip_dangling_shortcuts",
|
||||
Help: `If set skip dangling shortcut files.
|
||||
|
||||
If this is set then rclone will not show any dangling shortcuts in listings.
|
||||
`,
|
||||
Advanced: true,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "resource_key",
|
||||
Help: `Resource key for accessing a link-shared file.
|
||||
|
||||
If you need to access files shared with a link like this
|
||||
|
||||
https://drive.google.com/drive/folders/XXX?resourcekey=YYY&usp=sharing
|
||||
|
||||
Then you will need to use the first part "XXX" as the "root_folder_id"
|
||||
and the second part "YYY" as the "resource_key" otherwise you will get
|
||||
404 not found errors when trying to access the directory.
|
||||
|
||||
See: https://developers.google.com/drive/api/guides/resource-keys
|
||||
|
||||
This resource key requirement only applies to a subset of old files.
|
||||
|
||||
Note also that opening the folder once in the web interface (with the
|
||||
user you've authenticated rclone with) seems to be enough so that the
|
||||
resource key is not needed.
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -578,6 +621,7 @@ type Options struct {
|
||||
TeamDriveID string `config:"team_drive"`
|
||||
AuthOwnerOnly bool `config:"auth_owner_only"`
|
||||
UseTrash bool `config:"use_trash"`
|
||||
CopyShortcutContent bool `config:"copy_shortcut_content"`
|
||||
SkipGdocs bool `config:"skip_gdocs"`
|
||||
SkipChecksumGphotos bool `config:"skip_checksum_gphotos"`
|
||||
SharedWithMe bool `config:"shared_with_me"`
|
||||
@@ -604,6 +648,8 @@ type Options struct {
|
||||
StopOnUploadLimit bool `config:"stop_on_upload_limit"`
|
||||
StopOnDownloadLimit bool `config:"stop_on_download_limit"`
|
||||
SkipShortcuts bool `config:"skip_shortcuts"`
|
||||
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
|
||||
ResourceKey string `config:"resource_key"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
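
The new resource_key option described above asks the user to split a sharing link into root_folder_id and resource_key by hand. A hedged sketch of that split in Go, using only the standard library and a hypothetical helper (this is not how rclone itself consumes the option, which is plain config text):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// splitSharingLink splits a Drive sharing link of the form
//   https://drive.google.com/drive/folders/XXX?resourcekey=YYY&usp=sharing
// into the values to use for root_folder_id and resource_key.
func splitSharingLink(link string) (rootFolderID, resourceKey string, err error) {
	u, err := url.Parse(link)
	if err != nil {
		return "", "", err
	}
	parts := strings.Split(strings.Trim(u.Path, "/"), "/")
	rootFolderID = parts[len(parts)-1]         // the "XXX" part
	resourceKey = u.Query().Get("resourcekey") // the "YYY" part
	return rootFolderID, resourceKey, nil
}

func main() {
	id, key, _ := splitSharingLink("https://drive.google.com/drive/folders/XXX?resourcekey=YYY&usp=sharing")
	fmt.Println(id, key) // XXX YYY
}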
|
||||
|
||||
@@ -629,6 +675,7 @@ type Fs struct {
|
||||
grouping int32 // number of IDs to search at once in ListR - read with atomic
|
||||
listRmu *sync.Mutex // protects listRempties
|
||||
listRempties map[string]struct{} // IDs of supposedly empty directories which triggered grouping disable
|
||||
dirResourceKeys *sync.Map // map directory ID to resource key
|
||||
}
|
||||
|
||||
type baseObject struct {
|
||||
@@ -639,6 +686,7 @@ type baseObject struct {
|
||||
mimeType string // The object MIME type
|
||||
bytes int64 // size of the object
|
||||
parents []string // IDs of the parent directories
|
||||
resourceKey *string // resourceKey is needed for link shared objects
|
||||
}
|
||||
type documentObject struct {
|
||||
baseObject
|
||||
@@ -710,6 +758,9 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
|
||||
fs.Errorf(f, "Received download limit error: %v", err)
|
||||
return false, fserrors.FatalError(err)
|
||||
} else if f.opt.StopOnUploadLimit && reason == "quotaExceeded" {
|
||||
fs.Errorf(f, "Received upload limit error: %v", err)
|
||||
return false, fserrors.FatalError(err)
|
||||
} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
|
||||
fs.Errorf(f, "Received Shared Drive file limit error: %v", err)
|
||||
return false, fserrors.FatalError(err)
|
||||
@@ -755,7 +806,7 @@ func (f *Fs) getFile(ctx context.Context, ID string, fields googleapi.Field) (in
|
||||
func (f *Fs) getRootID(ctx context.Context) (string, error) {
|
||||
info, err := f.getFile(ctx, "root", "id")
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "couldn't find root directory ID")
|
||||
return "", fmt.Errorf("couldn't find root directory ID: %w", err)
|
||||
}
|
||||
return info.Id, nil
|
||||
}
|
||||
@@ -779,6 +830,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
|
||||
// We must not filter with parent when we try list "ROOT" with drive-shared-with-me
|
||||
// If we need to list file inside those shared folders, we must search it without sharedWithMe
|
||||
parentsQuery := bytes.NewBufferString("(")
|
||||
var resourceKeys []string
|
||||
for _, dirID := range dirIDs {
|
||||
if dirID == "" {
|
||||
continue
|
||||
@@ -799,7 +851,12 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
|
||||
} else {
|
||||
_, _ = fmt.Fprintf(parentsQuery, "'%s' in parents", dirID)
|
||||
}
|
||||
resourceKey, hasResourceKey := f.dirResourceKeys.Load(dirID)
|
||||
if hasResourceKey {
|
||||
resourceKeys = append(resourceKeys, fmt.Sprintf("%s/%s", dirID, resourceKey))
|
||||
}
|
||||
}
|
||||
resourceKeysHeader := strings.Join(resourceKeys, ",")
|
||||
if parentsQuery.Len() > 1 {
|
||||
_ = parentsQuery.WriteByte(')')
|
||||
query = append(query, parentsQuery.String())
|
||||
@@ -808,8 +865,8 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
|
||||
if title != "" {
|
||||
searchTitle := f.opt.Enc.FromStandardName(title)
|
||||
// Escaping the backslash isn't documented but seems to work
|
||||
searchTitle = strings.Replace(searchTitle, `\`, `\\`, -1)
|
||||
searchTitle = strings.Replace(searchTitle, `'`, `\'`, -1)
|
||||
searchTitle = strings.ReplaceAll(searchTitle, `\`, `\\`)
|
||||
searchTitle = strings.ReplaceAll(searchTitle, `'`, `\'`)
|
||||
|
||||
var titleQuery bytes.Buffer
|
||||
_, _ = fmt.Fprintf(&titleQuery, "(name='%s'", searchTitle)
|
||||
@@ -863,7 +920,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
|
||||
}
|
||||
list.SupportsAllDrives(true)
|
||||
list.IncludeItemsFromAllDrives(true)
|
||||
if f.isTeamDrive {
|
||||
if f.isTeamDrive && !f.opt.SharedWithMe {
|
||||
list.DriveId(f.opt.TeamDriveID)
|
||||
list.Corpora("drive")
|
||||
}
|
||||
@@ -871,6 +928,10 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
|
||||
if f.rootFolderID == "appDataFolder" {
|
||||
list.Spaces("appDataFolder")
|
||||
}
|
||||
// Add resource Keys if necessary
|
||||
if resourceKeysHeader != "" {
|
||||
list.Header().Add("X-Goog-Drive-Resource-Keys", resourceKeysHeader)
|
||||
}
|
||||
|
||||
fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.fileFields)
|
||||
|
||||
@@ -882,7 +943,7 @@ OUTER:
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "couldn't list directory")
|
||||
return false, fmt.Errorf("couldn't list directory: %w", err)
|
||||
}
|
||||
if files.IncompleteSearch {
|
||||
fs.Errorf(f, "search result INCOMPLETE")
|
||||
@@ -904,7 +965,12 @@ OUTER:
|
||||
}
|
||||
item, err = f.resolveShortcut(ctx, item)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "list")
|
||||
return false, fmt.Errorf("list: %w", err)
|
||||
}
|
||||
// leave the dangling shortcut out of the listings
|
||||
// we've already logged about the dangling shortcut in resolveShortcut
|
||||
if f.opt.SkipDanglingShortcuts && item.MimeType == shortcutMimeTypeDangling {
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Check the case of items is correct since
|
||||
@@ -965,7 +1031,7 @@ func fixMimeType(mimeTypeIn string) string {
|
||||
mimeTypeOut = mime.FormatMediaType(mediaType, param)
|
||||
}
|
||||
if mimeTypeOut == "" {
|
||||
panic(errors.Errorf("unable to fix MIME type %q", mimeTypeIn))
|
||||
panic(fmt.Errorf("unable to fix MIME type %q", mimeTypeIn))
|
||||
}
|
||||
return mimeTypeOut
|
||||
}
|
||||
@@ -1000,7 +1066,7 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
|
||||
}
|
||||
mt := mime.TypeByExtension(extension)
|
||||
if mt == "" {
|
||||
return extensions, mimeTypes, errors.Errorf("couldn't find MIME type for extension %q", extension)
|
||||
return extensions, mimeTypes, fmt.Errorf("couldn't find MIME type for extension %q", extension)
|
||||
}
|
||||
if !containsString(extensions, extension) {
|
||||
extensions = append(extensions, extension)
|
||||
@@ -1027,7 +1093,7 @@ func getServiceAccountClient(ctx context.Context, opt *Options, credentialsData
|
||||
scopes := driveScopes(opt.Scope)
|
||||
conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error processing credentials")
|
||||
return nil, fmt.Errorf("error processing credentials: %w", err)
|
||||
}
|
||||
if opt.Impersonate != "" {
|
||||
conf.Subject = opt.Impersonate
|
||||
@@ -1044,19 +1110,19 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
|
||||
if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
|
||||
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error opening service account credentials file")
|
||||
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
|
||||
}
|
||||
opt.ServiceAccountCredentials = string(loadedCreds)
|
||||
}
|
||||
if opt.ServiceAccountCredentials != "" {
|
||||
oAuthClient, err = getServiceAccountClient(ctx, opt, []byte(opt.ServiceAccountCredentials))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create oauth client from service account")
|
||||
return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
|
||||
}
|
||||
} else {
|
||||
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create oauth client")
|
||||
return nil, fmt.Errorf("failed to create oauth client: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1065,10 +1131,10 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
|
||||
|
||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||
if !isPowerOfTwo(int64(cs)) {
|
||||
return errors.Errorf("%v isn't a power of two", cs)
|
||||
return fmt.Errorf("%v isn't a power of two", cs)
|
||||
}
|
||||
if cs < minChunkSize {
|
||||
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
}
|
||||
return nil
|
||||
}
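
checkUploadChunkSize relies on an isPowerOfTwo helper that is not part of this hunk. A conventional implementation, offered only as a sketch and not necessarily the one in drive.go, uses the n & (n-1) bit trick:

package main

import "fmt"

// isPowerOfTwo mirrors the check relied on by checkUploadChunkSize above;
// this particular implementation is a sketch.
func isPowerOfTwo(n int64) bool {
	return n > 0 && n&(n-1) == 0
}

func main() {
	fmt.Println(isPowerOfTwo(8 * 1024 * 1024)) // true  (the 8 MiB default chunk size)
	fmt.Println(isPowerOfTwo(3 << 20))         // false (3 MiB is not a power of two)
}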
|
||||
@@ -1106,16 +1172,16 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
|
||||
}
|
||||
err = checkUploadCutoff(opt.UploadCutoff)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "drive: upload cutoff")
|
||||
return nil, fmt.Errorf("drive: upload cutoff: %w", err)
|
||||
}
|
||||
err = checkUploadChunkSize(opt.ChunkSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "drive: chunk size")
|
||||
return nil, fmt.Errorf("drive: chunk size: %w", err)
|
||||
}
|
||||
|
||||
oAuthClient, err := createOAuthClient(ctx, opt, name, m)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "drive: failed when making oauth client")
|
||||
return nil, fmt.Errorf("drive: failed when making oauth client: %w", err)
|
||||
}
|
||||
|
||||
root, err := parseDrivePath(path)
|
||||
@@ -1125,15 +1191,16 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
|
||||
|
||||
ci := fs.GetConfig(ctx)
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
ci: ci,
|
||||
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))),
|
||||
m: m,
|
||||
grouping: listRGrouping,
|
||||
listRmu: new(sync.Mutex),
|
||||
listRempties: make(map[string]struct{}),
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
ci: ci,
|
||||
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))),
|
||||
m: m,
|
||||
grouping: listRGrouping,
|
||||
listRmu: new(sync.Mutex),
|
||||
listRempties: make(map[string]struct{}),
|
||||
dirResourceKeys: new(sync.Map),
|
||||
}
|
||||
f.isTeamDrive = opt.TeamDriveID != ""
|
||||
f.fileFields = f.getFileFields()
|
||||
@@ -1147,15 +1214,15 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
|
||||
|
||||
// Create a new authorized Drive client.
|
||||
f.client = oAuthClient
|
||||
f.svc, err = drive.New(f.client)
|
||||
f.svc, err = drive.NewService(context.Background(), option.WithHTTPClient(f.client))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't create Drive client")
|
||||
return nil, fmt.Errorf("couldn't create Drive client: %w", err)
|
||||
}
|
||||
|
||||
if f.opt.V2DownloadMinSize >= 0 {
|
||||
f.v2Svc, err = drive_v2.New(f.client)
|
||||
f.v2Svc, err = drive_v2.NewService(context.Background(), option.WithHTTPClient(f.client))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't create Drive v2 client")
|
||||
return nil, fmt.Errorf("couldn't create Drive v2 client: %w", err)
|
||||
}
|
||||
}
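
For context, the drive.New(client) constructor replaced here is deprecated in google.golang.org/api in favour of NewService with client options. A standalone sketch of the new call, where the bare http.Client is a stand-in for rclone's OAuth client:

package main

import (
	"context"
	"log"
	"net/http"

	drive "google.golang.org/api/drive/v3"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	httpClient := &http.Client{} // in rclone this is the configured OAuth client
	// drive.NewService supersedes the deprecated drive.New(client).
	svc, err := drive.NewService(ctx, option.WithHTTPClient(httpClient))
	if err != nil {
		log.Fatalf("couldn't create Drive client: %v", err)
	}
	_ = svc
}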
|
||||
|
||||
@@ -1180,7 +1247,8 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
|
||||
// otherwise look up the actual root ID
|
||||
rootID, err := f.getRootID(ctx)
|
||||
if err != nil {
|
||||
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
|
||||
var gerr *googleapi.Error
|
||||
if errors.As(err, &gerr) && gerr.Code == 404 {
|
||||
// 404 means that this scope does not have permission to get the
|
||||
// root so just use "root"
|
||||
rootID = "root"
|
||||
@@ -1194,6 +1262,11 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
|
||||
|
||||
f.dirCache = dircache.New(f.root, f.rootFolderID, f)
|
||||
|
||||
// If resource key is set then cache it for the root folder id
|
||||
if f.opt.ResourceKey != "" {
|
||||
f.dirResourceKeys.Store(f.rootFolderID, f.opt.ResourceKey)
|
||||
}
|
||||
|
||||
// Parse extensions
|
||||
if f.opt.Extensions != "" {
|
||||
if f.opt.ExportExtensions != defaultExportExtensions {
|
||||
@@ -1292,12 +1365,16 @@ func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
|
||||
}
|
||||
}
|
||||
}
|
||||
return &Object{
|
||||
o := &Object{
|
||||
baseObject: f.newBaseObject(remote, info),
|
||||
url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, actualID(info.Id)),
|
||||
md5sum: strings.ToLower(info.Md5Checksum),
|
||||
v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
|
||||
}
|
||||
if info.ResourceKey != "" {
|
||||
o.resourceKey = &info.ResourceKey
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// newDocumentObject creates an fs.Object for a google docs drive.File
|
||||
@@ -1322,7 +1399,7 @@ func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, expor
|
||||
func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
|
||||
t := linkTemplate(exportMimeType)
|
||||
if t == nil {
|
||||
return nil, errors.Errorf("unsupported link type %s", exportMimeType)
|
||||
return nil, fmt.Errorf("unsupported link type %s", exportMimeType)
|
||||
}
|
||||
xdgIcon := _mimeTypeToXDGLinkIcons[info.MimeType]
|
||||
if xdgIcon == "" {
|
||||
@@ -1335,7 +1412,7 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
|
||||
info.WebViewLink, info.Name, xdgIcon,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "executing template failed")
|
||||
return nil, fmt.Errorf("executing template failed: %w", err)
|
||||
}
|
||||
|
||||
baseObject := f.newBaseObject(remote+extension, info)
|
||||
@@ -1372,7 +1449,7 @@ func (f *Fs) newObjectWithExportInfo(
|
||||
// will have been resolved so this will do nothing.
|
||||
info, err = f.resolveShortcut(ctx, info)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "new object")
|
||||
return nil, fmt.Errorf("new object: %w", err)
|
||||
}
|
||||
switch {
|
||||
case info.MimeType == driveFolderType:
|
||||
@@ -1570,6 +1647,15 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
|
||||
}
|
||||
}
|
||||
|
||||
// If using a link type export and a more specific export
|
||||
// hasn't been found all docs should be exported
|
||||
for _, _extension := range f.exportExtensions {
|
||||
_mimeType := mime.TypeByExtension(_extension)
|
||||
if isLinkMimeType(_mimeType) {
|
||||
return _extension, _mimeType, true
|
||||
}
|
||||
}
|
||||
|
||||
// else return empty
|
||||
return "", "", isDocument
|
||||
}
|
||||
@@ -1580,6 +1666,14 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
|
||||
// Look through the exportExtensions and find the first format that can be
|
||||
// converted. If none found then return ("", "", "", false)
|
||||
func (f *Fs) findExportFormat(ctx context.Context, item *drive.File) (extension, filename, mimeType string, isDocument bool) {
|
||||
// If item has MD5 sum it is a file stored on drive
|
||||
if item.Md5Checksum != "" {
|
||||
return
|
||||
}
|
||||
// Folders can't be documents
|
||||
if item.MimeType == driveFolderType {
|
||||
return
|
||||
}
|
||||
extension, mimeType, isDocument = f.findExportFormatByMimeType(ctx, item.MimeType)
|
||||
if extension != "" {
|
||||
filename = item.Name + extension
|
||||
@@ -1970,7 +2064,7 @@ func splitID(compositeID string) (actualID, shortcutID string) {
|
||||
|
||||
// isShortcutID returns true if compositeID refers to a shortcut
|
||||
func isShortcutID(compositeID string) bool {
|
||||
return strings.IndexRune(compositeID, shortcutSeparator) >= 0
|
||||
return strings.ContainsRune(compositeID, shortcutSeparator)
|
||||
}
|
||||
|
||||
// actualID returns an actual ID from a composite ID
|
||||
@@ -2015,13 +2109,14 @@ func (f *Fs) resolveShortcut(ctx context.Context, item *drive.File) (newItem *dr
|
||||
}
|
||||
newItem, err = f.getFile(ctx, item.ShortcutDetails.TargetId, f.fileFields)
|
||||
if err != nil {
|
||||
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
|
||||
var gerr *googleapi.Error
|
||||
if errors.As(err, &gerr) && gerr.Code == 404 {
|
||||
// 404 means dangling shortcut, so just return the shortcut with the mime type mangled
|
||||
fs.Logf(nil, "Dangling shortcut %q detected", item.Name)
|
||||
item.MimeType = shortcutMimeTypeDangling
|
||||
return item, nil
|
||||
}
|
||||
return nil, errors.Wrap(err, "failed to resolve shortcut")
|
||||
return nil, fmt.Errorf("failed to resolve shortcut: %w", err)
|
||||
}
|
||||
// make sure we use the Name, Parents and Trashed from the original item
|
||||
newItem.Name = item.Name
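
This hunk, like the getRootID one earlier, swaps the errors.Cause(err).(*googleapi.Error) type assertion for errors.As, which walks a %w wrap chain. A short self-contained sketch of the same check (illustrative only, not the rclone call site):

package main

import (
	"errors"
	"fmt"

	"google.golang.org/api/googleapi"
)

func main() {
	// A wrapped API error, as produced by the fmt.Errorf("...: %w", err) calls above.
	err := fmt.Errorf("failed to resolve shortcut: %w", &googleapi.Error{Code: 404})

	// errors.As unwraps until it finds a *googleapi.Error, replacing the
	// old errors.Cause type assertion.
	var gerr *googleapi.Error
	if errors.As(err, &gerr) && gerr.Code == 404 {
		fmt.Println("dangling shortcut: treat as missing target")
	}
}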
|
||||
@@ -2040,6 +2135,10 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *drive.File
|
||||
case item.MimeType == driveFolderType:
|
||||
// cache the directory ID for later lookups
|
||||
f.dirCache.Put(remote, item.Id)
|
||||
// cache the resource key for later lookups
|
||||
if item.ResourceKey != "" {
|
||||
f.dirResourceKeys.Store(item.Id, item.ResourceKey)
|
||||
}
|
||||
when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
|
||||
d := fs.NewDir(remote, when).SetID(item.Id)
|
||||
if len(item.Parents) > 0 {
|
||||
@@ -2123,10 +2222,10 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
|
||||
|
||||
exportExt, _, _ = f.findExportFormatByMimeType(ctx, importMimeType)
|
||||
if exportExt == "" {
|
||||
return nil, errors.Errorf("No export format found for %q", importMimeType)
|
||||
return nil, fmt.Errorf("no export format found for %q", importMimeType)
|
||||
}
|
||||
if exportExt != srcExt && !f.opt.AllowImportNameChange {
|
||||
return nil, errors.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
|
||||
return nil, fmt.Errorf("can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2194,7 +2293,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir)
|
||||
return fmt.Errorf("MergeDirs list failed on %v: %w", srcDir, err)
|
||||
}
|
||||
// move them into place
|
||||
for _, info := range infos {
|
||||
@@ -2210,14 +2309,14 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir)
|
||||
return fmt.Errorf("MergeDirs move failed on %q in %v: %w", info.Name, srcDir, err)
|
||||
}
|
||||
}
|
||||
// rmdir (into trash) the now empty source directory
|
||||
fs.Infof(srcDir, "removing empty directory")
|
||||
err = f.delete(ctx, srcDir.ID(), true)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
|
||||
return fmt.Errorf("MergeDirs move failed to rmdir %q: %w", srcDir, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -2280,7 +2379,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||
return err
|
||||
}
|
||||
if found {
|
||||
return errors.Errorf("directory not empty")
|
||||
return fmt.Errorf("directory not empty")
|
||||
}
|
||||
}
|
||||
if root != "" {
|
||||
@@ -2372,16 +2471,24 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
createInfo.Description = ""
|
||||
}
|
||||
|
||||
// get the ID of the thing to copy - this is the shortcut if available
|
||||
// get the ID of the thing to copy
|
||||
// copy the contents if CopyShortcutContent
|
||||
// else copy the shortcut only
|
||||
|
||||
id := shortcutID(srcObj.id)
|
||||
|
||||
if f.opt.CopyShortcutContent {
|
||||
id = actualID(srcObj.id)
|
||||
}
|
||||
|
||||
var info *drive.File
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
info, err = f.svc.Files.Copy(id, createInfo).
|
||||
copy := f.svc.Files.Copy(id, createInfo).
|
||||
Fields(partialFields).
|
||||
SupportsAllDrives(true).
|
||||
KeepRevisionForever(f.opt.KeepRevisionForever).
|
||||
Context(ctx).Do()
|
||||
KeepRevisionForever(f.opt.KeepRevisionForever)
|
||||
srcObj.addResourceKey(copy.Header())
|
||||
info, err = copy.Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -2423,7 +2530,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
// result of List()
|
||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
if f.opt.TrashedOnly {
|
||||
return errors.New("Can't purge with --drive-trashed-only. Use delete if you want to selectively delete files")
|
||||
return errors.New("can't purge with --drive-trashed-only, use delete if you want to selectively delete files")
|
||||
}
|
||||
return f.purgeCheck(ctx, dir, false)
|
||||
}
|
||||
@@ -2458,7 +2565,7 @@ func (f *Fs) cleanupTeamDrive(ctx context.Context, dir string, directoryID strin
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, "failed to list directory")
|
||||
err = fmt.Errorf("failed to list directory: %w", err)
|
||||
r.Errors++
|
||||
fs.Errorf(dir, "%v", err)
|
||||
}
|
||||
@@ -2502,7 +2609,7 @@ func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get Shared Drive info")
|
||||
return fmt.Errorf("failed to get Shared Drive info: %w", err)
|
||||
}
|
||||
fs.Debugf(f, "read info from Shared Drive %q", td.Name)
|
||||
return err
|
||||
@@ -2525,7 +2632,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get Drive storageQuota")
|
||||
return nil, fmt.Errorf("failed to get Drive storageQuota: %w", err)
|
||||
}
|
||||
q := about.StorageQuota
|
||||
usage := &fs.Usage{
|
||||
@@ -2849,7 +2956,7 @@ func (f *Fs) Hashes() hash.Set {
|
||||
func (f *Fs) changeChunkSize(chunkSizeString string) (err error) {
|
||||
chunkSizeInt, err := strconv.ParseInt(chunkSizeString, 10, 64)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "couldn't convert chunk size to int")
|
||||
return fmt.Errorf("couldn't convert chunk size to int: %w", err)
|
||||
}
|
||||
chunkSize := fs.SizeSuffix(chunkSizeInt)
|
||||
if chunkSize == f.opt.ChunkSize {
|
||||
@@ -2886,17 +2993,17 @@ func (f *Fs) changeServiceAccountFile(ctx context.Context, file string) (err err
|
||||
f.opt.ServiceAccountCredentials = ""
|
||||
oAuthClient, err := createOAuthClient(ctx, &f.opt, f.name, f.m)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "drive: failed when making oauth client")
|
||||
return fmt.Errorf("drive: failed when making oauth client: %w", err)
|
||||
}
|
||||
f.client = oAuthClient
|
||||
f.svc, err = drive.New(f.client)
|
||||
f.svc, err = drive.NewService(context.Background(), option.WithHTTPClient(f.client))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "couldn't create Drive client")
|
||||
return fmt.Errorf("couldn't create Drive client: %w", err)
|
||||
}
|
||||
if f.opt.V2DownloadMinSize >= 0 {
|
||||
f.v2Svc, err = drive_v2.New(f.client)
|
||||
f.v2Svc, err = drive_v2.NewService(context.Background(), option.WithHTTPClient(f.client))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "couldn't create Drive v2 client")
|
||||
return fmt.Errorf("couldn't create Drive v2 client: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -2925,12 +3032,12 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
|
||||
isDir = true
|
||||
} else if srcObj, err := srcFs.NewObject(ctx, srcPath); err != nil {
|
||||
if err != fs.ErrorIsDir {
|
||||
return nil, errors.Wrap(err, "can't find source")
|
||||
return nil, fmt.Errorf("can't find source: %w", err)
|
||||
}
|
||||
// source was a directory
|
||||
srcID, err = srcFs.dirCache.FindDir(ctx, srcPath, false)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to find source dir")
|
||||
return nil, fmt.Errorf("failed to find source dir: %w", err)
|
||||
}
|
||||
isDir = true
|
||||
} else {
|
||||
@@ -2947,13 +3054,13 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
|
||||
} else if err == fs.ErrorIsDir {
|
||||
err = errors.New("existing directory")
|
||||
}
|
||||
return nil, errors.Wrap(err, "not overwriting shortcut target")
|
||||
return nil, fmt.Errorf("not overwriting shortcut target: %w", err)
|
||||
}
|
||||
|
||||
// Create destination shortcut
|
||||
createInfo, err := dstFs.createFileInfo(ctx, dstPath, time.Now())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "shortcut destination failed")
|
||||
return nil, fmt.Errorf("shortcut destination failed: %w", err)
|
||||
}
|
||||
createInfo.MimeType = shortcutMimeType
|
||||
createInfo.ShortcutDetails = &drive.FileShortcutDetails{
|
||||
@@ -2970,7 +3077,7 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
|
||||
return dstFs.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "shortcut creation failed")
|
||||
return nil, fmt.Errorf("shortcut creation failed: %w", err)
|
||||
}
|
||||
if isDir {
|
||||
return nil, nil
|
||||
@@ -2990,7 +3097,7 @@ func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.Drive, err err
|
||||
return defaultFs.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return drives, errors.Wrap(err, "listing Team Drives failed")
|
||||
return drives, fmt.Errorf("listing Team Drives failed: %w", err)
|
||||
}
|
||||
drives = append(drives, teamDrives.Drives...)
|
||||
if teamDrives.NextPageToken == "" {
|
||||
@@ -3033,7 +3140,7 @@ func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurs
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, "failed to restore")
|
||||
err = fmt.Errorf("failed to restore: %w", err)
|
||||
r.Errors++
|
||||
fs.Errorf(remote, "%v", err)
|
||||
} else {
|
||||
@@ -3050,7 +3157,7 @@ func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurs
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, "failed to list directory")
|
||||
err = fmt.Errorf("failed to list directory: %w", err)
|
||||
r.Errors++
|
||||
fs.Errorf(dir, "%v", err)
|
||||
}
|
||||
@@ -3074,10 +3181,10 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
|
||||
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
|
||||
info, err := f.getFile(ctx, id, f.fileFields)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "couldn't find id")
|
||||
return fmt.Errorf("couldn't find id: %w", err)
|
||||
}
|
||||
if info.MimeType == driveFolderType {
|
||||
return errors.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
|
||||
return fmt.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
|
||||
}
|
||||
info.Name = f.opt.Enc.ToStandardName(info.Name)
|
||||
o, err := f.newObjectWithInfo(ctx, info.Name, info)
|
||||
@@ -3100,7 +3207,7 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
|
||||
}
|
||||
_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "copy failed")
|
||||
return fmt.Errorf("copy failed: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -3183,7 +3290,7 @@ This will return a JSON list of objects like this
|
||||
|
||||
With the -o config parameter it will output the list in a format
|
||||
suitable for adding to a config file to make aliases for all the
|
||||
drives found.
|
||||
drives found and a combined drive.
|
||||
|
||||
[My Drive]
|
||||
type = alias
|
||||
@@ -3193,10 +3300,15 @@ drives found.
|
||||
type = alias
|
||||
remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
|
||||
|
||||
Adding this to the rclone config file will cause those team drives to
|
||||
be accessible with the aliases shown. This may require manual editing
|
||||
of the names.
|
||||
[AllDrives]
|
||||
type = combine
|
||||
upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
|
||||
|
||||
Adding this to the rclone config file will cause those team drives to
|
||||
be accessible with the aliases shown. Any illegal characters will be
|
||||
substituted with "_" and duplicate names will have numbers suffixed.
|
||||
It will also add a remote called AllDrives which shows all the shared
|
||||
drives combined into one directory tree.
|
||||
`,
|
||||
}, {
|
||||
Name: "untrash",
|
||||
@@ -3244,6 +3356,12 @@ attempted if possible.
|
||||
|
||||
Use the -i flag to see what would be copied before copying.
|
||||
`,
|
||||
}, {
|
||||
Name: "exportformats",
|
||||
Short: "Dump the export formats for debug purposes",
|
||||
}, {
|
||||
Name: "importformats",
|
||||
Short: "Dump the import formats for debug purposes",
|
||||
}}
|
||||
|
||||
// Command the backend to run a named command
|
||||
@@ -3263,7 +3381,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
||||
out["service_account_file"] = f.opt.ServiceAccountFile
|
||||
}
|
||||
if _, ok := opt["chunk_size"]; ok {
|
||||
out["chunk_size"] = fmt.Sprintf("%s", f.opt.ChunkSize)
|
||||
out["chunk_size"] = f.opt.ChunkSize.String()
|
||||
}
|
||||
return out, nil
|
||||
case "set":
|
||||
@@ -3280,11 +3398,11 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
||||
}
|
||||
if chunkSize, ok := opt["chunk_size"]; ok {
|
||||
chunkSizeMap := make(map[string]string)
|
||||
chunkSizeMap["previous"] = fmt.Sprintf("%s", f.opt.ChunkSize)
|
||||
chunkSizeMap["previous"] = f.opt.ChunkSize.String()
|
||||
if err = f.changeChunkSize(chunkSize); err != nil {
|
||||
return out, err
|
||||
}
|
||||
chunkSizeString := fmt.Sprintf("%s", f.opt.ChunkSize)
|
||||
chunkSizeString := f.opt.ChunkSize.String()
|
||||
f.m.Set("chunk_size", chunkSizeString)
|
||||
chunkSizeMap["current"] = chunkSizeString
|
||||
out["chunk_size"] = chunkSizeMap
|
||||
@@ -3299,7 +3417,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
||||
if ok {
|
||||
targetFs, err := cache.Get(ctx, target)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't find target")
|
||||
return nil, fmt.Errorf("couldn't find target: %w", err)
|
||||
}
|
||||
dstFs, ok = targetFs.(*Fs)
|
||||
if !ok {
|
||||
@@ -3312,14 +3430,30 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
re := regexp.MustCompile(`[^\w_. -]+`)
|
||||
if _, ok := opt["config"]; ok {
|
||||
lines := []string{}
|
||||
for _, drive := range drives {
|
||||
upstreams := []string{}
|
||||
names := make(map[string]struct{}, len(drives))
|
||||
for i, drive := range drives {
|
||||
name := re.ReplaceAllString(drive.Name, "_")
|
||||
for {
|
||||
if _, found := names[name]; !found {
|
||||
break
|
||||
}
|
||||
name += fmt.Sprintf("-%d", i)
|
||||
}
|
||||
names[name] = struct{}{}
|
||||
lines = append(lines, "")
|
||||
lines = append(lines, fmt.Sprintf("[%s]", drive.Name))
|
||||
lines = append(lines, fmt.Sprintf("type = alias"))
|
||||
lines = append(lines, fmt.Sprintf("[%s]", name))
|
||||
lines = append(lines, "type = alias")
|
||||
lines = append(lines, fmt.Sprintf("remote = %s,team_drive=%s,root_folder_id=:", f.name, drive.Id))
|
||||
upstreams = append(upstreams, fmt.Sprintf(`"%s=%s:"`, name, name))
|
||||
}
|
||||
lines = append(lines, "")
|
||||
lines = append(lines, "[AllDrives]")
|
||||
lines = append(lines, "type = combine")
|
||||
lines = append(lines, fmt.Sprintf("upstreams = %s", strings.Join(upstreams, " ")))
|
||||
return lines, nil
|
||||
}
|
||||
return drives, nil
|
||||
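The config output now sanitizes shared drive names, de-duplicates them, and collects the combine upstreams line. A small standalone sketch of the same sanitize-then-suffix idea, with made-up drive names:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Strip characters that are awkward in remote names, as the backend does.
	re := regexp.MustCompile(`[^\w_. -]+`)
	drives := []string{"My Drive", "Team/Drive", "Team/Drive"} // hypothetical names

	names := make(map[string]struct{}, len(drives))
	for i, raw := range drives {
		name := re.ReplaceAllString(raw, "_")
		// Suffix the index until the name is unique.
		for {
			if _, found := names[name]; !found {
				break
			}
			name += fmt.Sprintf("-%d", i)
		}
		names[name] = struct{}{}
		fmt.Printf("[%s]\n", name)
	}
	// Prints [My Drive], [Team_Drive] and [Team_Drive-2], each as its own section header.
}
```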
@@ -3338,10 +3472,14 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
||||
arg = arg[2:]
|
||||
err = f.copyID(ctx, id, dest)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed copying %q to %q", id, dest)
|
||||
return nil, fmt.Errorf("failed copying %q to %q: %w", id, dest, err)
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
case "exportformats":
|
||||
return f.exportFormats(ctx), nil
|
||||
case "importformats":
|
||||
return f.importFormats(ctx), nil
|
||||
default:
|
||||
return nil, fs.ErrorCommandNotFound
|
||||
}
|
||||
@@ -3391,12 +3529,6 @@ func (o *baseObject) Size() int64 {
|
||||
return o.bytes
|
||||
}
|
||||
|
||||
// getRemoteInfo returns a drive.File for the remote
|
||||
func (f *Fs) getRemoteInfo(ctx context.Context, remote string) (info *drive.File, err error) {
|
||||
info, _, _, _, _, err = f.getRemoteInfoWithExport(ctx, remote)
|
||||
return
|
||||
}
|
||||
|
||||
// getRemoteInfoWithExport returns a drive.File and the export settings for the remote
|
||||
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
|
||||
info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
|
||||
@@ -3478,6 +3610,14 @@ func (o *baseObject) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// addResourceKey adds a X-Goog-Drive-Resource-Keys header for this
|
||||
// object if required.
|
||||
func (o *baseObject) addResourceKey(header http.Header) {
|
||||
if o.resourceKey != nil {
|
||||
header.Add("X-Goog-Drive-Resource-Keys", fmt.Sprintf("%s/%s", o.id, *o.resourceKey))
|
||||
}
|
||||
}
|
||||
|
||||
// httpResponse gets an http.Response object for the object
|
||||
// using the url and method passed in
|
||||
func (o *baseObject) httpResponse(ctx context.Context, url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
|
||||
@@ -3493,6 +3633,7 @@ func (o *baseObject) httpResponse(ctx context.Context, url, method string, optio
|
||||
// Don't supply range requests for 0 length objects as they always fail
|
||||
delete(req.Header, "Range")
|
||||
}
|
||||
o.addResourceKey(req.Header)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
res, err = o.fs.client.Do(req)
|
||||
if err == nil {
|
||||
@@ -3572,11 +3713,11 @@ func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOpt
|
||||
url += "acknowledgeAbuse=true"
|
||||
_, res, err = o.httpResponse(ctx, url, "GET", options)
|
||||
} else {
|
||||
err = errors.Wrap(err, "Use the --drive-acknowledge-abuse flag to download this file")
|
||||
err = fmt.Errorf("use the --drive-acknowledge-abuse flag to download this file: %w", err)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "open file failed")
|
||||
return nil, fmt.Errorf("open file failed: %w", err)
|
||||
}
|
||||
}
|
||||
return res.Body, nil
|
||||
@@ -3740,14 +3881,14 @@ func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.Object
|
||||
}
|
||||
|
||||
if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
|
||||
return errors.Errorf("can't update google document type without --drive-import-formats")
|
||||
return fmt.Errorf("can't update google document type without --drive-import-formats")
|
||||
}
|
||||
importMimeType = o.fs.findImportFormat(ctx, updateInfo.MimeType)
|
||||
if importMimeType == "" {
|
||||
return errors.Errorf("no import format found for %q", srcMimeType)
|
||||
return fmt.Errorf("no import format found for %q", srcMimeType)
|
||||
}
|
||||
if importMimeType != o.documentMimeType {
|
||||
return errors.Errorf("can't change google document type (o: %q, src: %q, import: %q)", o.documentMimeType, srcMimeType, importMimeType)
|
||||
return fmt.Errorf("can't change google document type (o: %q, src: %q, import: %q)", o.documentMimeType, srcMimeType, importMimeType)
|
||||
}
|
||||
updateInfo.MimeType = importMimeType
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -15,10 +16,10 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
_ "github.com/rclone/rclone/backend/local"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/sync"
|
||||
@@ -28,6 +29,7 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/api/drive/v3"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
func TestDriveScopes(t *testing.T) {
|
||||
@@ -190,6 +192,60 @@ func TestExtensionsForImportFormats(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTestShouldRetry(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
gatewayTimeout := googleapi.Error{
|
||||
Code: 503,
|
||||
}
|
||||
timeoutRetry, timeoutError := f.shouldRetry(ctx, &gatewayTimeout)
|
||||
assert.True(t, timeoutRetry)
|
||||
assert.Equal(t, &gatewayTimeout, timeoutError)
|
||||
generic403 := googleapi.Error{
|
||||
Code: 403,
|
||||
}
|
||||
rLEItem := googleapi.ErrorItem{
|
||||
Reason: "rateLimitExceeded",
|
||||
Message: "User rate limit exceeded.",
|
||||
}
|
||||
generic403.Errors = append(generic403.Errors, rLEItem)
|
||||
oldStopUpload := f.opt.StopOnUploadLimit
|
||||
oldStopDownload := f.opt.StopOnDownloadLimit
|
||||
f.opt.StopOnUploadLimit = true
|
||||
f.opt.StopOnDownloadLimit = true
|
||||
defer func() {
|
||||
f.opt.StopOnUploadLimit = oldStopUpload
|
||||
f.opt.StopOnDownloadLimit = oldStopDownload
|
||||
}()
|
||||
expectedRLError := fserrors.FatalError(&generic403)
|
||||
rateLimitRetry, rateLimitErr := f.shouldRetry(ctx, &generic403)
|
||||
assert.False(t, rateLimitRetry)
|
||||
assert.Equal(t, rateLimitErr, expectedRLError)
|
||||
dQEItem := googleapi.ErrorItem{
|
||||
Reason: "downloadQuotaExceeded",
|
||||
}
|
||||
generic403.Errors[0] = dQEItem
|
||||
expectedDQError := fserrors.FatalError(&generic403)
|
||||
downloadQuotaRetry, downloadQuotaError := f.shouldRetry(ctx, &generic403)
|
||||
assert.False(t, downloadQuotaRetry)
|
||||
assert.Equal(t, downloadQuotaError, expectedDQError)
|
||||
tDFLEItem := googleapi.ErrorItem{
|
||||
Reason: "teamDriveFileLimitExceeded",
|
||||
}
|
||||
generic403.Errors[0] = tDFLEItem
|
||||
expectedTDFLError := fserrors.FatalError(&generic403)
|
||||
teamDriveFileLimitRetry, teamDriveFileLimitError := f.shouldRetry(ctx, &generic403)
|
||||
assert.False(t, teamDriveFileLimitRetry)
|
||||
assert.Equal(t, teamDriveFileLimitError, expectedTDFLError)
|
||||
qEItem := googleapi.ErrorItem{
|
||||
Reason: "quotaExceeded",
|
||||
}
|
||||
generic403.Errors[0] = qEItem
|
||||
expectedQuotaError := fserrors.FatalError(&generic403)
|
||||
quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
|
||||
assert.False(t, quotaExceededRetry)
|
||||
assert.Equal(t, quotaExceededError, expectedQuotaError)
|
||||
}
|
||||
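The new test exercises how 403 responses with quota-style reasons become fatal errors when the stop-on-limit options are set. A hedged sketch of that condition, assuming the fserrors.FatalError and fserrors.IsFatalError helpers behave as they are used here:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/fserrors"
	"google.golang.org/api/googleapi"
)

// quotaError sketches the case the test checks: a 403 with a quota-style
// reason is wrapped as fatal so the transfer stops instead of retrying.
func quotaError() error {
	gerr := &googleapi.Error{Code: 403}
	gerr.Errors = append(gerr.Errors, googleapi.ErrorItem{Reason: "quotaExceeded"})
	return fserrors.FatalError(gerr)
}

func main() {
	fmt.Println(fserrors.IsFatalError(quotaError())) // true
}
```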
|
||||
func (f *Fs) InternalTestDocumentImport(t *testing.T) {
|
||||
oldAllow := f.opt.AllowImportNameChange
|
||||
f.opt.AllowImportNameChange = true
|
||||
@@ -378,9 +434,9 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
|
||||
// Make some objects, one in a subdir
|
||||
contents := random.String(100)
|
||||
file1 := fstest.NewItem("trashDir/toBeTrashed", contents, time.Now())
|
||||
_, obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
|
||||
obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
|
||||
file2 := fstest.NewItem("trashDir/subdir/toBeTrashed", contents, time.Now())
|
||||
_, _ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)
|
||||
_ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)
|
||||
|
||||
// Check objects
|
||||
checkObjects := func() {
|
||||
@@ -422,11 +478,7 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
o := obj.(*Object)
|
||||
|
||||
dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
_ = os.RemoveAll(dir)
|
||||
}()
|
||||
dir := t.TempDir()
|
||||
|
||||
checkFile := func(name string) {
|
||||
filePath := filepath.Join(dir, name)
|
||||
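The ioutil.TempDir plus deferred os.RemoveAll boilerplate is replaced with testing's built-in helper. A minimal sketch of the replacement pattern:

```go
package example

import (
	"os"
	"path/filepath"
	"testing"
)

func TestTempDir(t *testing.T) {
	// t.TempDir creates a unique directory and removes it automatically
	// when the test and its subtests finish - no defer needed.
	dir := t.TempDir()

	path := filepath.Join(dir, "file.txt")
	if err := os.WriteFile(path, []byte("hello"), 0o600); err != nil {
		t.Fatal(err)
	}
}
```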
@@ -491,24 +543,16 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
|
||||
subFs, isDriveFs := subFsResult.(*Fs)
|
||||
require.True(t, isDriveFs)
|
||||
|
||||
tempDir1, err := ioutil.TempDir("", "rclone-drive-agequery1-test")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
_ = os.RemoveAll(tempDir1)
|
||||
}()
|
||||
tempDir1 := t.TempDir()
|
||||
tempFs1, err := fs.NewFs(defCtx, tempDir1)
|
||||
require.NoError(t, err)
|
||||
|
||||
tempDir2, err := ioutil.TempDir("", "rclone-drive-agequery2-test")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
_ = os.RemoveAll(tempDir2)
|
||||
}()
|
||||
tempDir2 := t.TempDir()
|
||||
tempFs2, err := fs.NewFs(defCtx, tempDir2)
|
||||
require.NoError(t, err)
|
||||
|
||||
file1 := fstest.Item{ModTime: time.Now(), Path: "agequery.txt"}
|
||||
_, _ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true)
|
||||
_ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true)
|
||||
|
||||
// validate sync/copy
|
||||
const timeQuery = "(modifiedTime >= '"
|
||||
@@ -557,6 +601,7 @@ func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("UnTrash", f.InternalTestUnTrash)
|
||||
t.Run("CopyID", f.InternalTestCopyID)
|
||||
t.Run("AgeQuery", f.InternalTestAgeQuery)
|
||||
t.Run("ShouldRetry", f.InternalTestShouldRetry)
|
||||
}
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
|
||||
|
||||
@@ -8,13 +8,13 @@ package dropbox
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
@@ -66,7 +66,7 @@ type batcherResponse struct {
|
||||
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
|
||||
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
|
||||
if size > maxBatchSize || size < 0 {
|
||||
return nil, errors.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
|
||||
return nil, fmt.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
|
||||
}
|
||||
|
||||
async := false
|
||||
@@ -91,7 +91,7 @@ func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.
|
||||
case "off":
|
||||
size = 0
|
||||
default:
|
||||
return nil, errors.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
|
||||
return nil, fmt.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
|
||||
}
|
||||
|
||||
b := &batcher{
|
||||
@@ -118,12 +118,12 @@ func (b *batcher) Batching() bool {
|
||||
}
|
||||
|
||||
// finishBatch commits the batch, returning a batch status to poll or maybe complete
|
||||
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (batchStatus *files.UploadSessionFinishBatchLaunch, err error) {
|
||||
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
|
||||
var arg = &files.UploadSessionFinishBatchArg{
|
||||
Entries: items,
|
||||
}
|
||||
err = b.f.pacer.Call(func() (bool, error) {
|
||||
batchStatus, err = b.f.srv.UploadSessionFinishBatch(arg)
|
||||
complete, err = b.f.srv.UploadSessionFinishBatchV2(arg)
|
||||
// If error is insufficient space then don't retry
|
||||
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
|
||||
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
|
||||
@@ -135,9 +135,9 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
|
||||
return err != nil, err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "batch commit failed")
|
||||
return nil, fmt.Errorf("batch commit failed: %w", err)
|
||||
}
|
||||
return batchStatus, nil
|
||||
return complete, nil
|
||||
}
|
||||
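finishBatch now calls the V2 endpoint, which returns the completed entries directly instead of a launch status that may need polling. A sketch of the simplified call shape, based only on the signatures visible in this diff:

```go
package example

import (
	"fmt"

	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
)

// finishBatchV2 is a hedged sketch: it assumes the V2 endpoint used in the
// diff, which returns the completed entries with no async job to poll.
func finishBatchV2(srv files.Client, items []*files.UploadSessionFinishArg) (*files.UploadSessionFinishBatchResult, error) {
	arg := &files.UploadSessionFinishBatchArg{Entries: items}
	complete, err := srv.UploadSessionFinishBatchV2(arg)
	if err != nil {
		return nil, fmt.Errorf("batch commit failed: %w", err)
	}
	// Every input item should have a corresponding entry in the result.
	if len(complete.Entries) != len(items) {
		return nil, fmt.Errorf("expecting %d items in batch but got %d", len(items), len(complete.Entries))
	}
	return complete, nil
}
```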
|
||||
// finishBatchJobStatus waits for the batch to complete returning completed entries
|
||||
@@ -180,7 +180,7 @@ func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *f
|
||||
if err == nil {
|
||||
err = errors.New("batch didn't complete")
|
||||
}
|
||||
return nil, errors.Wrapf(err, "wait for batch failed after %d tries in %v", try, time.Since(startTime))
|
||||
return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
|
||||
}
|
||||
|
||||
// commit a batch
|
||||
@@ -199,30 +199,15 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
|
||||
fs.Debugf(b.f, "Committing %s", desc)
|
||||
|
||||
// finalise the batch getting either a result or a job id to poll
|
||||
batchStatus, err := b.finishBatch(ctx, items)
|
||||
complete, err := b.finishBatch(ctx, items)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// check whether batch is complete
|
||||
var complete *files.UploadSessionFinishBatchResult
|
||||
switch batchStatus.Tag {
|
||||
case "async_job_id":
|
||||
// wait for batch to complete
|
||||
complete, err = b.finishBatchJobStatus(ctx, batchStatus)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case "complete":
|
||||
complete = batchStatus.Complete
|
||||
default:
|
||||
return errors.Errorf("batch returned unknown status %q", batchStatus.Tag)
|
||||
}
|
||||
|
||||
// Check we got the right number of entries
|
||||
entries := complete.Entries
|
||||
if len(entries) != len(results) {
|
||||
return errors.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
|
||||
return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
|
||||
}
|
||||
|
||||
// Report results to clients
|
||||
@@ -250,7 +235,7 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
|
||||
errorTag += "/" + item.Failure.PropertiesError.Tag
|
||||
}
|
||||
}
|
||||
resp.err = errors.Errorf("batch upload failed: %s", errorTag)
|
||||
resp.err = fmt.Errorf("batch upload failed: %s", errorTag)
|
||||
}
|
||||
if !b.async {
|
||||
results[i] <- resp
|
||||
@@ -261,7 +246,7 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
|
||||
|
||||
// Report an error if any failed in the batch
|
||||
if errorTag != "" {
|
||||
return errors.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
|
||||
return fmt.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
|
||||
}
|
||||
|
||||
fs.Debugf(b.f, "Committed %s", desc)
|
||||
@@ -319,6 +304,9 @@ outer:
|
||||
//
|
||||
// Can be called from atexit handler
|
||||
func (b *batcher) Shutdown() {
|
||||
if !b.Batching() {
|
||||
return
|
||||
}
|
||||
b.shutOnce.Do(func() {
|
||||
atexit.Unregister(b.atexit)
|
||||
fs.Infof(b.f, "Commiting uploads - please wait...")
|
||||
|
||||
backend/dropbox/dropbox.go (117 changes, Executable file → Normal file)
@@ -23,6 +23,7 @@ of path_display and all will be well.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
@@ -38,7 +39,6 @@ import (
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/sharing"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/team"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/users"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/backend/dropbox/dbhash"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
@@ -363,24 +363,24 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
if err == nil {
|
||||
return false, err
|
||||
}
|
||||
baseErrString := errors.Cause(err).Error()
|
||||
errString := err.Error()
|
||||
// First check for specific errors
|
||||
if strings.Contains(baseErrString, "insufficient_space") {
|
||||
if strings.Contains(errString, "insufficient_space") {
|
||||
return false, fserrors.FatalError(err)
|
||||
} else if strings.Contains(baseErrString, "malformed_path") {
|
||||
} else if strings.Contains(errString, "malformed_path") {
|
||||
return false, fserrors.NoRetryError(err)
|
||||
}
|
||||
// Then handle any official Retry-After header from Dropbox's SDK
|
||||
switch e := err.(type) {
|
||||
case auth.RateLimitAPIError:
|
||||
if e.RateLimitError.RetryAfter > 0 {
|
||||
fs.Logf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
|
||||
fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
|
||||
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
// Keep old behavior for backward compatibility
|
||||
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
|
||||
if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
|
||||
return true, err
|
||||
}
|
||||
return fserrors.ShouldRetry(err), err
|
||||
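With the switch to %w wrapping, errors.Cause is no longer needed: the wrapped error's Error() text already contains the underlying message, so the substring checks keep working. A small illustration:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

func main() {
	base := errors.New("path/insufficient_space/...")
	wrapped := fmt.Errorf("upload failed: %w", base)

	// With %w wrapping the full message is preserved, so matching on
	// err.Error() sees the same substrings errors.Cause() used to expose.
	fmt.Println(strings.Contains(wrapped.Error(), "insufficient_space")) // true
	fmt.Println(errors.Unwrap(wrapped) == base)                          // true
}
```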
@@ -389,10 +389,10 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||
const minChunkSize = fs.SizeSuffixBase
|
||||
if cs < minChunkSize {
|
||||
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
}
|
||||
if cs > maxChunkSize {
|
||||
return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
|
||||
return fmt.Errorf("%s is greater than %s", cs, maxChunkSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -415,7 +415,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
err = checkUploadChunkSize(opt.ChunkSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "dropbox: chunk size")
|
||||
return nil, fmt.Errorf("dropbox: chunk size: %w", err)
|
||||
}
|
||||
|
||||
// Convert the old token if it exists. The old token was just
|
||||
@@ -427,13 +427,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
|
||||
err := config.SetValueAndSave(name, config.ConfigToken, newToken)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "NewFS convert token")
|
||||
return nil, fmt.Errorf("NewFS convert token: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, getOauthConfig(m))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to configure dropbox")
|
||||
return nil, fmt.Errorf("failed to configure dropbox: %w", err)
|
||||
}
|
||||
|
||||
ci := fs.GetConfig(ctx)
|
||||
@@ -472,9 +472,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
args := team.NewMembersGetInfoArgs(members)
|
||||
|
||||
memberIds, err := f.team.MembersGetInfo(args)
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
|
||||
return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
|
||||
}
|
||||
if len(memberIds) == 0 || memberIds[0].MemberInfo == nil || memberIds[0].MemberInfo.Profile == nil {
|
||||
return nil, fmt.Errorf("dropbox team member not found: %q", opt.Impersonate)
|
||||
}
|
||||
|
||||
cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
|
||||
@@ -551,7 +553,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "get current account failed")
|
||||
return nil, fmt.Errorf("get current account failed: %w", err)
|
||||
}
|
||||
switch x := acc.RootInfo.(type) {
|
||||
case *common.TeamRootInfo:
|
||||
@@ -559,22 +561,24 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
case *common.UserRootInfo:
|
||||
f.ns = x.RootNamespaceId
|
||||
default:
|
||||
return nil, errors.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
|
||||
return nil, fmt.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
|
||||
}
|
||||
fs.Debugf(f, "Using root namespace %q", f.ns)
|
||||
}
|
||||
f.setRoot(root)
|
||||
|
||||
// See if the root is actually an object
|
||||
_, err = f.getFileMetadata(ctx, f.slashRoot)
|
||||
if err == nil {
|
||||
newRoot := path.Dir(f.root)
|
||||
if newRoot == "." {
|
||||
newRoot = ""
|
||||
if f.root != "" {
|
||||
_, err = f.getFileMetadata(ctx, f.slashRoot)
|
||||
if err == nil {
|
||||
newRoot := path.Dir(f.root)
|
||||
if newRoot == "." {
|
||||
newRoot = ""
|
||||
}
|
||||
f.setRoot(newRoot)
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
f.setRoot(newRoot)
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
@@ -710,7 +714,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "list continue")
|
||||
return nil, fmt.Errorf("list continue: %w", err)
|
||||
}
|
||||
}
|
||||
for _, entry := range res.Entries {
|
||||
@@ -784,7 +788,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "list continue")
|
||||
return nil, fmt.Errorf("list continue: %w", err)
|
||||
}
|
||||
}
|
||||
for _, entry := range res.Entries {
|
||||
@@ -850,6 +854,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
arg := files.ListFolderArg{
|
||||
Path: f.opt.Enc.FromStandardPath(root),
|
||||
Recursive: false,
|
||||
Limit: 1000,
|
||||
}
|
||||
if root == "/" {
|
||||
arg.Path = "" // Specify root folder as empty string
|
||||
@@ -877,7 +882,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "list continue")
|
||||
return nil, fmt.Errorf("list continue: %w", err)
|
||||
}
|
||||
}
|
||||
for _, entry := range res.Entries {
|
||||
@@ -989,7 +994,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
|
||||
// check directory exists
|
||||
_, err = f.getDirMetadata(ctx, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Rmdir")
|
||||
return fmt.Errorf("Rmdir: %w", err)
|
||||
}
|
||||
|
||||
root = f.opt.Enc.FromStandardPath(root)
|
||||
@@ -1007,7 +1012,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Rmdir")
|
||||
return fmt.Errorf("Rmdir: %w", err)
|
||||
}
|
||||
if len(res.Entries) != 0 {
|
||||
return errors.New("directory not empty")
|
||||
@@ -1073,7 +1078,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "copy failed")
|
||||
return nil, fmt.Errorf("copy failed: %w", err)
|
||||
}
|
||||
|
||||
// Set the metadata
|
||||
@@ -1083,7 +1088,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
}
|
||||
err = dstObj.setMetadataFromEntry(fileInfo)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "copy failed")
|
||||
return nil, fmt.Errorf("copy failed: %w", err)
|
||||
}
|
||||
|
||||
return dstObj, nil
|
||||
@@ -1134,7 +1139,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "move failed")
|
||||
return nil, fmt.Errorf("move failed: %w", err)
|
||||
}
|
||||
|
||||
// Set the metadata
|
||||
@@ -1144,7 +1149,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
}
|
||||
err = dstObj.setMetadataFromEntry(fileInfo)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "move failed")
|
||||
return nil, fmt.Errorf("move failed: %w", err)
|
||||
}
|
||||
return dstObj, nil
|
||||
}
|
||||
@@ -1194,7 +1199,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
return
|
||||
}
|
||||
if len(listRes.Links) == 0 {
|
||||
err = errors.New("Dropbox says the sharing link already exists, but list came back empty")
|
||||
err = errors.New("sharing link already exists, but list came back empty")
|
||||
return
|
||||
}
|
||||
linkRes = listRes.Links[0]
|
||||
@@ -1206,7 +1211,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
case *sharing.FolderLinkMetadata:
|
||||
link = res.Url
|
||||
default:
|
||||
err = fmt.Errorf("Don't know how to extract link, response has unknown format: %T", res)
|
||||
err = fmt.Errorf("don't know how to extract link, response has unknown format: %T", res)
|
||||
}
|
||||
}
|
||||
return
|
||||
@@ -1252,7 +1257,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "MoveDir failed")
|
||||
return fmt.Errorf("MoveDir failed: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -1266,7 +1271,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "about failed")
|
||||
return nil, err
|
||||
}
|
||||
var total uint64
|
||||
if q.Allocation != nil {
|
||||
@@ -1367,10 +1372,12 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
|
||||
|
||||
if timeout < 30 {
|
||||
timeout = 30
|
||||
fs.Debugf(f, "Increasing poll interval to minimum 30s")
|
||||
}
|
||||
|
||||
if timeout > 480 {
|
||||
timeout = 480
|
||||
fs.Debugf(f, "Decreasing poll interval to maximum 480s")
|
||||
}
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
@@ -1406,7 +1413,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "list continue")
|
||||
return "", fmt.Errorf("list continue: %w", err)
|
||||
}
|
||||
cursor = changeList.Cursor
|
||||
var entryType fs.EntryType
|
||||
@@ -1485,7 +1492,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
}
|
||||
err := o.readMetaData(ctx)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to read hash from metadata")
|
||||
return "", fmt.Errorf("failed to read hash from metadata: %w", err)
|
||||
}
|
||||
return o.hash, nil
|
||||
}
|
||||
@@ -1647,13 +1654,37 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
|
||||
}
|
||||
|
||||
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
|
||||
skip := int64(0)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
// seek to the start in case this is a retry
|
||||
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
|
||||
return false, nil
|
||||
if _, err = chunk.Seek(skip, io.SeekStart); err != nil {
|
||||
return false, err
|
||||
}
|
||||
err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
|
||||
// after session is started, we retry everything
|
||||
if err != nil {
|
||||
// Check for incorrect offset error and retry with new offset
|
||||
if uErr, ok := err.(files.UploadSessionAppendV2APIError); ok {
|
||||
if uErr.EndpointError != nil && uErr.EndpointError.IncorrectOffset != nil {
|
||||
correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
|
||||
delta := int64(correctOffset) - int64(cursor.Offset)
|
||||
skip += delta
|
||||
what := fmt.Sprintf("incorrect offset error received: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
|
||||
if skip < 0 {
|
||||
return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
|
||||
} else if skip == chunkSize {
|
||||
fs.Debugf(o, "%s: chunk received OK - continuing", what)
|
||||
return false, nil
|
||||
} else if skip > chunkSize {
|
||||
// This error should never happen
|
||||
return false, fmt.Errorf("can't seek forwards by more than a chunk to correct offset: %s", what)
|
||||
}
|
||||
// Skip the sent data on next retry
|
||||
cursor.Offset = uint64(int64(cursor.Offset) + delta)
|
||||
fs.Debugf(o, "%s: skipping bytes on retry to fix offset", what)
|
||||
}
|
||||
}
|
||||
}
|
||||
return err != nil, err
|
||||
})
|
||||
if err != nil {
|
||||
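The retry loop now handles Dropbox's incorrect-offset error by working out how much of the current chunk to skip before resending. A hedged sketch of just that bookkeeping, separated out from the upload loop:

```go
package example

import "fmt"

// adjustOffset sketches the arithmetic in this diff: the server reports the
// offset it actually has, and the uploader decides how much of the current
// chunk to skip on the retry.
func adjustOffset(sentOffset, correctOffset uint64, skip, chunkSize int64) (newSkip int64, retry bool, err error) {
	delta := int64(correctOffset) - int64(sentOffset)
	newSkip = skip + delta
	switch {
	case newSkip < 0:
		return newSkip, false, fmt.Errorf("can't seek backwards to correct offset")
	case newSkip == chunkSize:
		// The whole chunk was received - nothing left to resend.
		return newSkip, false, nil
	case newSkip > chunkSize:
		return newSkip, false, fmt.Errorf("can't seek forwards by more than a chunk")
	default:
		// Retry the append, skipping the bytes the server already has.
		return newSkip, true, nil
	}
}
```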
@@ -1738,7 +1769,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
remote := o.remotePath()
|
||||
if ignoredFiles.MatchString(remote) {
|
||||
return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
|
||||
return fserrors.NoRetryError(fmt.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
|
||||
}
|
||||
commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))
|
||||
commitInfo.Mode.Tag = "overwrite"
|
||||
@@ -1757,12 +1788,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
entry, err = o.uploadChunked(ctx, in, commitInfo, size)
|
||||
} else {
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
entry, err = o.fs.srv.Upload(commitInfo, in)
|
||||
entry, err = o.fs.srv.Upload(&files.UploadArg{CommitInfo: *commitInfo}, in)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "upload failed")
|
||||
return fmt.Errorf("upload failed: %w", err)
|
||||
}
|
||||
// If we haven't received data back from batch upload then fake it
|
||||
//
|
||||
|
||||
@@ -2,6 +2,8 @@ package fichier
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -10,7 +12,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
@@ -27,25 +28,44 @@ var retryErrorCodes = []int{
|
||||
509, // Bandwidth Limit Exceeded
|
||||
}
|
||||
|
||||
var errorRegex = regexp.MustCompile(`#\d{1,3}`)
|
||||
|
||||
func parseFichierError(err error) int {
|
||||
matches := errorRegex.FindStringSubmatch(err.Error())
|
||||
if len(matches) == 0 {
|
||||
return 0
|
||||
}
|
||||
code, err := strconv.Atoi(matches[0])
|
||||
if err != nil {
|
||||
fs.Debugf(nil, "failed parsing fichier error: %v", err)
|
||||
return 0
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
// shouldRetry returns a boolean as to whether this resp and err
|
||||
// deserve to be retried. It returns the err as a convenience
|
||||
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
// Detect this error which the integration tests provoke
|
||||
// error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
|
||||
// 1Fichier uses HTTP error code 403 (Forbidden) for all kinds of errors with
|
||||
// responses looking like this: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
|
||||
//
|
||||
// https://1fichier.com/api.html
|
||||
//
|
||||
// file/ls.cgi is limited :
|
||||
//
|
||||
// Warning (can be changed in case of abuses) :
|
||||
// List all files of the account is limited to 1 request per hour.
|
||||
// List folders is limited to 5 000 results and 1 request per folder per 30s.
|
||||
if err != nil && strings.Contains(err.Error(), "Flood detected") {
|
||||
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
|
||||
time.Sleep(30 * time.Second)
|
||||
// We attempt to parse the actual 1Fichier error code from this body and handle it accordingly
|
||||
// Most importantly #374 (Flood detected: IP locked) which the integration tests provoke
|
||||
// The list below is far from complete and should be expanded if we see any more error codes.
|
||||
if err != nil {
|
||||
switch parseFichierError(err) {
|
||||
case 93:
|
||||
return false, err // No such user
|
||||
case 186:
|
||||
return false, err // IP blocked?
|
||||
case 374:
|
||||
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
|
||||
time.Sleep(30 * time.Second)
|
||||
default:
|
||||
}
|
||||
}
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
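The blanket flood-detection sleep becomes a switch on the numeric code that 1Fichier embeds in its error messages. A hedged sketch of extracting such a code; note this sketch captures only the digits so strconv.Atoi never sees the leading '#' (an assumption of the sketch, not a copy of the diff):

```go
package main

import (
	"errors"
	"fmt"
	"regexp"
	"strconv"
)

// codeRegex captures the digits after '#', e.g. 374 from "Flood detected: IP Locked #374".
var codeRegex = regexp.MustCompile(`#(\d{1,3})`)

func errorCode(err error) int {
	m := codeRegex.FindStringSubmatch(err.Error())
	if len(m) < 2 {
		return 0
	}
	code, convErr := strconv.Atoi(m[1])
	if convErr != nil {
		return 0
	}
	return code
}

func main() {
	err := errors.New(`HTTP error 403 returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"`)
	fmt.Println(errorCode(err)) // 374 - the backend sleeps 30s before retrying on this code
}
```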
@@ -81,7 +101,7 @@ func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't read file info")
|
||||
return nil, fmt.Errorf("couldn't read file info: %w", err)
|
||||
}
|
||||
|
||||
return &file, err
|
||||
@@ -110,7 +130,7 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
|
||||
return doretry || !validToken(&token), err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't list files")
|
||||
return nil, fmt.Errorf("couldn't list files: %w", err)
|
||||
}
|
||||
|
||||
return &token, nil
|
||||
@@ -144,7 +164,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't list files")
|
||||
return nil, fmt.Errorf("couldn't list files: %w", err)
|
||||
}
|
||||
|
||||
entries = make([]fs.DirEntry, len(sharedFiles))
|
||||
@@ -173,7 +193,7 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't list files")
|
||||
return nil, fmt.Errorf("couldn't list files: %w", err)
|
||||
}
|
||||
for i := range filesList.Items {
|
||||
item := &filesList.Items[i]
|
||||
@@ -201,7 +221,7 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't list folders")
|
||||
return nil, fmt.Errorf("couldn't list folders: %w", err)
|
||||
}
|
||||
foldersList.Name = f.opt.Enc.ToStandardName(foldersList.Name)
|
||||
for i := range foldersList.SubFolders {
|
||||
@@ -295,7 +315,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't create folder")
|
||||
return nil, fmt.Errorf("couldn't create folder: %w", err)
|
||||
}
|
||||
|
||||
// fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID)
|
||||
@@ -322,10 +342,10 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't remove folder")
|
||||
return nil, fmt.Errorf("couldn't remove folder: %w", err)
|
||||
}
|
||||
if response.Status != "OK" {
|
||||
return nil, errors.Errorf("can't remove folder: %s", response.Message)
|
||||
return nil, fmt.Errorf("can't remove folder: %s", response.Message)
|
||||
}
|
||||
|
||||
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
|
||||
@@ -352,7 +372,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't remove file")
|
||||
return nil, fmt.Errorf("couldn't remove file: %w", err)
|
||||
}
|
||||
|
||||
// fs.Debugf(f, "Removed file with url `%s`", url)
|
||||
@@ -379,7 +399,7 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't copy file")
|
||||
return nil, fmt.Errorf("couldn't copy file: %w", err)
|
||||
}
|
||||
|
||||
return response, nil
|
||||
@@ -404,7 +424,7 @@ func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename stri
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't copy file")
|
||||
return nil, fmt.Errorf("couldn't copy file: %w", err)
|
||||
}
|
||||
|
||||
return response, nil
|
||||
@@ -432,7 +452,7 @@ func (f *Fs) renameFile(ctx context.Context, url string, newName string) (respon
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't rename file")
|
||||
return nil, fmt.Errorf("couldn't rename file: %w", err)
|
||||
}
|
||||
|
||||
return response, nil
|
||||
@@ -453,7 +473,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "didnt got an upload node")
|
||||
return nil, fmt.Errorf("didnt got an upload node: %w", err)
|
||||
}
|
||||
|
||||
// fs.Debugf(f, "Got Upload node")
|
||||
@@ -467,7 +487,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
|
||||
fileName = f.opt.Enc.FromStandardName(fileName)
|
||||
|
||||
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
|
||||
return nil, errors.New("Invalid UploadID")
|
||||
return nil, errors.New("invalid UploadID")
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
@@ -497,7 +517,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't upload file")
|
||||
return nil, fmt.Errorf("couldn't upload file: %w", err)
|
||||
}
|
||||
|
||||
// fs.Debugf(f, "Uploaded File `%s`", fileName)
|
||||
@@ -509,7 +529,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
|
||||
// fs.Debugf(f, "Ending File Upload `%s`", uploadID)
|
||||
|
||||
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
|
||||
return nil, errors.New("Invalid UploadID")
|
||||
return nil, errors.New("invalid UploadID")
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
@@ -531,7 +551,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't finish file upload")
|
||||
return nil, fmt.Errorf("couldn't finish file upload: %w", err)
|
||||
}
|
||||
|
||||
return response, err
|
||||
|
||||
@@ -2,6 +2,7 @@ package fichier
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
@@ -9,7 +10,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
@@ -42,18 +42,15 @@ func init() {
|
||||
}, {
|
||||
Help: "If you want to download a shared folder, add this parameter.",
|
||||
Name: "shared_folder",
|
||||
Required: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Help: "If you want to download a shared file that is password protected, add this parameter.",
|
||||
Name: "file_password",
|
||||
Required: false,
|
||||
Advanced: true,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Help: "If you want to list the files in a shared folder that is password protected, add this parameter.",
|
||||
Name: "folder_password",
|
||||
Required: false,
|
||||
Advanced: true,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
@@ -297,7 +294,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
path, ok := f.dirCache.GetInv(directoryID)
|
||||
|
||||
if !ok {
|
||||
return nil, errors.New("Cannot find dir in dircache")
|
||||
return nil, errors.New("cannot find dir in dircache")
|
||||
}
|
||||
|
||||
return f.newObjectFromFile(ctx, path, file), nil
|
||||
@@ -454,10 +451,10 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
if currentDirectoryID == directoryID {
|
||||
resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't rename file")
|
||||
return nil, fmt.Errorf("couldn't rename file: %w", err)
|
||||
}
|
||||
if resp.Status != "OK" {
|
||||
return nil, errors.Errorf("couldn't rename file: %s", resp.Message)
|
||||
return nil, fmt.Errorf("couldn't rename file: %s", resp.Message)
|
||||
}
|
||||
url = resp.URLs[0].URL
|
||||
} else {
|
||||
@@ -467,10 +464,10 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
}
|
||||
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't move file")
|
||||
return nil, fmt.Errorf("couldn't move file: %w", err)
|
||||
}
|
||||
if resp.Status != "OK" {
|
||||
return nil, errors.Errorf("couldn't move file: %s", resp.Message)
|
||||
return nil, fmt.Errorf("couldn't move file: %s", resp.Message)
|
||||
}
|
||||
url = resp.URLs[0]
|
||||
}
|
||||
@@ -503,10 +500,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
}
|
||||
resp, err := f.copyFile(ctx, srcObj.file.URL, folderID, leaf)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't move file")
|
||||
return nil, fmt.Errorf("couldn't move file: %w", err)
|
||||
}
|
||||
if resp.Status != "OK" {
|
||||
return nil, errors.Errorf("couldn't move file: %s", resp.Message)
|
||||
return nil, fmt.Errorf("couldn't move file: %s", resp.Message)
|
||||
}
|
||||
|
||||
file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)
|
||||
@@ -517,6 +514,32 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return dstObj, nil
|
||||
}
|
||||
|
||||
// About gets quota information
|
||||
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/user/info.cgi",
|
||||
ContentType: "application/json",
|
||||
}
|
||||
var accountInfo AccountInfo
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rest.CallJSON(ctx, &opts, nil, &accountInfo)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read user info: %w", err)
|
||||
}
|
||||
|
||||
// FIXME max upload size would be useful to use in Update
|
||||
usage = &fs.Usage{
|
||||
Used: fs.NewUsageValue(accountInfo.ColdStorage), // bytes in use
|
||||
Total: fs.NewUsageValue(accountInfo.AvailableColdStorage), // bytes total
|
||||
Free: fs.NewUsageValue(accountInfo.AvailableColdStorage - accountInfo.ColdStorage), // bytes free
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
|
||||
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
|
||||
o, err := f.NewObject(ctx, remote)
|
||||
|
||||
@@ -2,11 +2,12 @@ package fichier
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
@@ -122,7 +123,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
// Delete duplicate after successful upload
|
||||
err = o.Remove(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to remove old version")
|
||||
return fmt.Errorf("failed to remove old version: %w", err)
|
||||
}
|
||||
|
||||
// Replace guts of old object with new one
|
||||
|
||||
@@ -182,3 +182,34 @@ type FoldersList struct {
|
||||
Status string `json:"Status"`
|
||||
SubFolders []Folder `json:"sub_folders"`
|
||||
}
|
||||
|
||||
// AccountInfo is the structure how 1Fichier returns user info
|
||||
type AccountInfo struct {
|
||||
StatsDate string `json:"stats_date"`
|
||||
MailRM string `json:"mail_rm"`
|
||||
DefaultQuota int64 `json:"default_quota"`
|
||||
UploadForbidden string `json:"upload_forbidden"`
|
||||
PageLimit int `json:"page_limit"`
|
||||
ColdStorage int64 `json:"cold_storage"`
|
||||
Status string `json:"status"`
|
||||
UseCDN string `json:"use_cdn"`
|
||||
AvailableColdStorage int64 `json:"available_cold_storage"`
|
||||
DefaultPort string `json:"default_port"`
|
||||
DefaultDomain int `json:"default_domain"`
|
||||
Email string `json:"email"`
|
||||
DownloadMenu string `json:"download_menu"`
|
||||
FTPDID int `json:"ftp_did"`
|
||||
DefaultPortFiles string `json:"default_port_files"`
|
||||
FTPReport string `json:"ftp_report"`
|
||||
OverQuota int64 `json:"overquota"`
|
||||
AvailableStorage int64 `json:"available_storage"`
|
||||
CDN string `json:"cdn"`
|
||||
Offer string `json:"offer"`
|
||||
SubscriptionEnd string `json:"subscription_end"`
|
||||
TFA string `json:"2fa"`
|
||||
AllowedColdStorage int64 `json:"allowed_cold_storage"`
|
||||
HotStorage int64 `json:"hot_storage"`
|
||||
DefaultColdStorageQuota int64 `json:"default_cold_storage_quota"`
|
||||
FTPMode string `json:"ftp_mode"`
|
||||
RUReport string `json:"ru_report"`
|
||||
}
|
||||
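The new About support decodes /user/info.cgi into this struct and reports usage from the cold storage fields. A hedged sketch with an invented response body, showing how the two fields map to used and free space:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// accountInfo mirrors just the two fields of the 1Fichier user info response
// that About uses; the JSON body below is invented for illustration.
type accountInfo struct {
	ColdStorage          int64 `json:"cold_storage"`
	AvailableColdStorage int64 `json:"available_cold_storage"`
}

func main() {
	body := []byte(`{"cold_storage": 1048576, "available_cold_storage": 107374182400}`)
	var info accountInfo
	if err := json.Unmarshal(body, &info); err != nil {
		panic(err)
	}
	// Used comes from cold_storage, Total from available_cold_storage,
	// and Free is the difference.
	fmt.Println("used:", info.ColdStorage, "free:", info.AvailableColdStorage-info.ColdStorage)
}
```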
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -32,7 +33,6 @@ import (
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/backend/filefabric/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
@@ -267,7 +267,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, rootID string, path string
|
||||
"pid": rootID,
|
||||
}, &resp, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to check path exists")
|
||||
return nil, fmt.Errorf("failed to check path exists: %w", err)
|
||||
}
|
||||
if resp.Exists != "y" {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
@@ -308,7 +308,7 @@ func (f *Fs) getApplianceInfo(ctx context.Context) error {
|
||||
"token": "*",
|
||||
}, &applianceInfo, nil)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to read appliance version")
|
||||
return fmt.Errorf("failed to read appliance version: %w", err)
|
||||
}
|
||||
f.opt.Version = applianceInfo.SoftwareVersionLabel
|
||||
f.m.Set("version", f.opt.Version)
|
||||
@@ -349,7 +349,7 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
|
||||
"authtoken": f.opt.PermanentToken,
|
||||
}, &info, nil)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to get session token")
|
||||
return "", fmt.Errorf("failed to get session token: %w", err)
|
||||
}
|
||||
refreshed = true
|
||||
now = now.Add(tokenLifeTime)
|
||||
@@ -490,7 +490,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
// Root is a dir - cache its ID
|
||||
f.dirCache.Put(f.root, info.ID)
|
||||
}
|
||||
} else {
|
||||
//} else {
|
||||
// Root is not found so a directory
|
||||
}
|
||||
}
|
||||
@@ -562,7 +562,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
|
||||
"fi_name": f.opt.Enc.FromStandardName(leaf),
|
||||
}, &info, nil)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to create directory")
|
||||
return "", fmt.Errorf("failed to create directory: %w", err)
|
||||
}
|
||||
// fmt.Printf("...Id %q\n", *info.Id)
|
||||
return info.Item.ID, nil
|
||||
@@ -595,7 +595,7 @@ OUTER:
|
||||
var info api.GetFolderContentsResponse
|
||||
_, err = f.rpc(ctx, "getFolderContents", p, &info, nil)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "failed to list directory")
|
||||
return false, fmt.Errorf("failed to list directory: %w", err)
|
||||
}
|
||||
for i := range info.Items {
|
||||
item := &info.Items[i]
|
||||
@@ -726,7 +726,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) (err error) {
|
||||
"completedeletion": "n",
|
||||
}, &info, nil)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to delete file")
|
||||
return fmt.Errorf("failed to delete file: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -763,7 +763,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||
}, &info, nil)
|
||||
f.dirCache.FlushDir(dir)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to remove directory")
|
||||
return fmt.Errorf("failed to remove directory: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -825,7 +825,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
}
|
||||
_, err = f.rpc(ctx, "doCopyFile", p, &info, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to copy file")
|
||||
return nil, fmt.Errorf("failed to copy file: %w", err)
|
||||
}
|
||||
err = dstObj.setMetaData(&info.Item)
|
||||
if err != nil {
|
||||
@@ -857,7 +857,7 @@ func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err
|
||||
"taskid": taskID,
|
||||
}, &info, nil)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to wait for task %s to complete", taskID)
|
||||
return fmt.Errorf("failed to wait for task %s to complete: %w", taskID, err)
|
||||
}
|
||||
if len(info.Tasks) == 0 {
|
||||
// task has finished
|
||||
@@ -890,7 +890,7 @@ func (f *Fs) renameLeaf(ctx context.Context, isDir bool, id string, newLeaf stri
|
||||
"fi_name": newLeaf,
|
||||
}, &info, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to rename leaf")
|
||||
return nil, fmt.Errorf("failed to rename leaf: %w", err)
|
||||
}
|
||||
err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
|
||||
if err != nil {
|
||||
@@ -934,7 +934,7 @@ func (f *Fs) move(ctx context.Context, isDir bool, id, oldLeaf, newLeaf, oldDire
|
||||
"dir_id": newDirectoryID,
|
||||
}, &info, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to move file to new directory")
|
||||
return nil, fmt.Errorf("failed to move file to new directory: %w", err)
|
||||
}
|
||||
item = &info.Item
|
||||
err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
|
||||
@@ -1037,7 +1037,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
||||
var info api.EmptyResponse
|
||||
_, err = f.rpc(ctx, "emptyTrashInBackground", params{}, &info, nil)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to empty trash")
|
||||
return fmt.Errorf("failed to empty trash: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1164,7 +1164,7 @@ func (o *Object) modifyFile(ctx context.Context, keyValues [][2]string) error {
|
||||
"data": data.String(),
|
||||
}, &info, nil)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to update metadata")
|
||||
return fmt.Errorf("failed to update metadata: %w", err)
|
||||
}
|
||||
return o.setMetaData(&info.Item)
|
||||
}
|
||||
@@ -1247,7 +1247,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
_, err = o.fs.rpc(ctx, "doInitUpload", p, &upload, nil)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to initialize upload")
|
||||
return fmt.Errorf("failed to initialize upload: %w", err)
|
||||
}
|
||||
|
||||
// Cancel the upload if aborted or it fails
|
||||
@@ -1290,13 +1290,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return o.fs.shouldRetry(ctx, resp, err, nil, try)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to upload")
|
||||
return fmt.Errorf("failed to upload: %w", err)
|
||||
}
|
||||
if uploader.Success != "y" {
|
||||
return errors.Errorf("upload failed")
|
||||
return fmt.Errorf("upload failed")
|
||||
}
|
||||
if size > 0 && uploader.FileSize != size {
|
||||
return errors.Errorf("upload failed: size mismatch: want %d got %d", size, uploader.FileSize)
|
||||
return fmt.Errorf("upload failed: size mismatch: want %d got %d", size, uploader.FileSize)
|
||||
}
|
||||
|
||||
// Now finalize the file
|
||||
@@ -1308,7 +1308,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
_, err = o.fs.rpc(ctx, "doCompleteUpload", p, &finalize, nil)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to finalize upload")
|
||||
return fmt.Errorf("failed to finalize upload: %w", err)
|
||||
}
|
||||
finalized = true
|
||||
|
||||
|
||||
@@ -4,6 +4,8 @@ package ftp
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/textproto"
|
||||
@@ -14,7 +16,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/jlaffaye/ftp"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
@@ -44,23 +45,24 @@ const (
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "ftp",
|
||||
Description: "FTP Connection",
|
||||
Description: "FTP",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "host",
|
||||
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "FTP username, leave blank for current username, " + currentUser + ".",
|
||||
Name: "user",
|
||||
Help: "FTP username.",
|
||||
Default: currentUser,
|
||||
}, {
|
||||
Name: "port",
|
||||
Help: "FTP port, leave blank to use default (21).",
|
||||
Name: "port",
|
||||
Help: "FTP port number.",
|
||||
Default: 21,
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "FTP password.",
|
||||
IsPassword: true,
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "tls",
|
||||
Help: `Use Implicit FTPS (FTP over TLS).
|
||||
@@ -98,6 +100,11 @@ to an encrypted one. Cannot be used in combination with implicit FTP.`,
|
||||
Help: "Disable using MLSD even if server advertises support.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "disable_utf8",
|
||||
Help: "Disable using UTF-8 even if server advertises support.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "writing_mdtm",
|
||||
Help: "Use MDTM to set modification time (VsFtpd quirk)",
|
||||
@@ -138,6 +145,14 @@ Enabled by default. Use 0 to disable.`,
|
||||
Help: "Maximum time to wait for data connection closing status.",
|
||||
Default: fs.Duration(60 * time.Second),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "ask_password",
|
||||
Default: false,
|
||||
Help: `Allow asking for FTP password when needed.
|
||||
|
||||
If this is set and no password is supplied then rclone will ask for a password
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -174,10 +189,12 @@ type Options struct {
|
||||
SkipVerifyTLSCert bool `config:"no_check_certificate"`
|
||||
DisableEPSV bool `config:"disable_epsv"`
|
||||
DisableMLSD bool `config:"disable_mlsd"`
|
||||
DisableUTF8 bool `config:"disable_utf8"`
|
||||
WritingMDTM bool `config:"writing_mdtm"`
|
||||
IdleTimeout fs.Duration `config:"idle_timeout"`
|
||||
CloseTimeout fs.Duration `config:"close_timeout"`
|
||||
ShutTimeout fs.Duration `config:"shut_timeout"`
|
||||
AskPassword bool `config:"ask_password"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
@@ -327,6 +344,9 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
||||
if f.opt.DisableMLSD {
|
||||
ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
|
||||
}
|
||||
if f.opt.DisableUTF8 {
|
||||
ftpConfig = append(ftpConfig, ftp.DialWithDisabledUTF8(true))
|
||||
}
|
||||
if f.opt.ShutTimeout != 0 && f.opt.ShutTimeout != fs.DurationOff {
|
||||
ftpConfig = append(ftpConfig, ftp.DialWithShutTimeout(time.Duration(f.opt.ShutTimeout)))
|
||||
}
|
||||
@@ -349,7 +369,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "failed to make FTP connection to %q", f.dialAddr)
|
||||
err = fmt.Errorf("failed to make FTP connection to %q: %w", f.dialAddr, err)
|
||||
}
|
||||
return c, err
|
||||
}
|
||||
@@ -396,8 +416,8 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
|
||||
*pc = nil
|
||||
if err != nil {
|
||||
// If not a regular FTP error code then check the connection
|
||||
_, isRegularError := errors.Cause(err).(*textproto.Error)
|
||||
if !isRegularError {
|
||||
var tpErr *textproto.Error
|
||||
if !errors.As(err, &tpErr) {
|
||||
nopErr := c.NoOp()
|
||||
if nopErr != nil {
|
||||
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
|
||||
@@ -443,9 +463,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pass, err := obscure.Reveal(opt.Pass)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "NewFS decrypt password")
|
||||
pass := ""
|
||||
if opt.AskPassword && opt.Pass == "" {
|
||||
pass = config.GetPassword("FTP server password")
|
||||
} else {
|
||||
pass, err = obscure.Reveal(opt.Pass)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("NewFS decrypt password: %w", err)
|
||||
}
|
||||
}
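
For context on the Reveal call above: rclone keeps the FTP password in the config file in obscured (reversibly encoded) form, so it has to be decoded before it can be sent to the server, and the new ask_password branch only applies when no password was stored at all. A small sketch of the obscure round trip using rclone's lib/obscure package (the literal password is an example value):

package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/obscure"
)

func main() {
	// What "rclone config" would store for a password typed as "secret".
	stored := obscure.MustObscure("secret")
	fmt.Println("stored in config:", stored)

	// What NewFs recovers before dialling the server.
	plain, err := obscure.Reveal(stored)
	if err != nil {
		fmt.Println("NewFS decrypt password:", err) // same failure path as above
		return
	}
	fmt.Println("sent to the server:", plain)
}
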
|
||||
user := opt.User
|
||||
if user == "" {
|
||||
@@ -462,7 +487,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
||||
protocol = "ftps://"
|
||||
}
|
||||
if opt.TLS && opt.ExplicitTLS {
|
||||
return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
|
||||
return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
|
||||
}
|
||||
var tlsConfig *tls.Config
|
||||
if opt.TLS || opt.ExplicitTLS {
|
||||
@@ -502,7 +527,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
||||
// Make a connection and pool it to return errors early
|
||||
c, err := f.getFtpConnection(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "NewFs")
|
||||
return nil, fmt.Errorf("NewFs: %w", err)
|
||||
}
|
||||
f.fGetTime = c.IsGetTimeSupported()
|
||||
f.fSetTime = c.IsSetTimeSupported()
|
||||
@@ -520,7 +545,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
||||
}
|
||||
_, err := f.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
|
||||
if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
|
||||
// File doesn't exist so return old f
|
||||
f.root = root
|
||||
return f, nil
|
||||
@@ -599,7 +624,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
|
||||
|
||||
c, err := f.getFtpConnection(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "findItem")
|
||||
return nil, fmt.Errorf("findItem: %w", err)
|
||||
}
|
||||
files, err := c.List(f.dirFromStandardPath(dir))
|
||||
f.putFtpConnection(&c, err)
|
||||
@@ -643,7 +668,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
|
||||
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
|
||||
entry, err := f.findItem(ctx, remote)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "dirExists")
|
||||
return false, fmt.Errorf("dirExists: %w", err)
|
||||
}
|
||||
if entry != nil && entry.Type == ftp.EntryTypeFolder {
|
||||
return true, nil
|
||||
@@ -664,7 +689,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
|
||||
c, err := f.getFtpConnection(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "list")
|
||||
return nil, fmt.Errorf("list: %w", err)
|
||||
}
|
||||
|
||||
var listErr error
|
||||
@@ -693,7 +718,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
case <-timer.C:
|
||||
// if timer fired assume no error but connection dead
|
||||
fs.Errorf(f, "Timeout when waiting for List")
|
||||
return nil, errors.New("Timeout when waiting for List")
|
||||
return nil, errors.New("timeout when waiting for List")
|
||||
}
|
||||
|
||||
// Annoyingly FTP returns success for a directory which
|
||||
@@ -702,7 +727,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
if len(files) == 0 {
|
||||
exists, err := f.dirExists(ctx, dir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "list")
|
||||
return nil, fmt.Errorf("list: %w", err)
|
||||
}
|
||||
if !exists {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
@@ -766,7 +791,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
// fs.Debugf(f, "Trying to put file %s", src.Remote())
|
||||
err := f.mkParentDir(ctx, src.Remote())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Put mkParentDir failed")
|
||||
return nil, fmt.Errorf("Put mkParentDir failed: %w", err)
|
||||
}
|
||||
o := &Object{
|
||||
fs: f,
|
||||
@@ -789,7 +814,7 @@ func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err erro
|
||||
|
||||
c, err := f.getFtpConnection(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "getInfo")
|
||||
return nil, fmt.Errorf("getInfo: %w", err)
|
||||
}
|
||||
files, err := c.List(f.dirFromStandardPath(dir))
|
||||
f.putFtpConnection(&c, err)
|
||||
@@ -827,7 +852,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
|
||||
}
|
||||
return fs.ErrorIsFile
|
||||
} else if err != fs.ErrorObjectNotFound {
|
||||
return errors.Wrapf(err, "mkdir %q failed", abspath)
|
||||
return fmt.Errorf("mkdir %q failed: %w", abspath, err)
|
||||
}
|
||||
parent := path.Dir(abspath)
|
||||
err = f.mkdir(ctx, parent)
|
||||
@@ -836,7 +861,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
|
||||
}
|
||||
c, connErr := f.getFtpConnection(ctx)
|
||||
if connErr != nil {
|
||||
return errors.Wrap(connErr, "mkdir")
|
||||
return fmt.Errorf("mkdir: %w", connErr)
|
||||
}
|
||||
err = c.MakeDir(f.dirFromStandardPath(abspath))
|
||||
f.putFtpConnection(&c, err)
|
||||
@@ -872,7 +897,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
c, err := f.getFtpConnection(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(translateErrorFile(err), "Rmdir")
|
||||
return fmt.Errorf("Rmdir: %w", translateErrorFile(err))
|
||||
}
|
||||
err = c.RemoveDir(f.dirFromStandardPath(path.Join(f.root, dir)))
|
||||
f.putFtpConnection(&c, err)
|
||||
@@ -888,11 +913,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
}
|
||||
err := f.mkParentDir(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Move mkParentDir failed")
|
||||
return nil, fmt.Errorf("Move mkParentDir failed: %w", err)
|
||||
}
|
||||
c, err := f.getFtpConnection(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Move")
|
||||
return nil, fmt.Errorf("Move: %w", err)
|
||||
}
|
||||
err = c.Rename(
|
||||
f.opt.Enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
|
||||
@@ -900,11 +925,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
)
|
||||
f.putFtpConnection(&c, err)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Move Rename failed")
|
||||
return nil, fmt.Errorf("Move Rename failed: %w", err)
|
||||
}
|
||||
dstObj, err := f.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Move NewObject failed")
|
||||
return nil, fmt.Errorf("Move NewObject failed: %w", err)
|
||||
}
|
||||
return dstObj, nil
|
||||
}
|
||||
@@ -934,19 +959,19 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
}
|
||||
return fs.ErrorIsFile
|
||||
} else if err != fs.ErrorObjectNotFound {
|
||||
return errors.Wrapf(err, "DirMove getInfo failed")
|
||||
return fmt.Errorf("DirMove getInfo failed: %w", err)
|
||||
}
|
||||
|
||||
// Make sure the parent directory exists
|
||||
err = f.mkdir(ctx, path.Dir(dstPath))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "DirMove mkParentDir dst failed")
|
||||
return fmt.Errorf("DirMove mkParentDir dst failed: %w", err)
|
||||
}
|
||||
|
||||
// Do the move
|
||||
c, err := f.getFtpConnection(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "DirMove")
|
||||
return fmt.Errorf("DirMove: %w", err)
|
||||
}
|
||||
err = c.Rename(
|
||||
f.dirFromStandardPath(srcPath),
|
||||
@@ -954,7 +979,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
)
|
||||
f.putFtpConnection(&c, err)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "DirMove Rename(%q,%q) failed", srcPath, dstPath)
|
||||
return fmt.Errorf("DirMove Rename(%q,%q) failed: %w", srcPath, dstPath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1111,12 +1136,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
|
||||
}
|
||||
c, err := o.fs.getFtpConnection(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "open")
|
||||
return nil, fmt.Errorf("open: %w", err)
|
||||
}
|
||||
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
|
||||
if err != nil {
|
||||
o.fs.putFtpConnection(&c, err)
|
||||
return nil, errors.Wrap(err, "open")
|
||||
return nil, fmt.Errorf("open: %w", err)
|
||||
}
|
||||
rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
|
||||
return rc, nil
|
||||
@@ -1146,7 +1171,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
c, err := o.fs.getFtpConnection(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Update")
|
||||
return fmt.Errorf("Update: %w", err)
|
||||
}
|
||||
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
|
||||
// Ignore error 250 here - send by some servers
|
||||
@@ -1164,15 +1189,15 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
// recycle connection in advance to let remove() find free token
|
||||
o.fs.putFtpConnection(nil, err)
|
||||
remove()
|
||||
return errors.Wrap(err, "update stor")
|
||||
return fmt.Errorf("update stor: %w", err)
|
||||
}
|
||||
o.fs.putFtpConnection(&c, nil)
|
||||
if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
|
||||
return errors.Wrap(err, "SetModTime")
|
||||
return fmt.Errorf("SetModTime: %w", err)
|
||||
}
|
||||
o.info, err = o.fs.getInfo(ctx, path)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "update getinfo")
|
||||
return fmt.Errorf("update getinfo: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1191,7 +1216,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
} else {
|
||||
c, err := o.fs.getFtpConnection(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Remove")
|
||||
return fmt.Errorf("Remove: %w", err)
|
||||
}
|
||||
err = c.Delete(o.fs.opt.Enc.FromStandardPath(path))
|
||||
o.fs.putFtpConnection(&c, err)
|
||||
|
||||
@@ -36,7 +36,7 @@ func (f *Fs) testUploadTimeout(t *testing.T) {
const (
fileSize = 100000000 // 100 MiB
idleTimeout = 40 * time.Millisecond // small because test server is local
maxTime = 5 * time.Second // prevent test hangup
maxTime = 10 * time.Second // prevent test hangup
)

if testing.Short() {

@@ -16,6 +16,7 @@ import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -23,9 +24,9 @@ import (
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
@@ -43,6 +44,7 @@ import (
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
"google.golang.org/api/googleapi"
|
||||
option "google.golang.org/api/option"
|
||||
|
||||
// NOTE: This API is deprecated
|
||||
storage "google.golang.org/api/storage/v1"
|
||||
@@ -65,7 +67,7 @@ var (
|
||||
Endpoint: google.Endpoint,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.TitleBarRedirectURL,
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
}
|
||||
)
|
||||
|
||||
@@ -182,15 +184,30 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||
}, {
|
||||
Value: "asia-northeast1",
|
||||
Help: "Tokyo",
|
||||
}, {
|
||||
Value: "asia-northeast2",
|
||||
Help: "Osaka",
|
||||
}, {
|
||||
Value: "asia-northeast3",
|
||||
Help: "Seoul",
|
||||
}, {
|
||||
Value: "asia-south1",
|
||||
Help: "Mumbai",
|
||||
}, {
|
||||
Value: "asia-south2",
|
||||
Help: "Delhi",
|
||||
}, {
|
||||
Value: "asia-southeast1",
|
||||
Help: "Singapore",
|
||||
}, {
|
||||
Value: "asia-southeast2",
|
||||
Help: "Jakarta",
|
||||
}, {
|
||||
Value: "australia-southeast1",
|
||||
Help: "Sydney",
|
||||
}, {
|
||||
Value: "australia-southeast2",
|
||||
Help: "Melbourne",
|
||||
}, {
|
||||
Value: "europe-north1",
|
||||
Help: "Finland",
|
||||
@@ -206,6 +223,12 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||
}, {
|
||||
Value: "europe-west4",
|
||||
Help: "Netherlands",
|
||||
}, {
|
||||
Value: "europe-west6",
|
||||
Help: "Zürich",
|
||||
}, {
|
||||
Value: "europe-central2",
|
||||
Help: "Warsaw",
|
||||
}, {
|
||||
Value: "us-central1",
|
||||
Help: "Iowa",
|
||||
@@ -221,6 +244,33 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||
}, {
|
||||
Value: "us-west2",
|
||||
Help: "California",
|
||||
}, {
|
||||
Value: "us-west3",
|
||||
Help: "Salt Lake City",
|
||||
}, {
|
||||
Value: "us-west4",
|
||||
Help: "Las Vegas",
|
||||
}, {
|
||||
Value: "northamerica-northeast1",
|
||||
Help: "Montréal",
|
||||
}, {
|
||||
Value: "northamerica-northeast2",
|
||||
Help: "Toronto",
|
||||
}, {
|
||||
Value: "southamerica-east1",
|
||||
Help: "São Paulo",
|
||||
}, {
|
||||
Value: "southamerica-west1",
|
||||
Help: "Santiago",
|
||||
}, {
|
||||
Value: "asia1",
|
||||
Help: "Dual region: asia-northeast1 and asia-northeast2.",
|
||||
}, {
|
||||
Value: "eur4",
|
||||
Help: "Dual region: europe-north1 and europe-west4.",
|
||||
}, {
|
||||
Value: "nam4",
|
||||
Help: "Dual region: us-central1 and us-east1.",
|
||||
}},
|
||||
}, {
|
||||
Name: "storage_class",
|
||||
@@ -247,6 +297,28 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||
Value: "DURABLE_REDUCED_AVAILABILITY",
|
||||
Help: "Durable reduced availability storage class",
|
||||
}},
|
||||
}, {
|
||||
Name: "no_check_bucket",
|
||||
Help: `If set, don't attempt to check the bucket exists or create it.
|
||||
|
||||
This can be useful when trying to minimise the number of transactions
|
||||
rclone does if you know the bucket exists already.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "decompress",
|
||||
Help: `If set this will decompress gzip encoded objects.
|
||||
|
||||
It is possible to upload objects to GCS with "Content-Encoding: gzip"
|
||||
set. Normally rclone will download these files files as compressed objects.
|
||||
|
||||
If this flag is set then rclone will decompress these files with
|
||||
"Content-Encoding: gzip" as they are received. This means that rclone
|
||||
can't check the size and hash but the file contents will be decompressed.
|
||||
`,
|
||||
Advanced: true,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -269,21 +341,24 @@ type Options struct {
|
||||
BucketPolicyOnly bool `config:"bucket_policy_only"`
|
||||
Location string `config:"location"`
|
||||
StorageClass string `config:"storage_class"`
|
||||
NoCheckBucket bool `config:"no_check_bucket"`
|
||||
Decompress bool `config:"decompress"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
// Fs represents a remote storage server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
svc *storage.Service // the connection to the storage server
|
||||
client *http.Client // authorized client
|
||||
rootBucket string // bucket part of root (if any)
|
||||
rootDirectory string // directory part of root (if any)
|
||||
cache *bucket.Cache // cache of bucket status
|
||||
pacer *fs.Pacer // To pace the API calls
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
svc *storage.Service // the connection to the storage server
|
||||
client *http.Client // authorized client
|
||||
rootBucket string // bucket part of root (if any)
|
||||
rootDirectory string // directory part of root (if any)
|
||||
cache *bucket.Cache // cache of bucket status
|
||||
pacer *fs.Pacer // To pace the API calls
|
||||
warnCompressed sync.Once // warn once about compressed files
|
||||
}
|
||||
|
||||
// Object describes a storage object
|
||||
@@ -297,6 +372,7 @@ type Object struct {
|
||||
bytes int64 // Bytes in the object
|
||||
modTime time.Time // Modified time of the object
|
||||
mimeType string
|
||||
gzipped bool // set if object has Content-Encoding: gzip
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -314,7 +390,7 @@ func (f *Fs) Root() string {
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
if f.rootBucket == "" {
|
||||
return fmt.Sprintf("GCS root")
|
||||
return "GCS root"
|
||||
}
|
||||
if f.rootDirectory == "" {
|
||||
return fmt.Sprintf("GCS bucket %s", f.rootBucket)
|
||||
@@ -375,7 +451,7 @@ func (o *Object) split() (bucket, bucketPath string) {
|
||||
func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http.Client, error) {
|
||||
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error processing credentials")
|
||||
return nil, fmt.Errorf("error processing credentials: %w", err)
|
||||
}
|
||||
ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(ctx))
|
||||
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
|
||||
@@ -408,7 +484,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
|
||||
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error opening service account credentials file")
|
||||
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
|
||||
}
|
||||
opt.ServiceAccountCredentials = string(loadedCreds)
|
||||
}
|
||||
@@ -417,7 +493,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
} else if opt.ServiceAccountCredentials != "" {
|
||||
oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
|
||||
return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
|
||||
}
|
||||
} else {
|
||||
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
|
||||
@@ -425,7 +501,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
ctx := context.Background()
|
||||
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
|
||||
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -434,7 +510,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
|
||||
cache: bucket.NewCache(),
|
||||
}
|
||||
f.setRoot(root)
|
||||
@@ -447,9 +523,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
|
||||
// Create a new authorized Drive client.
|
||||
f.client = oAuthClient
|
||||
f.svc, err = storage.New(f.client)
|
||||
f.svc, err = storage.NewService(context.Background(), option.WithHTTPClient(f.client))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
|
||||
return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
|
||||
}
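
Alongside the error rewording, the deprecated storage.New constructor is replaced by storage.NewService; option.WithHTTPClient hands the already-authorised OAuth client to the generated API library instead of letting it look up credentials itself. A standalone sketch of the same construction (the plain http.DefaultClient stands in for the authorised client built above):

package main

import (
	"context"
	"log"
	"net/http"

	option "google.golang.org/api/option"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	authedClient := http.DefaultClient // placeholder for the OAuth/JWT client

	// Deprecated: svc, err := storage.New(authedClient)
	svc, err := storage.NewService(ctx, option.WithHTTPClient(authedClient))
	if err != nil {
		log.Fatalf("couldn't create Google Cloud Storage client: %v", err)
	}
	_ = svc // ready for svc.Buckets.List(...), svc.Objects.Insert(...), etc.
}
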
|
||||
|
||||
if f.rootBucket != "" && f.rootDirectory != "" {
|
||||
@@ -759,10 +835,10 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
|
||||
return nil
|
||||
} else if gErr, ok := err.(*googleapi.Error); ok {
|
||||
if gErr.Code != http.StatusNotFound {
|
||||
return errors.Wrap(err, "failed to get bucket")
|
||||
return fmt.Errorf("failed to get bucket: %w", err)
|
||||
}
|
||||
} else {
|
||||
return errors.Wrap(err, "failed to get bucket")
|
||||
return fmt.Errorf("failed to get bucket: %w", err)
|
||||
}
|
||||
|
||||
if f.opt.ProjectNumber == "" {
|
||||
@@ -792,6 +868,14 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
|
||||
}, nil)
|
||||
}
|
||||
|
||||
// checkBucket creates the bucket if it doesn't exist unless NoCheckBucket is true
func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
if f.opt.NoCheckBucket {
return nil
}
return f.makeBucket(ctx, bucket)
}

// Rmdir deletes the bucket if the fs is at the root
|
||||
//
|
||||
// Returns an error if it isn't empty: Error 409: The bucket you tried
|
||||
@@ -825,7 +909,7 @@ func (f *Fs) Precision() time.Duration {
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
dstBucket, dstPath := f.split(remote)
|
||||
err := f.makeBucket(ctx, dstBucket)
|
||||
err := f.checkBucket(ctx, dstBucket)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -909,6 +993,7 @@ func (o *Object) setMetaData(info *storage.Object) {
|
||||
o.url = info.MediaLink
|
||||
o.bytes = int64(info.Size)
|
||||
o.mimeType = info.ContentType
|
||||
o.gzipped = info.ContentEncoding == "gzip"
|
||||
|
||||
// Read md5sum
|
||||
md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
|
||||
@@ -947,6 +1032,12 @@ func (o *Object) setMetaData(info *storage.Object) {
|
||||
} else {
|
||||
o.modTime = modTime
|
||||
}
|
||||
|
||||
// If gunzipping then size and md5sum are unknown
|
||||
if o.gzipped && o.fs.opt.Decompress {
|
||||
o.bytes = -1
|
||||
o.md5sum = ""
|
||||
}
|
||||
}
|
||||
|
||||
// readObjectInfo reads the definition for an object
|
||||
@@ -1047,6 +1138,18 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
return nil, err
|
||||
}
|
||||
fs.FixRangeOption(options, o.bytes)
|
||||
if o.gzipped && !o.fs.opt.Decompress {
|
||||
// Allow files which are stored on the cloud storage system
|
||||
// compressed to be downloaded without being decompressed. Note
|
||||
// that setting this here overrides the automatic decompression
|
||||
// in the Transport.
|
||||
//
|
||||
// See: https://cloud.google.com/storage/docs/transcoding
|
||||
req.Header.Set("Accept-Encoding", "gzip")
|
||||
o.fs.warnCompressed.Do(func() {
|
||||
fs.Logf(o, "Not decompressing 'Content-Encoding: gzip' compressed file. Use --gcs-decompress to override")
|
||||
})
|
||||
}
|
||||
fs.OpenOptionAddHTTPHeaders(req.Header, options)
|
||||
var res *http.Response
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
@@ -1065,7 +1168,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
_, isRanging := req.Header["Range"]
|
||||
if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) {
|
||||
_ = res.Body.Close() // ignore error
|
||||
return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
|
||||
return nil, fmt.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
|
||||
}
|
||||
return res.Body, nil
|
||||
}
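
The Accept-Encoding branch above deliberately fetches the stored, still-gzipped bytes; with --gcs-decompress set rclone instead lets the body arrive gzip-encoded and inflates it as it streams through, which is why setMetaData reports the size and MD5 as unknown. A rough sketch of that inflate-while-reading idea using only the standard library (this is the general technique, not the exact rclone code path, and the URL is a placeholder):

package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"net/http"
	"os"
)

// openDecoded returns a reader yielding the decoded body when the response is
// gzip-encoded, and the raw body otherwise.
func openDecoded(resp *http.Response) (io.Reader, error) {
	if resp.Header.Get("Content-Encoding") != "gzip" {
		return resp.Body, nil
	}
	zr, err := gzip.NewReader(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to open gzip stream: %w", err)
	}
	return zr, nil // decoded length is unknown until the stream is drained
}

func main() {
	resp, err := http.Get("https://example.com/compressed-object")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer resp.Body.Close()

	r, err := openDecoded(resp)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	n, _ := io.Copy(io.Discard, r)
	fmt.Println("decoded bytes:", n)
}
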
|
||||
@@ -1075,7 +1178,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
bucket, bucketPath := o.split()
|
||||
err := o.fs.makeBucket(ctx, bucket)
|
||||
err := o.fs.checkBucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ package googlephotos
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
@@ -17,7 +18,6 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/backend/googlephotos/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
@@ -69,7 +69,7 @@ var (
|
||||
Endpoint: google.Endpoint,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.TitleBarRedirectURL,
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
}
|
||||
)
|
||||
|
||||
@@ -85,7 +85,7 @@ func init() {
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't parse config into struct")
|
||||
return nil, fmt.Errorf("couldn't parse config into struct: %w", err)
|
||||
}
|
||||
|
||||
switch config.State {
|
||||
@@ -139,7 +139,7 @@ you want to read the media.`,
|
||||
Default: false,
|
||||
Help: `Also view and download archived media.
|
||||
|
||||
By default rclone does not request archived media. Thus, when syncing,
|
||||
By default, rclone does not request archived media. Thus, when syncing,
|
||||
archived media is not visible in directory listings or transferred.
|
||||
|
||||
Note that media in albums is always visible and synced, no matter
|
||||
@@ -292,7 +292,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
baseClient := fshttp.NewClient(ctx)
|
||||
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to configure Box")
|
||||
return nil, fmt.Errorf("failed to configure Box: %w", err)
|
||||
}
|
||||
|
||||
root = strings.Trim(path.Clean(root), "/")
|
||||
@@ -345,13 +345,13 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "couldn't read openID config")
|
||||
return "", fmt.Errorf("couldn't read openID config: %w", err)
|
||||
}
|
||||
|
||||
// Find userinfo endpoint
|
||||
endpoint, ok := openIDconfig[name].(string)
|
||||
if !ok {
|
||||
return "", errors.Errorf("couldn't find %q from openID config", name)
|
||||
return "", fmt.Errorf("couldn't find %q from openID config", name)
|
||||
}
|
||||
|
||||
return endpoint, nil
|
||||
@@ -374,7 +374,7 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't read user info")
|
||||
return nil, fmt.Errorf("couldn't read user info: %w", err)
|
||||
}
|
||||
return userInfo, nil
|
||||
}
|
||||
@@ -405,7 +405,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "couldn't revoke token")
|
||||
return fmt.Errorf("couldn't revoke token: %w", err)
|
||||
}
|
||||
fs.Infof(f, "res = %+v", res)
|
||||
return nil
|
||||
@@ -492,7 +492,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't list albums")
|
||||
return nil, fmt.Errorf("couldn't list albums: %w", err)
|
||||
}
|
||||
newAlbums := result.Albums
|
||||
if shared {
|
||||
@@ -549,7 +549,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "couldn't list files")
|
||||
return fmt.Errorf("couldn't list files: %w", err)
|
||||
}
|
||||
items := result.MediaItems
|
||||
if len(items) > 0 && items[0].ID == lastID {
|
||||
@@ -562,7 +562,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
|
||||
for i := range items {
|
||||
item := &result.MediaItems[i]
|
||||
remote := item.Filename
|
||||
remote = strings.Replace(remote, "/", "/", -1)
|
||||
remote = strings.ReplaceAll(remote, "/", "/")
|
||||
err = fn(remote, item, false)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -693,7 +693,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't create album")
|
||||
return nil, fmt.Errorf("couldn't create album: %w", err)
|
||||
}
|
||||
f.albums[false].add(&result)
|
||||
return &result, nil
|
||||
@@ -879,7 +879,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "couldn't get media item")
|
||||
return fmt.Errorf("couldn't get media item: %w", err)
|
||||
}
|
||||
o.setMetaData(&item)
|
||||
return nil
|
||||
@@ -1014,7 +1014,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "couldn't upload file")
|
||||
return fmt.Errorf("couldn't upload file: %w", err)
|
||||
}
|
||||
uploadToken := strings.TrimSpace(string(token))
|
||||
if uploadToken == "" {
|
||||
@@ -1042,14 +1042,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create media item")
|
||||
return fmt.Errorf("failed to create media item: %w", err)
|
||||
}
|
||||
if len(result.NewMediaItemResults) != 1 {
|
||||
return errors.New("bad response to BatchCreate wrong number of items")
|
||||
}
|
||||
mediaItemResult := result.NewMediaItemResults[0]
|
||||
if mediaItemResult.Status.Code != 0 {
|
||||
return errors.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
|
||||
return fmt.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
|
||||
}
|
||||
o.setMetaData(&mediaItemResult.MediaItem)
|
||||
|
||||
@@ -1071,7 +1071,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
albumTitle, fileName := match[1], match[2]
|
||||
album, ok := o.fs.albums[false].get(albumTitle)
|
||||
if !ok {
|
||||
return errors.Errorf("couldn't file %q in album %q for delete", fileName, albumTitle)
|
||||
return fmt.Errorf("couldn't file %q in album %q for delete", fileName, albumTitle)
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
@@ -1087,7 +1087,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "couldn't delete item from album")
|
||||
return fmt.Errorf("couldn't delete item from album: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -37,7 +37,7 @@ func TestIntegration(t *testing.T) {
}
f, err := fs.NewFs(ctx, *fstest.RemoteName)
if err == fs.ErrorNotFoundInConfigFile {
t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
t.Skipf("Couldn't create google photos backend - skipping tests: %v", err)
}
require.NoError(t, err)
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/backend/googlephotos/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
@@ -270,7 +269,7 @@ func days(ctx context.Context, f lister, prefix string, match []string) (entries
|
||||
year := match[1]
|
||||
current, err := time.Parse("2006", year)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("bad year %q", match[1])
|
||||
return nil, fmt.Errorf("bad year %q", match[1])
|
||||
}
|
||||
currentYear := current.Year()
|
||||
for current.Year() == currentYear {
|
||||
@@ -284,7 +283,7 @@ func days(ctx context.Context, f lister, prefix string, match []string) (entries
|
||||
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
|
||||
year, err := strconv.Atoi(match[1])
|
||||
if err != nil || year < 1000 || year > 3000 {
|
||||
return sf, errors.Errorf("bad year %q", match[1])
|
||||
return sf, fmt.Errorf("bad year %q", match[1])
|
||||
}
|
||||
sf = api.SearchFilter{
|
||||
Filters: &api.Filters{
|
||||
@@ -300,14 +299,14 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
|
||||
if len(match) >= 3 {
|
||||
month, err := strconv.Atoi(match[2])
|
||||
if err != nil || month < 1 || month > 12 {
|
||||
return sf, errors.Errorf("bad month %q", match[2])
|
||||
return sf, fmt.Errorf("bad month %q", match[2])
|
||||
}
|
||||
sf.Filters.DateFilter.Dates[0].Month = month
|
||||
}
|
||||
if len(match) >= 4 {
|
||||
day, err := strconv.Atoi(match[3])
|
||||
if err != nil || day < 1 || day > 31 {
|
||||
return sf, errors.Errorf("bad day %q", match[3])
|
||||
return sf, fmt.Errorf("bad day %q", match[3])
|
||||
}
|
||||
sf.Filters.DateFilter.Dates[0].Day = day
|
||||
}
|
||||
|
||||
@@ -50,7 +50,7 @@ func (f *testLister) listAlbums(ctx context.Context, shared bool) (all *albums,
|
||||
|
||||
// mock listUploads for testing
|
||||
func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
entries, _ = f.uploaded[dir]
|
||||
entries = f.uploaded[dir]
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -2,9 +2,10 @@ package hasher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
@@ -118,18 +119,18 @@ func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bo
|
||||
case fs.ErrorIsFile:
|
||||
// ok
|
||||
case nil:
|
||||
return errors.Errorf("not a file: %s", sumRemote)
|
||||
return fmt.Errorf("not a file: %s", sumRemote)
|
||||
default:
|
||||
return err
|
||||
}
|
||||
|
||||
sumObj, err := sumFs.NewObject(ctx, path.Base(sumPath))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "cannot open sum file")
|
||||
return fmt.Errorf("cannot open sum file: %w", err)
|
||||
}
|
||||
hashes, err := operations.ParseSumFile(ctx, sumObj)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to parse sum file")
|
||||
return fmt.Errorf("failed to parse sum file: %w", err)
|
||||
}
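
The sum files imported here are, as far as this hunk shows, ordinary checksum listings of the kind rclone hashsum (or sha1sum/md5sum) emits: a hexadecimal digest, whitespace, then the path relative to the remote. Treat the exact layout as an assumption inferred from how ParseSumFile's entries are used; a hypothetical example:

package main

import "fmt"

// exampleSumFile is a made-up input for dbImport in the common
// "<hex digest>  <relative path>" layout, one entry per line.
const exampleSumFile = `da39a3ee5e6b4b0d3255bfef95601890afd80709  empty.bin
2aae6c35c94fcfb415dbe95f408b9ce91ee846ed  docs/hello.txt`

func main() {
	fmt.Println(exampleSumFile)
}
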
|
||||
|
||||
if sticky {
|
||||
|
||||
@@ -4,6 +4,7 @@ package hasher
|
||||
import (
|
||||
"context"
|
||||
"encoding/gob"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
@@ -11,7 +12,6 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
@@ -27,6 +27,9 @@ func init() {
|
||||
Name: "hasher",
|
||||
Description: "Better checksums for other remotes",
|
||||
NewFs: NewFs,
|
||||
MetadataInfo: &fs.MetadataInfo{
|
||||
Help: `Any metadata supported by the underlying remote is read and written.`,
|
||||
},
|
||||
CommandHelp: commandHelp,
|
||||
Options: []fs.Option{{
|
||||
Name: "remote",
|
||||
@@ -102,7 +105,7 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
|
||||
remotePath := fspath.JoinRootPath(opt.Remote, rpath)
|
||||
baseFs, err := cache.Get(ctx, remotePath)
|
||||
if err != nil && err != fs.ErrorIsFile {
|
||||
return nil, errors.Wrapf(err, "failed to derive base remote %q", opt.Remote)
|
||||
return nil, fmt.Errorf("failed to derive base remote %q: %w", opt.Remote, err)
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
@@ -127,7 +130,7 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
|
||||
for _, hashName := range opt.Hashes {
|
||||
var ht hash.Type
|
||||
if err := ht.Set(hashName); err != nil {
|
||||
return nil, errors.Errorf("invalid token %q in hash string %q", hashName, opt.Hashes.String())
|
||||
return nil, fmt.Errorf("invalid token %q in hash string %q", hashName, opt.Hashes.String())
|
||||
}
|
||||
if !f.slowHashes.Contains(ht) {
|
||||
f.autoHashes.Add(ht)
|
||||
@@ -158,6 +161,11 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
|
||||
IsLocal: true,
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
}
|
||||
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
|
||||
|
||||
@@ -202,7 +210,11 @@ func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries,
|
||||
for _, entry := range baseEntries {
|
||||
switch x := entry.(type) {
|
||||
case fs.Object:
|
||||
hashEntries = append(hashEntries, f.wrapObject(x, nil))
|
||||
obj, err := f.wrapObject(x, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hashEntries = append(hashEntries, obj)
|
||||
default:
|
||||
hashEntries = append(hashEntries, entry) // trash in - trash out
|
||||
}
|
||||
@@ -251,7 +263,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
||||
if do := f.Fs.Features().PutStream; do != nil {
|
||||
_ = f.pruneHash(src.Remote())
|
||||
oResult, err := do(ctx, in, src, options...)
|
||||
return f.wrapObject(oResult, err), err
|
||||
return f.wrapObject(oResult, err)
|
||||
}
|
||||
return nil, errors.New("PutStream not supported")
|
||||
}
|
||||
@@ -261,7 +273,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
|
||||
if do := f.Fs.Features().PutUnchecked; do != nil {
|
||||
_ = f.pruneHash(src.Remote())
|
||||
oResult, err := do(ctx, in, src, options...)
|
||||
return f.wrapObject(oResult, err), err
|
||||
return f.wrapObject(oResult, err)
|
||||
}
|
||||
return nil, errors.New("PutUnchecked not supported")
|
||||
}
|
||||
@@ -278,7 +290,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
if do := f.Fs.Features().CleanUp; do != nil {
|
||||
return do(ctx)
|
||||
}
|
||||
return errors.New("CleanUp not supported")
|
||||
return errors.New("not supported by underlying remote")
|
||||
}
|
||||
|
||||
// About gets quota information from the Fs
|
||||
@@ -286,7 +298,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
if do := f.Fs.Features().About; do != nil {
|
||||
return do(ctx)
|
||||
}
|
||||
return nil, errors.New("About not supported")
|
||||
return nil, errors.New("not supported by underlying remote")
|
||||
}
|
||||
|
||||
// ChangeNotify calls the passed function with a path that has had changes.
|
||||
@@ -348,7 +360,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
oResult, err := do(ctx, o.Object, remote)
|
||||
return f.wrapObject(oResult, err), err
|
||||
return f.wrapObject(oResult, err)
|
||||
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
@@ -371,7 +383,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
dir: false,
|
||||
fs: f,
|
||||
})
|
||||
return f.wrapObject(oResult, nil), nil
|
||||
return f.wrapObject(oResult, nil)
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote using server-side move operations.
|
||||
@@ -410,7 +422,7 @@ func (f *Fs) Shutdown(ctx context.Context) (err error) {
|
||||
// NewObject finds the Object at remote.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
o, err := f.Fs.NewObject(ctx, remote)
|
||||
return f.wrapObject(o, err), err
|
||||
return f.wrapObject(o, err)
|
||||
}
|
||||
|
||||
//
|
||||
@@ -424,11 +436,15 @@ type Object struct {
|
||||
}
|
||||
|
||||
// Wrap base object into hasher object
|
||||
func (f *Fs) wrapObject(o fs.Object, err error) *Object {
|
||||
if err != nil || o == nil {
|
||||
return nil
|
||||
func (f *Fs) wrapObject(o fs.Object, err error) (obj fs.Object, outErr error) {
|
||||
// log.Trace(o, "err=%v", err)("obj=%#v, outErr=%v", &obj, &outErr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Object{Object: o, f: f}
|
||||
if o == nil {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
return &Object{Object: o, f: f}, nil
|
||||
}
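
One likely motivation for making wrapObject return an explicit error (a reading of the change, not something stated in the hunk): the old version handed back a typed nil *Object, and a typed nil stored in an interface such as fs.Object compares as non-nil, so o == nil checks at call sites were easy to get wrong. A minimal illustration of that pitfall:

package main

import "fmt"

type object struct{ name string }

// fetch mimics the old wrapObject: on failure it returns a typed nil pointer.
func fetch(ok bool) *object {
	if !ok {
		return nil
	}
	return &object{name: "x"}
}

func main() {
	var iface interface{} = fetch(false)
	fmt.Println(fetch(false) == nil) // true: the pointer itself is nil
	fmt.Println(iface == nil)        // false: the interface holds (*object)(nil)
}
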
|
||||
|
||||
// Fs returns read only access to the Fs that this object is part of
|
||||
@@ -477,6 +493,17 @@ func (o *Object) MimeType(ctx context.Context) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Metadata returns metadata for an object
|
||||
//
|
||||
// It should return nil if there is no Metadata
|
||||
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
|
||||
do, ok := o.Object.(fs.Metadataer)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
return do.Metadata(ctx)
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
@@ -499,10 +526,5 @@ var (
|
||||
_ fs.UserInfoer = (*Fs)(nil)
|
||||
_ fs.Disconnecter = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.ObjectUnWrapper = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
_ fs.SetTierer = (*Object)(nil)
|
||||
_ fs.GetTierer = (*Object)(nil)
|
||||
_ fs.MimeTyper = (*Object)(nil)
|
||||
_ fs.FullObject = (*Object)(nil)
|
||||
)
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
func putFile(ctx context.Context, t *testing.T, f fs.Fs, name, data string) fs.Object {
|
||||
mtime1 := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||
item := fstest.Item{Path: name, ModTime: mtime1}
|
||||
_, o := fstests.PutTestContents(ctx, t, f, &item, data, true)
|
||||
o := fstests.PutTestContents(ctx, t, f, &item, data, true)
|
||||
require.NotNil(t, o)
|
||||
return o
|
||||
}
|
||||
@@ -35,7 +35,7 @@ func (f *Fs) testUploadFromCrypt(t *testing.T) {
|
||||
// make a temporary crypt remote
|
||||
ctx := context.Background()
|
||||
pass := obscure.MustObscure("crypt")
|
||||
remote := fmt.Sprintf(":crypt,remote=%s,password=%s:", tempRoot, pass)
|
||||
remote := fmt.Sprintf(`:crypt,remote="%s",password="%s":`, tempRoot, pass)
|
||||
cryptFs, err := fs.NewFs(ctx, remote)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
||||
@@ -33,6 +33,7 @@ func TestIntegration(t *testing.T) {
|
||||
{Name: "TestHasher", Key: "remote", Value: tempDir},
|
||||
}
|
||||
opt.RemoteName = "TestHasher:"
|
||||
opt.QuickTestOK = true
|
||||
}
|
||||
fstests.Run(t, &opt)
|
||||
}
|
||||
|
||||
@@ -4,11 +4,11 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/gob"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
@@ -199,10 +199,10 @@ func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) {
|
||||
r.Hashes[hashType] = hashVal
|
||||
}
|
||||
if data, err = r.encode(op.key); err != nil {
|
||||
return errors.Wrap(err, "marshal failed")
|
||||
return fmt.Errorf("marshal failed: %w", err)
|
||||
}
|
||||
if err = b.Put([]byte(op.key), data); err != nil {
|
||||
return errors.Wrap(err, "put failed")
|
||||
return fmt.Errorf("put failed: %w", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -294,7 +294,7 @@ func (f *Fs) dumpLine(r *hashRecord, path string, include bool, err error) strin
|
||||
if hashVal == "" || err != nil {
|
||||
hashVal = "-"
|
||||
}
|
||||
hashVal = fmt.Sprintf("%-*s", hash.Width(hashType), hashVal)
|
||||
hashVal = fmt.Sprintf("%-*s", hash.Width(hashType, false), hashVal)
|
||||
hashes = append(hashes, hashName+":"+hashVal)
|
||||
}
|
||||
hashesStr := strings.Join(hashes, " ")
|
||||
|
||||
@@ -2,13 +2,13 @@ package hasher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
@@ -184,7 +184,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (r io.ReadC
|
||||
// Put data into the remote path with given modTime and size
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
var (
|
||||
o *Object
|
||||
o fs.Object
|
||||
common hash.Set
|
||||
rehash bool
|
||||
hashes hashMap
|
||||
@@ -210,8 +210,8 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
|
||||
_ = f.pruneHash(src.Remote())
|
||||
oResult, err := f.Fs.Put(ctx, wrapIn, src, options...)
|
||||
o = f.wrapObject(oResult, err)
|
||||
if o == nil {
|
||||
o, err = f.wrapObject(oResult, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -224,7 +224,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
}
|
||||
}
|
||||
if len(hashes) > 0 {
|
||||
err := o.putHashes(ctx, hashes)
|
||||
err := o.(*Object).putHashes(ctx, hashes)
|
||||
fs.Debugf(o, "Applied %d source hashes, err: %v", len(hashes), err)
|
||||
}
|
||||
return o, err
|
||||
|
||||
@@ -92,7 +92,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if opt.ServicePrincipalName != "" {
|
||||
options.KerberosClient, err = getKerberosClient()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Problem with kerberos authentication: %s", err)
|
||||
return nil, fmt.Errorf("problem with kerberos authentication: %w", err)
|
||||
}
|
||||
options.KerberosServicePrincipleName = opt.ServicePrincipalName
|
||||
|
||||
@@ -263,6 +263,98 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
return f.client.RemoveAll(realpath)
|
||||
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't move - not same remote type")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
// Get the real paths from the remote specs:
|
||||
sourcePath := srcObj.fs.realpath(srcObj.remote)
|
||||
targetPath := f.realpath(remote)
|
||||
fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath)
|
||||
|
||||
// Make sure the target folder exists:
|
||||
dirname := path.Dir(targetPath)
|
||||
err := f.client.MkdirAll(dirname, 0755)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Do the move
|
||||
// Note that the underlying HDFS library hard-codes Overwrite=True, but this is expected rclone behaviour.
|
||||
err = f.client.Rename(sourcePath, targetPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Look up the resulting object
|
||||
info, err := f.client.Stat(targetPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// And return it:
|
||||
return &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
size: info.Size(),
|
||||
modTime: info.ModTime(),
|
||||
}, nil
|
||||
}
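
With Move here (and DirMove below) the backend advertises server-side renames, so higher layers can avoid the copy-then-delete fallback. Callers discover the capability through the Features struct; a simplified sketch of that check (the helper name and the fallback signalling are invented, the Features().Move field is the real hook):

package example

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// moveOrFallback renames src server-side when the destination backend supports
// it, and otherwise reports that the caller must fall back to copy + delete.
func moveOrFallback(ctx context.Context, dst fs.Fs, src fs.Object, remote string) (fs.Object, bool, error) {
	if doMove := dst.Features().Move; doMove != nil {
		obj, err := doMove(ctx, src, remote)
		return obj, false, err
	}
	return nil, true, nil
}
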
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server-side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantDirMove
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
|
||||
// Get the real paths from the remote specs:
|
||||
sourcePath := srcFs.realpath(srcRemote)
|
||||
targetPath := f.realpath(dstRemote)
|
||||
fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath)
|
||||
|
||||
// Check if the destination exists:
|
||||
info, err := f.client.Stat(targetPath)
|
||||
if err == nil {
|
||||
fs.Debugf(f, "target directory already exits, IsDir = [%t]", info.IsDir())
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
|
||||
// Make sure the target's parent folder exists:
|
||||
dirname := path.Dir(targetPath)
|
||||
err = f.client.MkdirAll(dirname, 0755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Do the move
|
||||
err = f.client.Rename(sourcePath, targetPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// About gets quota information from the Fs
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
info, err := f.client.StatFs()
|
||||
@@ -318,4 +410,6 @@ var (
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
)
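
These compile-time assertions record which optional interfaces the hdfs backend now implements. As a hedged, library-style sketch (not rclone's own wiring; the helper name serverSideMove is made up for illustration), this is roughly how a caller discovers such an optional capability at runtime:

package hdfsmoveexample

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
)

// serverSideMove tries the optional Mover interface and reports
// fs.ErrorCantMove when the backend does not support server-side moves,
// in which case the caller would fall back to copy-and-delete.
func serverSideMove(ctx context.Context, f fs.Fs, src fs.Object, remote string) (fs.Object, error) {
	mover, ok := f.(fs.Mover)
	if !ok {
		return nil, fs.ErrorCantMove
	}
	obj, err := mover.Move(ctx, src, remote)
	if err != nil {
		return nil, fmt.Errorf("server-side move failed: %w", err)
	}
	return obj, nil
}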
|
||||
|
||||
@@ -22,9 +22,8 @@ func init() {
|
||||
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "username",
|
||||
Help: "Hadoop user name.",
|
||||
Required: false,
|
||||
Name: "username",
|
||||
Help: "Hadoop user name.",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "root",
|
||||
Help: "Connect to hdfs as root.",
|
||||
@@ -36,7 +35,6 @@ func init() {
|
||||
Enables KERBEROS authentication. Specifies the Service Principal Name
|
||||
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
|
||||
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
|
||||
Required: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "data_transfer_protection",
|
||||
@@ -46,7 +44,6 @@ Specifies whether or not authentication, data signature integrity
|
||||
checks, and wire encryption are required when communicating with the
|
||||
datanodes. Possible values are 'authentication', 'integrity' and
|
||||
'privacy'. Used only with KERBEROS enabled.`,
|
||||
Required: false,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "privacy",
|
||||
Help: "Ensure authentication, integrity and encryption enabled.",
|
||||
|
||||
@@ -115,7 +115,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return err
|
||||
}
|
||||
|
||||
info, err := o.fs.client.Stat(realpath)
|
||||
_, err = o.fs.client.Stat(realpath)
|
||||
if err == nil {
|
||||
err = o.fs.client.Remove(realpath)
|
||||
if err != nil {
|
||||
@@ -147,7 +147,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return err
|
||||
}
|
||||
|
||||
info, err = o.fs.client.Stat(realpath)
|
||||
info, err := o.fs.client.Stat(realpath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
backend/hidrive/api/queries.go (new file, 81 lines)
@@ -0,0 +1,81 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Some presets for different amounts of information that can be requested for fields;
|
||||
// it is recommended to only request the information that is actually needed.
|
||||
var (
|
||||
HiDriveObjectNoMetadataFields = []string{"name", "type"}
|
||||
HiDriveObjectWithMetadataFields = append(HiDriveObjectNoMetadataFields, "id", "size", "mtime", "chash")
|
||||
HiDriveObjectWithDirectoryMetadataFields = append(HiDriveObjectWithMetadataFields, "nmembers")
|
||||
DirectoryContentFields = []string{"nmembers"}
|
||||
)
|
||||
|
||||
// QueryParameters represents the parameters passed to an API-call.
|
||||
type QueryParameters struct {
|
||||
url.Values
|
||||
}
|
||||
|
||||
// NewQueryParameters initializes an instance of QueryParameters and
|
||||
// returns a pointer to it.
|
||||
func NewQueryParameters() *QueryParameters {
|
||||
return &QueryParameters{url.Values{}}
|
||||
}
|
||||
|
||||
// SetFileInDirectory sets the appropriate parameters
|
||||
// to specify a path to a file in a directory.
|
||||
// This is used by requests that work with paths for files that do not exist yet.
|
||||
// (For example when creating a file).
|
||||
// Most requests use the format produced by SetPath(...).
|
||||
func (p *QueryParameters) SetFileInDirectory(filePath string) {
|
||||
directory, file := path.Split(path.Clean(filePath))
|
||||
p.Set("dir", path.Clean(directory))
|
||||
p.Set("name", file)
|
||||
// NOTE: It would be possible to switch to pid-based requests
|
||||
// by modifying this function.
|
||||
}
|
||||
|
||||
// SetPath sets the appropriate parameters to access the given path.
|
||||
func (p *QueryParameters) SetPath(objectPath string) {
|
||||
p.Set("path", path.Clean(objectPath))
|
||||
// NOTE: It would be possible to switch to pid-based requests
|
||||
// by modifying this function.
|
||||
}
|
||||
|
||||
// SetTime sets the key to the time-value. It replaces any existing values.
|
||||
func (p *QueryParameters) SetTime(key string, value time.Time) error {
|
||||
valueAPI := Time(value)
|
||||
valueBytes, err := json.Marshal(&valueAPI)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.Set(key, string(valueBytes))
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddList adds the given values as a list
|
||||
// with each value separated by the separator.
|
||||
// It appends to any existing values associated with key.
|
||||
func (p *QueryParameters) AddList(key string, separator string, values ...string) {
|
||||
original := p.Get(key)
|
||||
p.Set(key, strings.Join(values, separator))
|
||||
if original != "" {
|
||||
p.Set(key, original+separator+p.Get(key))
|
||||
}
|
||||
}
|
||||
|
||||
// AddFields sets the appropriate parameter to access the given fields.
|
||||
// The given fields will be appended to any other existing fields.
|
||||
func (p *QueryParameters) AddFields(prefix string, fields ...string) {
|
||||
modifiedFields := make([]string, len(fields))
|
||||
for i, field := range fields {
|
||||
modifiedFields[i] = prefix + field
|
||||
}
|
||||
p.AddList("fields", ",", modifiedFields...)
|
||||
}
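
Taken together, these helpers build the query strings for the raw HiDrive endpoints. A minimal usage sketch (the path "/users/example/photos" is only an illustrative value) mirroring how a directory listing is composed later in this diff:

package main

import (
	"fmt"

	"github.com/rclone/rclone/backend/hidrive/api"
)

func main() {
	p := api.NewQueryParameters()
	p.SetPath("/users/example/photos")
	// Request metadata for the directory members and the member count of
	// the directory itself; AddFields appends to any previous "fields" value.
	p.AddFields("members.", api.HiDriveObjectWithMetadataFields...)
	p.AddFields("", api.DirectoryContentFields...)
	p.AddList("sort", ",", "name")

	// The encoded values would be sent as the parameters of a GET /dir call.
	fmt.Println(p.Encode())
}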
|
||||
backend/hidrive/api/types.go (new file, 135 lines)
@@ -0,0 +1,135 @@
|
||||
// Package api has type definitions and code related to API-calls for the HiDrive-API.
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Time represents date and time information for the API.
|
||||
type Time time.Time
|
||||
|
||||
// MarshalJSON turns Time into JSON (in Unix-time/UTC).
|
||||
func (t *Time) MarshalJSON() ([]byte, error) {
|
||||
secs := time.Time(*t).Unix()
|
||||
return []byte(strconv.FormatInt(secs, 10)), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON turns JSON into Time.
|
||||
func (t *Time) UnmarshalJSON(data []byte) error {
|
||||
secs, err := strconv.ParseInt(string(data), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*t = Time(time.Unix(secs, 0))
|
||||
return nil
|
||||
}
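
A small round-trip sketch of the wire format: timestamps are exchanged as whole Unix seconds, so sub-second precision does not survive a round trip.

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/rclone/rclone/backend/hidrive/api"
)

func main() {
	orig := api.Time(time.Date(2022, time.June, 1, 12, 0, 0, 0, time.UTC))

	data, err := json.Marshal(&orig)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // 1654084800

	var decoded api.Time
	if err := json.Unmarshal(data, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(time.Time(decoded).UTC()) // 2022-06-01 12:00:00 +0000 UTC
}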
|
||||
|
||||
// Error is returned from the API when things go wrong.
|
||||
type Error struct {
|
||||
Code json.Number `json:"code"`
|
||||
ContextInfo json.RawMessage
|
||||
Message string `json:"msg"`
|
||||
}
|
||||
|
||||
// Error returns a string for the error and satisfies the error interface.
|
||||
func (e *Error) Error() string {
|
||||
out := fmt.Sprintf("Error %q", e.Code.String())
|
||||
if e.Message != "" {
|
||||
out += ": " + e.Message
|
||||
}
|
||||
if e.ContextInfo != nil {
|
||||
out += fmt.Sprintf(" (%+v)", e.ContextInfo)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Check Error satisfies the error interface.
|
||||
var _ error = (*Error)(nil)
|
||||
|
||||
// possible types for HiDriveObject
|
||||
const (
|
||||
HiDriveObjectTypeDirectory = "dir"
|
||||
HiDriveObjectTypeFile = "file"
|
||||
HiDriveObjectTypeSymlink = "symlink"
|
||||
)
|
||||
|
||||
// HiDriveObject describes a folder, a symlink or a file.
|
||||
// Depending on the type and content, not all fields are present.
|
||||
type HiDriveObject struct {
|
||||
Type string `json:"type"`
|
||||
ID string `json:"id"`
|
||||
ParentID string `json:"parent_id"`
|
||||
Name string `json:"name"`
|
||||
Path string `json:"path"`
|
||||
Size int64 `json:"size"`
|
||||
MemberCount int64 `json:"nmembers"`
|
||||
ModifiedAt Time `json:"mtime"`
|
||||
ChangedAt Time `json:"ctime"`
|
||||
MetaHash string `json:"mhash"`
|
||||
MetaOnlyHash string `json:"mohash"`
|
||||
NameHash string `json:"nhash"`
|
||||
ContentHash string `json:"chash"`
|
||||
IsTeamfolder bool `json:"teamfolder"`
|
||||
Readable bool `json:"readable"`
|
||||
Writable bool `json:"writable"`
|
||||
Shareable bool `json:"shareable"`
|
||||
MIMEType string `json:"mime_type"`
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the HiDriveObject.
|
||||
func (i *HiDriveObject) ModTime() time.Time {
|
||||
t := time.Time(i.ModifiedAt)
|
||||
if t.IsZero() {
|
||||
t = time.Time(i.ChangedAt)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// UnmarshalJSON turns JSON into HiDriveObject and
|
||||
// introduces specific default-values where necessary.
|
||||
func (i *HiDriveObject) UnmarshalJSON(data []byte) error {
|
||||
type objectAlias HiDriveObject
|
||||
defaultObject := objectAlias{
|
||||
Size: -1,
|
||||
MemberCount: -1,
|
||||
}
|
||||
|
||||
err := json.Unmarshal(data, &defaultObject)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
name, err := url.PathUnescape(defaultObject.Name)
|
||||
if err == nil {
|
||||
defaultObject.Name = name
|
||||
}
|
||||
|
||||
*i = HiDriveObject(defaultObject)
|
||||
return nil
|
||||
}
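
A short sketch of why the custom unmarshalling matters: fields absent from the response stay at -1 instead of 0, so a missing size can be told apart from an empty file.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/backend/hidrive/api"
)

func main() {
	var obj api.HiDriveObject
	// A listing entry that was requested without size information.
	if err := json.Unmarshal([]byte(`{"name":"docs","type":"dir"}`), &obj); err != nil {
		panic(err)
	}
	fmt.Println(obj.Name, obj.Type, obj.Size, obj.MemberCount) // docs dir -1 -1
}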
|
||||
|
||||
// DirectoryContent describes the content of a directory.
|
||||
type DirectoryContent struct {
|
||||
TotalCount int64 `json:"nmembers"`
|
||||
Entries []HiDriveObject `json:"members"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON turns JSON into DirectoryContent and
|
||||
// introduces specific default-values where necessary.
|
||||
func (d *DirectoryContent) UnmarshalJSON(data []byte) error {
|
||||
type directoryContentAlias DirectoryContent
|
||||
defaultDirectoryContent := directoryContentAlias{
|
||||
TotalCount: -1,
|
||||
}
|
||||
|
||||
err := json.Unmarshal(data, &defaultDirectoryContent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*d = DirectoryContent(defaultDirectoryContent)
|
||||
return nil
|
||||
}
|
||||
backend/hidrive/helpers.go (new file, 888 lines)
@@ -0,0 +1,888 @@
|
||||
package hidrive
|
||||
|
||||
// This file is for helper-functions which may provide more general and
|
||||
// specialized functionality than the generic interfaces.
|
||||
// There are two sections:
|
||||
// 1. methods bound to Fs
|
||||
// 2. other functions independent from Fs used throughout the package
|
||||
|
||||
// NOTE: Functions accessing paths expect any relative paths
|
||||
// to be resolved prior to execution with resolvePath(...).
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/hidrive/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/lib/ranges"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"golang.org/x/sync/semaphore"
|
||||
)
|
||||
|
||||
const (
|
||||
// MaximumUploadBytes represents the maximum amount of bytes
|
||||
// a single upload-operation will support.
|
||||
MaximumUploadBytes = 2147483647 // = 2GiB - 1
|
||||
// iterationChunkSize represents the chunk size used to iterate directory contents.
|
||||
iterationChunkSize = 5000
|
||||
)
|
||||
|
||||
var (
|
||||
// retryErrorCodes is a slice of error codes that we will always retry.
|
||||
retryErrorCodes = []int{
|
||||
429, // Too Many Requests
|
||||
500, // Internal Server Error
|
||||
502, // Bad Gateway
|
||||
503, // Service Unavailable
|
||||
504, // Gateway Timeout
|
||||
509, // Bandwidth Limit Exceeded
|
||||
}
|
||||
// ErrorFileExists is returned when a query tries to create a file
|
||||
// that already exists.
|
||||
ErrorFileExists = errors.New("destination file already exists")
|
||||
)
|
||||
|
||||
// MemberType represents the possible types of entries a directory can contain.
|
||||
type MemberType string
|
||||
|
||||
// possible values for MemberType
|
||||
const (
|
||||
AllMembers MemberType = "all"
|
||||
NoMembers MemberType = "none"
|
||||
DirectoryMembers MemberType = api.HiDriveObjectTypeDirectory
|
||||
FileMembers MemberType = api.HiDriveObjectTypeFile
|
||||
SymlinkMembers MemberType = api.HiDriveObjectTypeSymlink
|
||||
)
|
||||
|
||||
// SortByField represents possible fields to sort entries of a directory by.
|
||||
type SortByField string
|
||||
|
||||
// possible values for SortByField
|
||||
const (
|
||||
descendingSort string = "-"
|
||||
SortByName SortByField = "name"
|
||||
SortByModTime SortByField = "mtime"
|
||||
SortByObjectType SortByField = "type"
|
||||
SortBySize SortByField = "size"
|
||||
SortByNameDescending SortByField = SortByField(descendingSort) + SortByName
|
||||
SortByModTimeDescending SortByField = SortByField(descendingSort) + SortByModTime
|
||||
SortByObjectTypeDescending SortByField = SortByField(descendingSort) + SortByObjectType
|
||||
SortBySizeDescending SortByField = SortByField(descendingSort) + SortBySize
|
||||
)
|
||||
|
||||
var (
|
||||
// Unsorted disables sorting and can therefore not be combined with other values.
|
||||
Unsorted = []SortByField{"none"}
|
||||
// DefaultSorted does not specify how to sort and
|
||||
// therefore implies the default sort order.
|
||||
DefaultSorted = []SortByField{}
|
||||
)
|
||||
|
||||
// CopyOrMoveOperationType represents the possible types of copy- and move-operations.
|
||||
type CopyOrMoveOperationType int
|
||||
|
||||
// possible values for CopyOrMoveOperationType
|
||||
const (
|
||||
MoveOriginal CopyOrMoveOperationType = iota
|
||||
CopyOriginal
|
||||
CopyOriginalPreserveModTime
|
||||
)
|
||||
|
||||
// OnExistAction represents possible actions the API should take,
|
||||
// when a request tries to create a path that already exists.
|
||||
type OnExistAction string
|
||||
|
||||
// possible values for OnExistAction
|
||||
const (
|
||||
// IgnoreOnExist instructs the API not to execute
|
||||
// the request in case of a conflict, but to return an error.
|
||||
IgnoreOnExist OnExistAction = "ignore"
|
||||
// AutoNameOnExist instructs the API to automatically rename
|
||||
// any conflicting request-objects.
|
||||
AutoNameOnExist OnExistAction = "autoname"
|
||||
// OverwriteOnExist instructs the API to overwrite any conflicting files.
|
||||
// This can only be used, if the request operates on files directly.
|
||||
// (For example when moving/copying a file.)
|
||||
// For most requests this action will simply be ignored.
|
||||
OverwriteOnExist OnExistAction = "overwrite"
|
||||
)
|
||||
|
||||
// shouldRetry returns a boolean as to whether this resp and err deserve to be retried.
|
||||
// It tries to expire/invalidate the token, if necessary.
|
||||
// It returns the err as a convenience.
|
||||
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
if resp != nil && (resp.StatusCode == 401 || isHTTPError(err, 401)) && len(resp.Header["Www-Authenticate"]) > 0 {
|
||||
fs.Debugf(f, "Token might be invalid: %v", err)
|
||||
if f.tokenRenewer != nil {
|
||||
iErr := f.tokenRenewer.Expire()
|
||||
if iErr == nil {
|
||||
return true, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
// resolvePath resolves the given (relative) path and
|
||||
// returns a path suitable for API-calls.
|
||||
// This will consider the root-path of the fs and any needed prefixes.
|
||||
//
|
||||
// Any relative paths passed to functions that access these paths should
|
||||
// be resolved with this first!
|
||||
func (f *Fs) resolvePath(objectPath string) string {
|
||||
resolved := path.Join(f.opt.RootPrefix, f.root, f.opt.Enc.FromStandardPath(objectPath))
|
||||
return resolved
|
||||
}
|
||||
|
||||
// iterateOverDirectory calls the given function callback
|
||||
// on each item found in a given directory.
|
||||
//
|
||||
// If callback ever returns true then this exits early with found = true.
|
||||
func (f *Fs) iterateOverDirectory(ctx context.Context, directory string, searchOnly MemberType, callback func(*api.HiDriveObject) bool, fields []string, sortBy []SortByField) (found bool, err error) {
|
||||
parameters := api.NewQueryParameters()
|
||||
parameters.SetPath(directory)
|
||||
parameters.AddFields("members.", fields...)
|
||||
parameters.AddFields("", api.DirectoryContentFields...)
|
||||
parameters.Set("members", string(searchOnly))
|
||||
for _, v := range sortBy {
|
||||
// The explicit conversion is necessary for each element.
|
||||
parameters.AddList("sort", ",", string(v))
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/dir",
|
||||
Parameters: parameters.Values,
|
||||
}
|
||||
|
||||
iterateContent := func(result *api.DirectoryContent, err error) (bool, error) {
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, item := range result.Entries {
|
||||
item.Name = f.opt.Enc.ToStandardName(item.Name)
|
||||
if callback(&item) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
return f.paginateDirectoryAccess(ctx, &opts, iterationChunkSize, 0, iterateContent)
|
||||
}
|
||||
|
||||
// paginateDirectoryAccess executes requests specified via ctx and opts
|
||||
// which should produce api.DirectoryContent.
|
||||
// This will paginate the requests using limit starting at the given offset.
|
||||
//
|
||||
// The given function callback is called on each api.DirectoryContent found
|
||||
// along with any errors that occurred.
|
||||
// If callback ever returns true then this exits early with found = true.
|
||||
// If callback ever returns an error then this exits early with that error.
|
||||
func (f *Fs) paginateDirectoryAccess(ctx context.Context, opts *rest.Opts, limit int64, offset int64, callback func(*api.DirectoryContent, error) (bool, error)) (found bool, err error) {
|
||||
for {
|
||||
opts.Parameters.Set("limit", strconv.FormatInt(offset, 10)+","+strconv.FormatInt(limit, 10))
|
||||
|
||||
var result api.DirectoryContent
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, opts, nil, &result)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
found, err = callback(&result, err)
|
||||
if found || err != nil {
|
||||
return found, err
|
||||
}
|
||||
|
||||
offset += int64(len(result.Entries))
|
||||
if offset >= result.TotalCount || limit > int64(len(result.Entries)) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
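
The same pagination contract, reduced to a self-contained sketch with plain ints in place of API results (the names and values below are purely illustrative):

package main

import "fmt"

// paginate fetches pages of up to limit items starting at offset until the
// callback stops early or the reported total has been reached, mirroring the
// loop in paginateDirectoryAccess above.
func paginate(total, limit int, fetch func(offset, limit int) []int, callback func(page []int) bool) (found bool) {
	for offset := 0; ; {
		page := fetch(offset, limit)
		if callback(page) {
			return true
		}
		offset += len(page)
		if offset >= total || len(page) < limit {
			return false
		}
	}
}

func main() {
	data := []int{1, 2, 3, 4, 5, 6, 7}
	fetch := func(offset, limit int) []int {
		end := offset + limit
		if end > len(data) {
			end = len(data)
		}
		return data[offset:end]
	}
	paginate(len(data), 3, fetch, func(page []int) bool {
		fmt.Println(page) // [1 2 3], then [4 5 6], then [7]
		return false
	})
}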
|
||||
|
||||
// fetchMetadataForPath reads the metadata from the path.
|
||||
func (f *Fs) fetchMetadataForPath(ctx context.Context, path string, fields []string) (*api.HiDriveObject, error) {
|
||||
parameters := api.NewQueryParameters()
|
||||
parameters.SetPath(path)
|
||||
parameters.AddFields("", fields...)
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/meta",
|
||||
Parameters: parameters.Values,
|
||||
}
|
||||
|
||||
var result api.HiDriveObject
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// copyOrMove copies or moves a directory or file
|
||||
// from the source-path to the destination-path.
|
||||
//
|
||||
// The operation will only be successful
|
||||
// if the parent-directory of the destination-path exists.
|
||||
//
|
||||
// NOTE: Use the explicit methods instead of directly invoking this method.
|
||||
// (Those are: copyDirectory, moveDirectory, copyFile, moveFile.)
|
||||
func (f *Fs) copyOrMove(ctx context.Context, isDirectory bool, operationType CopyOrMoveOperationType, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
|
||||
parameters := api.NewQueryParameters()
|
||||
parameters.Set("src", source)
|
||||
parameters.Set("dst", destination)
|
||||
if onExist == AutoNameOnExist ||
|
||||
(onExist == OverwriteOnExist && !isDirectory) {
|
||||
parameters.Set("on_exist", string(onExist))
|
||||
}
|
||||
|
||||
endpoint := "/"
|
||||
if isDirectory {
|
||||
endpoint += "dir"
|
||||
} else {
|
||||
endpoint += "file"
|
||||
}
|
||||
switch operationType {
|
||||
case MoveOriginal:
|
||||
endpoint += "/move"
|
||||
case CopyOriginalPreserveModTime:
|
||||
parameters.Set("preserve_mtime", strconv.FormatBool(true))
|
||||
fallthrough
|
||||
case CopyOriginal:
|
||||
endpoint += "/copy"
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: endpoint,
|
||||
Parameters: parameters.Values,
|
||||
}
|
||||
|
||||
var result api.HiDriveObject
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// copyDirectory moves the directory at the source-path to the destination-path and
|
||||
// returns the resulting api-object if successful.
|
||||
//
|
||||
// The operation will only be successful
|
||||
// if the parent-directory of the destination-path exists.
|
||||
func (f *Fs) copyDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
|
||||
return f.copyOrMove(ctx, true, CopyOriginalPreserveModTime, source, destination, onExist)
|
||||
}
|
||||
|
||||
// moveDirectory moves the directory at the source-path to the destination-path and
|
||||
// returns the resulting api-object if successful.
|
||||
//
|
||||
// The operation will only be successful
|
||||
// if the parent-directory of the destination-path exists.
|
||||
func (f *Fs) moveDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
|
||||
return f.copyOrMove(ctx, true, MoveOriginal, source, destination, onExist)
|
||||
}
|
||||
|
||||
// copyFile copies the file at the source-path to the destination-path and
|
||||
// returns the resulting api-object if successful.
|
||||
//
|
||||
// The operation will only be successful
|
||||
// if the parent-directory of the destination-path exists.
|
||||
//
|
||||
// NOTE: This operation will expand sparse areas in the content of the source-file
|
||||
// to blocks of 0-bytes in the destination-file.
|
||||
func (f *Fs) copyFile(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
|
||||
return f.copyOrMove(ctx, false, CopyOriginalPreserveModTime, source, destination, onExist)
|
||||
}
|
||||
|
||||
// moveFile moves the file at the source-path to the destination-path and
|
||||
// returns the resulting api-object if successful.
|
||||
//
|
||||
// The operation will only be successful
|
||||
// if the parent-directory of the destination-path exists.
|
||||
//
|
||||
// NOTE: This operation may expand sparse areas in the content of the source-file
|
||||
// to blocks of 0-bytes in the destination-file.
|
||||
func (f *Fs) moveFile(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
|
||||
return f.copyOrMove(ctx, false, MoveOriginal, source, destination, onExist)
|
||||
}
|
||||
|
||||
// createDirectory creates the directory at the given path and
|
||||
// returns the resulting api-object if successful.
|
||||
//
|
||||
// The directory will only be created if its parent-directory exists.
|
||||
// This returns fs.ErrorDirNotFound if the parent-directory is not found.
|
||||
// This returns fs.ErrorDirExists if the directory already exists.
|
||||
func (f *Fs) createDirectory(ctx context.Context, directory string, onExist OnExistAction) (*api.HiDriveObject, error) {
|
||||
parameters := api.NewQueryParameters()
|
||||
parameters.SetPath(directory)
|
||||
if onExist == AutoNameOnExist {
|
||||
parameters.Set("on_exist", string(onExist))
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/dir",
|
||||
Parameters: parameters.Values,
|
||||
}
|
||||
|
||||
var result api.HiDriveObject
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
return &result, nil
|
||||
case isHTTPError(err, 404):
|
||||
return nil, fs.ErrorDirNotFound
|
||||
case isHTTPError(err, 409):
|
||||
return nil, fs.ErrorDirExists
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// createDirectories creates the directory at the given path
|
||||
// along with any missing parent directories and
|
||||
// returns the resulting api-object (of the created directory) if successful.
|
||||
//
|
||||
// This returns fs.ErrorDirExists if the directory already exists.
|
||||
//
|
||||
// If an error occurs while the parent directories are being created,
|
||||
// any directories already created will NOT be deleted again.
|
||||
func (f *Fs) createDirectories(ctx context.Context, directory string, onExist OnExistAction) (*api.HiDriveObject, error) {
|
||||
result, err := f.createDirectory(ctx, directory, onExist)
|
||||
if err == nil {
|
||||
return result, nil
|
||||
}
|
||||
if err != fs.ErrorDirNotFound {
|
||||
return nil, err
|
||||
}
|
||||
parentDirectory := path.Dir(directory)
|
||||
_, err = f.createDirectories(ctx, parentDirectory, onExist)
|
||||
if err != nil && err != fs.ErrorDirExists {
|
||||
return nil, err
|
||||
}
|
||||
// NOTE: Ignoring fs.ErrorDirExists does no harm,
|
||||
// since it does not mean the child directory cannot be created.
|
||||
return f.createDirectory(ctx, directory, onExist)
|
||||
}
|
||||
|
||||
// deleteDirectory deletes the directory at the given path.
|
||||
//
|
||||
// If recursive is false, the directory will only be deleted if it is empty.
|
||||
// If recursive is true, the directory will be deleted regardless of its content.
|
||||
// This returns fs.ErrorDirNotFound if the directory is not found.
|
||||
// This returns fs.ErrorDirectoryNotEmpty if the directory is not empty and
|
||||
// recursive is false.
|
||||
func (f *Fs) deleteDirectory(ctx context.Context, directory string, recursive bool) error {
|
||||
parameters := api.NewQueryParameters()
|
||||
parameters.SetPath(directory)
|
||||
parameters.Set("recursive", strconv.FormatBool(recursive))
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "DELETE",
|
||||
Path: "/dir",
|
||||
Parameters: parameters.Values,
|
||||
NoResponse: true,
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
switch {
|
||||
case isHTTPError(err, 404):
|
||||
return fs.ErrorDirNotFound
|
||||
case isHTTPError(err, 409):
|
||||
return fs.ErrorDirectoryNotEmpty
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// deleteObject deletes the object/file at the given path.
|
||||
//
|
||||
// This returns fs.ErrorObjectNotFound if the object is not found.
|
||||
func (f *Fs) deleteObject(ctx context.Context, path string) error {
|
||||
parameters := api.NewQueryParameters()
|
||||
parameters.SetPath(path)
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "DELETE",
|
||||
Path: "/file",
|
||||
Parameters: parameters.Values,
|
||||
NoResponse: true,
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
if isHTTPError(err, 404) {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// createFile creates a file at the given path
|
||||
// with the content of the io.ReadSeeker.
|
||||
// This guarantees that existing files will not be overwritten.
|
||||
// The maximum size of the content is limited by MaximumUploadBytes.
|
||||
// The io.ReadSeeker should be resettable by seeking to its start.
|
||||
// If modTime is not the zero time instant,
|
||||
// it will be set as the file's modification time after the operation.
|
||||
//
|
||||
// This returns fs.ErrorDirNotFound
|
||||
// if the parent directory of the file is not found.
|
||||
// This returns ErrorFileExists if a file already exists at the specified path.
|
||||
func (f *Fs) createFile(ctx context.Context, path string, content io.ReadSeeker, modTime time.Time, onExist OnExistAction) (*api.HiDriveObject, error) {
|
||||
parameters := api.NewQueryParameters()
|
||||
parameters.SetFileInDirectory(path)
|
||||
if onExist == AutoNameOnExist {
|
||||
parameters.Set("on_exist", string(onExist))
|
||||
}
|
||||
|
||||
var err error
|
||||
if !modTime.IsZero() {
|
||||
err = parameters.SetTime("mtime", modTime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/file",
|
||||
Body: content,
|
||||
ContentType: "application/octet-stream",
|
||||
Parameters: parameters.Values,
|
||||
}
|
||||
|
||||
var result api.HiDriveObject
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
// Reset the reading index (in case this is a retry).
|
||||
if _, err = content.Seek(0, io.SeekStart); err != nil {
|
||||
return false, err
|
||||
}
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
return &result, nil
|
||||
case isHTTPError(err, 404):
|
||||
return nil, fs.ErrorDirNotFound
|
||||
case isHTTPError(err, 409):
|
||||
return nil, ErrorFileExists
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// overwriteFile updates the content of the file at the given path
|
||||
// with the content of the io.ReadSeeker.
|
||||
// If the file does not exist it will be created.
|
||||
// The maximum size of the content is limited by MaximumUploadBytes.
|
||||
// The io.ReadSeeker should be resettable by seeking to its start.
|
||||
// If modTime is not the zero time instant,
|
||||
// it will be set as the file's modification time after the operation.
|
||||
//
|
||||
// This returns fs.ErrorDirNotFound
|
||||
// if the parent directory of the file is not found.
|
||||
func (f *Fs) overwriteFile(ctx context.Context, path string, content io.ReadSeeker, modTime time.Time) (*api.HiDriveObject, error) {
|
||||
parameters := api.NewQueryParameters()
|
||||
parameters.SetFileInDirectory(path)
|
||||
|
||||
var err error
|
||||
if !modTime.IsZero() {
|
||||
err = parameters.SetTime("mtime", modTime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "/file",
|
||||
Body: content,
|
||||
ContentType: "application/octet-stream",
|
||||
Parameters: parameters.Values,
|
||||
}
|
||||
|
||||
var result api.HiDriveObject
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
// Reset the reading index (in case this is a retry).
|
||||
if _, err = content.Seek(0, io.SeekStart); err != nil {
|
||||
return false, err
|
||||
}
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
return &result, nil
|
||||
case isHTTPError(err, 404):
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// uploadFileChunked updates the content of the existing file at the given path
|
||||
// with the content of the io.Reader.
|
||||
// Returns the position of the last successfully written byte, stopping before the first failed write.
|
||||
// If nothing was written this will be 0.
|
||||
// Returns the resulting api-object if successful.
|
||||
//
|
||||
// Replaces the file contents by uploading multiple chunks of the given size in parallel.
|
||||
// Therefore this can be used to upload files of any size efficiently.
|
||||
// The number of parallel transfers is limited by transferLimit, which should be larger than 0.
|
||||
// If modTime is not the zero time instant,
|
||||
// it will be set as the file's modification time after the operation.
|
||||
//
|
||||
// NOTE: This method uses updateFileChunked and may create sparse files,
|
||||
// if the upload of a chunk fails unexpectedly.
|
||||
// See note about sparse files in patchFile.
|
||||
// If any of the uploads fail, the process will be aborted and
|
||||
// the first error that occurred will be returned.
|
||||
// This is not an atomic operation,
|
||||
// therefore if the upload fails the file may be partially modified.
|
||||
//
|
||||
// This returns fs.ErrorObjectNotFound if the object is not found.
|
||||
func (f *Fs) uploadFileChunked(ctx context.Context, path string, content io.Reader, modTime time.Time, chunkSize int, transferLimit int64) (okSize uint64, info *api.HiDriveObject, err error) {
|
||||
okSize, err = f.updateFileChunked(ctx, path, content, 0, chunkSize, transferLimit)
|
||||
|
||||
if err == nil {
|
||||
info, err = f.resizeFile(ctx, path, okSize, modTime)
|
||||
}
|
||||
return okSize, info, err
|
||||
}
|
||||
|
||||
// updateFileChunked updates the content of the existing file at the given path
|
||||
// starting at the given offset.
|
||||
// Returns the position of the last successfully written byte, stopping before the first failed write.
|
||||
// If nothing was written this will be 0.
|
||||
//
|
||||
// Replaces the file contents starting from the given byte offset
|
||||
// with the content of the io.Reader.
|
||||
// If the offset is beyond the file end, the file is extended up to the offset.
|
||||
//
|
||||
// The upload is done in multiple chunks of the given size in parallel.
|
||||
// Therefore this can be used to upload files of any size efficiently.
|
||||
// The number of parallel transfers is limited by transferLimit, which should be larger than 0.
|
||||
//
|
||||
// NOTE: Because it is inefficient to set the modification time with every chunk,
|
||||
// setting it to a specific value must be done in a separate request
|
||||
// after this operation finishes.
|
||||
//
|
||||
// NOTE: This method uses patchFile and may create sparse files,
|
||||
// especially if the upload of a chunk fails unexpectedly.
|
||||
// See note about sparse files in patchFile.
|
||||
// If any of the uploads fail, the process will be aborted and
|
||||
// the first error that occurred will be returned.
|
||||
// This is not an atomic operation,
|
||||
// therefore if the upload fails the file may be partially modified.
|
||||
//
|
||||
// This returns fs.ErrorObjectNotFound if the object is not found.
|
||||
func (f *Fs) updateFileChunked(ctx context.Context, path string, content io.Reader, offset uint64, chunkSize int, transferLimit int64) (okSize uint64, err error) {
|
||||
var (
|
||||
okChunksMu sync.Mutex // protects the variables below
|
||||
okChunks []ranges.Range
|
||||
)
|
||||
g, gCtx := errgroup.WithContext(ctx)
|
||||
transferSemaphore := semaphore.NewWeighted(transferLimit)
|
||||
|
||||
var readErr error
|
||||
startMoreTransfers := true
|
||||
zeroTime := time.Time{}
|
||||
for chunk := uint64(0); startMoreTransfers; chunk++ {
|
||||
// Acquire semaphore to limit number of transfers in parallel.
|
||||
readErr = transferSemaphore.Acquire(gCtx, 1)
|
||||
if readErr != nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Read a chunk of data.
|
||||
chunkReader, bytesRead, readErr := readerForChunk(content, chunkSize)
|
||||
if bytesRead < chunkSize {
|
||||
startMoreTransfers = false
|
||||
}
|
||||
if readErr != nil || bytesRead <= 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Transfer the chunk.
|
||||
chunkOffset := uint64(chunkSize)*chunk + offset
|
||||
g.Go(func() error {
|
||||
// After this upload is done,
|
||||
// signal that another transfer can be started.
|
||||
defer transferSemaphore.Release(1)
|
||||
uploadErr := f.patchFile(gCtx, path, cachedReader(chunkReader), chunkOffset, zeroTime)
|
||||
if uploadErr == nil {
|
||||
// Remember successfully written chunks.
|
||||
okChunksMu.Lock()
|
||||
okChunks = append(okChunks, ranges.Range{Pos: int64(chunkOffset), Size: int64(bytesRead)})
|
||||
okChunksMu.Unlock()
|
||||
fs.Debugf(f, "Done uploading chunk of size %v at offset %v.", bytesRead, chunkOffset)
|
||||
} else {
|
||||
fs.Infof(f, "Error while uploading chunk at offset %v. Error is %v.", chunkOffset, uploadErr)
|
||||
}
|
||||
return uploadErr
|
||||
})
|
||||
}
|
||||
|
||||
if readErr != nil {
|
||||
// Log the error in case it is later ignored because of an upload-error.
|
||||
fs.Infof(f, "Error while reading/preparing to upload a chunk. Error is %v.", readErr)
|
||||
}
|
||||
|
||||
err = g.Wait()
|
||||
|
||||
// Compute the first continuous range of the file content,
|
||||
// which does not contain any failed chunks.
|
||||
// Do not forget to add the file content up to the starting offset,
|
||||
// which is presumed to be already correct.
|
||||
rs := ranges.Ranges{}
|
||||
rs.Insert(ranges.Range{Pos: 0, Size: int64(offset)})
|
||||
for _, chunkRange := range okChunks {
|
||||
rs.Insert(chunkRange)
|
||||
}
|
||||
if len(rs) > 0 && rs[0].Pos == 0 {
|
||||
okSize = uint64(rs[0].Size)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return okSize, err
|
||||
}
|
||||
if readErr != nil {
|
||||
return okSize, readErr
|
||||
}
|
||||
|
||||
return okSize, nil
|
||||
}
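
A standalone sketch of the bookkeeping at the end of this function: only the contiguous prefix of successfully written chunks counts towards okSize (the chunk offsets below are made-up example values):

package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/ranges"
)

func main() {
	rs := ranges.Ranges{}
	rs.Insert(ranges.Range{Pos: 0, Size: 4096})    // chunk 0 uploaded fine
	rs.Insert(ranges.Range{Pos: 8192, Size: 4096}) // chunk 2 uploaded fine, chunk 1 failed

	var okSize int64
	if len(rs) > 0 && rs[0].Pos == 0 {
		okSize = rs[0].Size
	}
	fmt.Println(okSize) // 4096 - nothing after the gap left by the failed chunk counts
}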
|
||||
|
||||
// patchFile updates the content of the existing file at the given path
|
||||
// starting at the given offset.
|
||||
//
|
||||
// Replaces the file contents starting from the given byte offset
|
||||
// with the content of the io.ReadSeeker.
|
||||
// If the offset is beyond the file end, the file is extended up to the offset.
|
||||
// The maximum size of the update is limited by MaximumUploadBytes.
|
||||
// The io.ReadSeeker should be resettable by seeking to its start.
|
||||
// If modTime is not the zero time instant,
|
||||
// it will be set as the file's modification time after the operation.
|
||||
//
|
||||
// NOTE: By extending the file up to the offset this may create sparse files,
|
||||
// which allocate less space on the file system than their apparent size indicates,
|
||||
// since holes between data chunks are "real" holes
|
||||
// and not regions made up of consecutive 0-bytes.
|
||||
// Subsequent operations (such as copying data)
|
||||
// usually expand the holes into regions of 0-bytes.
|
||||
//
|
||||
// This returns fs.ErrorObjectNotFound if the object is not found.
|
||||
func (f *Fs) patchFile(ctx context.Context, path string, content io.ReadSeeker, offset uint64, modTime time.Time) error {
|
||||
parameters := api.NewQueryParameters()
|
||||
parameters.SetPath(path)
|
||||
parameters.Set("offset", strconv.FormatUint(offset, 10))
|
||||
|
||||
if !modTime.IsZero() {
|
||||
err := parameters.SetTime("mtime", modTime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "PATCH",
|
||||
Path: "/file",
|
||||
Body: content,
|
||||
ContentType: "application/octet-stream",
|
||||
Parameters: parameters.Values,
|
||||
NoResponse: true,
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
// Reset the reading index (in case this is a retry).
|
||||
_, err = content.Seek(0, io.SeekStart)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
if isHTTPError(err, 423) {
|
||||
return true, err
|
||||
}
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
if isHTTPError(err, 404) {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// resizeFile updates the existing file at the given path to be of the given size
|
||||
// and returns the resulting api-object if successful.
|
||||
//
|
||||
// If the given size is smaller than the current filesize,
|
||||
// the file is cut/truncated at that position.
|
||||
// If the given size is larger, the file is extended up to that position.
|
||||
// If modTime is not the zero time instant,
|
||||
// it will be set as the file's modification time after the operation.
|
||||
//
|
||||
// NOTE: By extending the file this may create sparse files,
|
||||
// which allocate less space on the file system than their apparent size indicates,
|
||||
// since holes between data chunks are "real" holes
|
||||
// and not regions made up of consecutive 0-bytes.
|
||||
// Subsequent operations (such as copying data)
|
||||
// usually expand the holes into regions of 0-bytes.
|
||||
//
|
||||
// This returns fs.ErrorObjectNotFound if the object is not found.
|
||||
func (f *Fs) resizeFile(ctx context.Context, path string, size uint64, modTime time.Time) (*api.HiDriveObject, error) {
|
||||
parameters := api.NewQueryParameters()
|
||||
parameters.SetPath(path)
|
||||
parameters.Set("size", strconv.FormatUint(size, 10))
|
||||
|
||||
if !modTime.IsZero() {
|
||||
err := parameters.SetTime("mtime", modTime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/file/truncate",
|
||||
Parameters: parameters.Values,
|
||||
}
|
||||
|
||||
var result api.HiDriveObject
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
return &result, nil
|
||||
case isHTTPError(err, 404):
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// isHTTPError compares the numerical status code
|
||||
// of an api.Error to the given HTTP status.
|
||||
//
|
||||
// If the given error is not an api.Error or
|
||||
// a numerical status code could not be determined, this returns false.
|
||||
// Otherwise this returns whether the status code of the error is equal to the given status.
|
||||
func isHTTPError(err error, status int64) bool {
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
errStatus, decodeErr := apiErr.Code.Int64()
|
||||
if decodeErr == nil && errStatus == status {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// createHiDriveScopes creates oauth-scopes
|
||||
// from the given user-role and access-permissions.
|
||||
//
|
||||
// If the arguments are empty, they will not be included in the result.
|
||||
func createHiDriveScopes(role string, access string) []string {
|
||||
switch {
|
||||
case role != "" && access != "":
|
||||
return []string{access + "," + role}
|
||||
case role != "":
|
||||
return []string{role}
|
||||
case access != "":
|
||||
return []string{access}
|
||||
}
|
||||
return []string{}
|
||||
}
|
||||
|
||||
// cachedReader returns a version of the reader that caches its contents and
|
||||
// can therefore be reset using Seek.
|
||||
func cachedReader(reader io.Reader) io.ReadSeeker {
|
||||
bytesReader, ok := reader.(*bytes.Reader)
|
||||
if ok {
|
||||
return bytesReader
|
||||
}
|
||||
|
||||
repeatableReader, ok := reader.(*readers.RepeatableReader)
|
||||
if ok {
|
||||
return repeatableReader
|
||||
}
|
||||
|
||||
return readers.NewRepeatableReader(reader)
|
||||
}
|
||||
|
||||
// readerForChunk reads a chunk of bytes from reader (after handling any accounting).
|
||||
// Returns a new io.Reader (chunkReader) for that chunk
|
||||
// and the number of bytes that have been read from reader.
|
||||
func readerForChunk(reader io.Reader, length int) (chunkReader io.Reader, bytesRead int, err error) {
|
||||
// Unwrap any accounting from the input if present.
|
||||
reader, wrap := accounting.UnWrap(reader)
|
||||
|
||||
// Read a chunk of data.
|
||||
buffer := make([]byte, length)
|
||||
bytesRead, err = io.ReadFull(reader, buffer)
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, bytesRead, err
|
||||
}
|
||||
// Truncate unused capacity.
|
||||
buffer = buffer[:bytesRead]
|
||||
|
||||
// Use wrap to put any accounting back for chunkReader.
|
||||
return wrap(bytes.NewReader(buffer)), bytesRead, nil
|
||||
}
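
The chunking above relies on io.ReadFull reporting a short final chunk as io.ErrUnexpectedEOF; a self-contained sketch of that pattern:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	r := strings.NewReader("hello world")
	for {
		buf := make([]byte, 4)
		n, err := io.ReadFull(r, buf)
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			err = nil // a short or empty read just means the input ended
		}
		if err != nil {
			panic(err)
		}
		if n == 0 {
			break
		}
		fmt.Printf("chunk %q\n", buf[:n]) // "hell", "o wo", "rld"
	}
}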
|
||||
backend/hidrive/hidrive.go (new file, 1002 lines)
File diff suppressed because it is too large
backend/hidrive/hidrive_test.go (new file, 45 lines)
@@ -0,0 +1,45 @@
|
||||
// Test HiDrive filesystem interface
|
||||
package hidrive
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote.
|
||||
func TestIntegration(t *testing.T) {
|
||||
name := "TestHiDrive"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: 1,
|
||||
MaxChunkSize: MaximumUploadBytes,
|
||||
CeilChunkSize: nil,
|
||||
NeedMultipleChunks: false,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Change the configured UploadChunkSize.
|
||||
// Will only be called while no transfer is in progress.
|
||||
func (f *Fs) SetUploadChunkSize(chunksize fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
var old fs.SizeSuffix
|
||||
old, f.opt.UploadChunkSize = f.opt.UploadChunkSize, chunksize
|
||||
return old, nil
|
||||
}
|
||||
|
||||
// Change the configured UploadCutoff.
|
||||
// Will only be called while no transfer is in progress.
|
||||
func (f *Fs) SetUploadCutoff(cutoff fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
var old fs.SizeSuffix
|
||||
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cutoff
|
||||
return old, nil
|
||||
}
|
||||
|
||||
var (
|
||||
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
_ fstests.SetUploadCutoffer = (*Fs)(nil)
|
||||
)
|
||||
backend/hidrive/hidrivehash/hidrivehash.go (new file, 410 lines)
@@ -0,0 +1,410 @@
|
||||
// Package hidrivehash implements the HiDrive hashing algorithm which combines SHA-1 hashes hierarchically to a single top-level hash.
|
||||
//
|
||||
// Note: This implementation does not grant access to any partial hashes generated.
|
||||
//
|
||||
// See: https://developer.hidrive.com/wp-content/uploads/2021/07/HiDrive_Synchronization-v3.3-rev28.pdf
|
||||
// (link to newest version: https://static.hidrive.com/dev/0001)
|
||||
package hidrivehash
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha1"
|
||||
"encoding"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
|
||||
"github.com/rclone/rclone/backend/hidrive/hidrivehash/internal"
|
||||
)
|
||||
|
||||
const (
|
||||
// BlockSize of the checksum in bytes.
|
||||
BlockSize = 4096
|
||||
// Size of the checksum in bytes.
|
||||
Size = sha1.Size
|
||||
// sumsPerLevel is the number of checksums aggregated per level
|
||||
sumsPerLevel = 256
|
||||
)
|
||||
|
||||
var (
|
||||
// zeroSum is a special hash consisting of 20 null-bytes.
|
||||
// This will be the hash of any empty file (or ones containing only null-bytes).
|
||||
zeroSum = [Size]byte{}
|
||||
// ErrorInvalidEncoding is returned when a hash should be decoded from a binary form that is invalid.
|
||||
ErrorInvalidEncoding = errors.New("encoded binary form is invalid for this hash")
|
||||
// ErrorHashFull is returned when a hash reached its capacity and cannot accept any more input.
|
||||
ErrorHashFull = errors.New("hash reached its capacity")
|
||||
)
|
||||
|
||||
// writeByBlock writes len(p) bytes from p to the io.Writer in blocks of size blockSize.
|
||||
// It returns the number of bytes written from p (0 <= n <= len(p))
|
||||
// and any error encountered that caused the write to stop early.
|
||||
//
|
||||
// A pointer bytesInBlock to a counter needs to be supplied,
|
||||
// that is used to keep track of how many bytes have been written to the writer already.
|
||||
// A pointer onlyNullBytesInBlock to a boolean needs to be supplied,
|
||||
// that is used to keep track of whether the block so far only consists of null-bytes.
|
||||
// The callback onBlockWritten is called whenever a full block has been written to the writer
|
||||
// and is given as input the number of bytes that still need to be written.
|
||||
func writeByBlock(p []byte, writer io.Writer, blockSize uint32, bytesInBlock *uint32, onlyNullBytesInBlock *bool, onBlockWritten func(remaining int) error) (n int, err error) {
|
||||
total := len(p)
|
||||
nullBytes := make([]byte, blockSize)
|
||||
for len(p) > 0 {
|
||||
toWrite := int(blockSize - *bytesInBlock)
|
||||
if toWrite > len(p) {
|
||||
toWrite = len(p)
|
||||
}
|
||||
c, err := writer.Write(p[:toWrite])
|
||||
*bytesInBlock += uint32(c)
|
||||
*onlyNullBytesInBlock = *onlyNullBytesInBlock && bytes.Equal(nullBytes[:toWrite], p[:toWrite])
|
||||
// Discard data written through a reslice
|
||||
p = p[c:]
|
||||
if err != nil {
|
||||
return total - len(p), err
|
||||
}
|
||||
if *bytesInBlock == blockSize {
|
||||
err = onBlockWritten(len(p))
|
||||
if err != nil {
|
||||
return total - len(p), err
|
||||
}
|
||||
*bytesInBlock = 0
|
||||
*onlyNullBytesInBlock = true
|
||||
}
|
||||
}
|
||||
return total, nil
|
||||
}
|
||||
|
||||
// level is a hash.Hash that is used to aggregate the checksums produced by the level hierarchically beneath it.
|
||||
// It is used to represent any level-n hash, except for level-0.
|
||||
type level struct {
|
||||
checksum [Size]byte // aggregated checksum of this level
|
||||
sumCount uint32 // number of sums contained in this level so far
|
||||
bytesInHasher uint32 // number of bytes written into hasher so far
|
||||
onlyNullBytesInHasher bool // whether the hasher only contains null-bytes so far
|
||||
hasher hash.Hash
|
||||
}
|
||||
|
||||
// NewLevel returns a new hash.Hash computing any level-n hash, except level-0.
|
||||
func NewLevel() hash.Hash {
|
||||
l := &level{}
|
||||
l.Reset()
|
||||
return l
|
||||
}
|
||||
|
||||
// Add takes a position-embedded SHA-1 checksum and adds it to the level.
|
||||
func (l *level) Add(sha1sum []byte) {
|
||||
var tmp uint
|
||||
var carry bool
|
||||
for i := Size - 1; i >= 0; i-- {
|
||||
tmp = uint(sha1sum[i]) + uint(l.checksum[i])
|
||||
if carry {
|
||||
tmp++
|
||||
}
|
||||
carry = tmp > 255
|
||||
l.checksum[i] = byte(tmp)
|
||||
}
|
||||
}
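
Add treats both checksums as 160-bit big-endian integers and sums them modulo 2^160; a standalone sketch of the same carry propagation on made-up values:

package main

import "fmt"

// add adds b into a, interpreting both as 160-bit big-endian integers (mod 2^160).
func add(a *[20]byte, b [20]byte) {
	carry := uint(0)
	for i := 19; i >= 0; i-- {
		sum := uint(a[i]) + uint(b[i]) + carry
		a[i] = byte(sum)
		carry = sum >> 8
	}
}

func main() {
	var acc [20]byte
	add(&acc, [20]byte{19: 0xff}) // least-significant byte 0xff
	add(&acc, [20]byte{19: 0x02}) // 0xff + 0x02 overflows into the next byte
	fmt.Printf("%x\n", acc)       // ends in ...0101
}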
|
||||
|
||||
// IsFull returns whether the number of checksums added to this level reached its capacity.
|
||||
func (l *level) IsFull() bool {
|
||||
return l.sumCount >= sumsPerLevel
|
||||
}
|
||||
|
||||
// Write (via the embedded io.Writer interface) adds more data to the running hash.
|
||||
// Contrary to the specification from hash.Hash, this DOES return an error,
|
||||
// specifically ErrorHashFull if and only if IsFull() returns true.
|
||||
func (l *level) Write(p []byte) (n int, err error) {
|
||||
if l.IsFull() {
|
||||
return 0, ErrorHashFull
|
||||
}
|
||||
onBlockWritten := func(remaining int) error {
|
||||
if !l.onlyNullBytesInHasher {
|
||||
c, err := l.hasher.Write([]byte{byte(l.sumCount)})
|
||||
l.bytesInHasher += uint32(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.Add(l.hasher.Sum(nil))
|
||||
}
|
||||
l.sumCount++
|
||||
l.hasher.Reset()
|
||||
if remaining > 0 && l.IsFull() {
|
||||
return ErrorHashFull
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return writeByBlock(p, l.hasher, uint32(l.BlockSize()), &l.bytesInHasher, &l.onlyNullBytesInHasher, onBlockWritten)
|
||||
}
|
||||
|
||||
// Sum appends the current hash to b and returns the resulting slice.
|
||||
// It does not change the underlying hash state.
|
||||
func (l *level) Sum(b []byte) []byte {
|
||||
return append(b, l.checksum[:]...)
|
||||
}
|
||||
|
||||
// Reset resets the Hash to its initial state.
|
||||
func (l *level) Reset() {
|
||||
l.checksum = zeroSum // clear the current checksum
|
||||
l.sumCount = 0
|
||||
l.bytesInHasher = 0
|
||||
l.onlyNullBytesInHasher = true
|
||||
l.hasher = sha1.New()
|
||||
}
|
||||
|
||||
// Size returns the number of bytes Sum will return.
|
||||
func (l *level) Size() int {
|
||||
return Size
|
||||
}
|
||||
|
||||
// BlockSize returns the hash's underlying block size.
|
||||
// The Write method must be able to accept any amount
|
||||
// of data, but it may operate more efficiently if all writes
|
||||
// are a multiple of the block size.
|
||||
func (l *level) BlockSize() int {
|
||||
return Size
|
||||
}
|
||||
|
||||
// MarshalBinary encodes the hash into a binary form and returns the result.
|
||||
func (l *level) MarshalBinary() ([]byte, error) {
|
||||
b := make([]byte, Size+4+4+1)
|
||||
copy(b, l.checksum[:])
|
||||
binary.BigEndian.PutUint32(b[Size:], l.sumCount)
|
||||
binary.BigEndian.PutUint32(b[Size+4:], l.bytesInHasher)
|
||||
if l.onlyNullBytesInHasher {
|
||||
b[Size+4+4] = 1
|
||||
}
|
||||
encodedHasher, err := l.hasher.(encoding.BinaryMarshaler).MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b = append(b, encodedHasher...)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary decodes the binary form generated by MarshalBinary.
|
||||
// The hash will replace its internal state accordingly.
|
||||
func (l *level) UnmarshalBinary(b []byte) error {
|
||||
if len(b) < Size+4+4+1 {
|
||||
return ErrorInvalidEncoding
|
||||
}
|
||||
copy(l.checksum[:], b)
|
||||
l.sumCount = binary.BigEndian.Uint32(b[Size:])
|
||||
l.bytesInHasher = binary.BigEndian.Uint32(b[Size+4:])
|
||||
switch b[Size+4+4] {
|
||||
case 0:
|
||||
l.onlyNullBytesInHasher = false
|
||||
case 1:
|
||||
l.onlyNullBytesInHasher = true
|
||||
default:
|
||||
return ErrorInvalidEncoding
|
||||
}
|
||||
err := l.hasher.(encoding.BinaryUnmarshaler).UnmarshalBinary(b[Size+4+4+1:])
|
||||
return err
|
||||
}
|
||||
|
||||
// hidriveHash is the hash computing the actual checksum used by HiDrive by combining multiple level-hashes.
|
||||
type hidriveHash struct {
|
||||
levels []*level // collection of level-hashes, one for each level starting at level-1
|
||||
lastSumWritten [Size]byte // the last checksum written to any of the levels
|
||||
bytesInBlock uint32 // bytes written into blockHash so far
|
||||
onlyNullBytesInBlock bool // whether the hasher only contains null-bytes so far
|
||||
blockHash hash.Hash
|
||||
}
|
||||
|
||||
// New returns a new hash.Hash computing the HiDrive checksum.
|
||||
func New() hash.Hash {
|
||||
h := &hidriveHash{}
|
||||
h.Reset()
|
||||
return h
|
||||
}
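
Since the package exposes the standard hash.Hash interface, using it looks like any other hasher; a minimal usage sketch:

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/rclone/rclone/backend/hidrive/hidrivehash"
)

func main() {
	h := hidrivehash.New()
	if _, err := h.Write([]byte("example content")); err != nil {
		panic(err)
	}
	// Sum may be called at any point; it does not change the running state.
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}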
|
||||
|
||||
// aggregateToLevel writes the checksum to the level at the given index
|
||||
// and if necessary propagates any changes to levels above.
|
||||
func (h *hidriveHash) aggregateToLevel(index int, sum []byte) {
|
||||
for i := index; ; i++ {
|
||||
if i >= len(h.levels) {
|
||||
h.levels = append(h.levels, NewLevel().(*level))
|
||||
}
|
||||
_, err := h.levels[i].Write(sum)
|
||||
copy(h.lastSumWritten[:], sum)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("level-hash should not have produced an error: %w", err))
|
||||
}
|
||||
if !h.levels[i].IsFull() {
|
||||
break
|
||||
}
|
||||
sum = h.levels[i].Sum(nil)
|
||||
h.levels[i].Reset()
|
||||
}
|
||||
}
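// Added note for clarity (not part of the original source): a level aggregates
// up to 256 checksums; once it is full, its sum is pushed one level up and the
// level is reset, which is what the loop above implements.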
|
||||
|
||||
// Write (via the embedded io.Writer interface) adds more data to the running hash.
|
||||
// It never returns an error.
|
||||
func (h *hidriveHash) Write(p []byte) (n int, err error) {
|
||||
onBlockWritten := func(remaining int) error {
|
||||
var sum []byte
|
||||
if h.onlyNullBytesInBlock {
|
||||
sum = zeroSum[:]
|
||||
} else {
|
||||
sum = h.blockHash.Sum(nil)
|
||||
}
|
||||
h.blockHash.Reset()
|
||||
h.aggregateToLevel(0, sum)
|
||||
return nil
|
||||
}
|
||||
return writeByBlock(p, h.blockHash, uint32(BlockSize), &h.bytesInBlock, &h.onlyNullBytesInBlock, onBlockWritten)
|
||||
}
|
||||
|
||||
// Sum appends the current hash to b and returns the resulting slice.
|
||||
// It does not change the underlying hash state.
|
||||
func (h *hidriveHash) Sum(b []byte) []byte {
|
||||
// Save internal state.
|
||||
state, err := h.MarshalBinary()
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("saving the internal state should not have produced an error: %w", err))
|
||||
}
|
||||
|
||||
if h.bytesInBlock > 0 {
|
||||
// Fill remainder of block with null-bytes.
|
||||
filler := make([]byte, h.BlockSize()-int(h.bytesInBlock))
|
||||
_, err = h.Write(filler)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("filling with null-bytes should not have an error: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
checksum := zeroSum
|
||||
for i := 0; i < len(h.levels); i++ {
|
||||
level := h.levels[i]
|
||||
if i < len(h.levels)-1 {
|
||||
// Aggregate non-empty non-final levels.
|
||||
if level.sumCount >= 1 {
|
||||
h.aggregateToLevel(i+1, level.Sum(nil))
|
||||
level.Reset()
|
||||
}
|
||||
} else {
|
||||
// Determine sum of final level.
|
||||
if level.sumCount > 1 {
|
||||
copy(checksum[:], level.Sum(nil))
|
||||
} else {
|
||||
// This is needed, otherwise there is no way to return
|
||||
// the non-position-embedded checksum.
|
||||
checksum = h.lastSumWritten
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Restore internal state.
|
||||
err = h.UnmarshalBinary(state)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("restoring the internal state should not have produced an error: %w", err))
|
||||
}
|
||||
|
||||
return append(b, checksum[:]...)
|
||||
}
|
||||
|
||||
// Reset resets the Hash to its initial state.
|
||||
func (h *hidriveHash) Reset() {
|
||||
h.levels = nil
|
||||
h.lastSumWritten = zeroSum // clear the last written checksum
|
||||
h.bytesInBlock = 0
|
||||
h.onlyNullBytesInBlock = true
|
||||
h.blockHash = sha1.New()
|
||||
}
|
||||
|
||||
// Size returns the number of bytes Sum will return.
|
||||
func (h *hidriveHash) Size() int {
|
||||
return Size
|
||||
}
|
||||
|
||||
// BlockSize returns the hash's underlying block size.
|
||||
// The Write method must be able to accept any amount
|
||||
// of data, but it may operate more efficiently if all writes
|
||||
// are a multiple of the block size.
|
||||
func (h *hidriveHash) BlockSize() int {
|
||||
return BlockSize
|
||||
}
|
||||
|
||||
// MarshalBinary encodes the hash into a binary form and returns the result.
|
||||
func (h *hidriveHash) MarshalBinary() ([]byte, error) {
|
||||
b := make([]byte, Size+4+1+8)
|
||||
copy(b, h.lastSumWritten[:])
|
||||
binary.BigEndian.PutUint32(b[Size:], h.bytesInBlock)
|
||||
if h.onlyNullBytesInBlock {
|
||||
b[Size+4] = 1
|
||||
}
|
||||
|
||||
binary.BigEndian.PutUint64(b[Size+4+1:], uint64(len(h.levels)))
|
||||
for _, level := range h.levels {
|
||||
encodedLevel, err := level.MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
encodedLength := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(encodedLength, uint64(len(encodedLevel)))
|
||||
b = append(b, encodedLength...)
|
||||
b = append(b, encodedLevel...)
|
||||
}
|
||||
encodedBlockHash, err := h.blockHash.(encoding.BinaryMarshaler).MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b = append(b, encodedBlockHash...)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary decodes the binary form generated by MarshalBinary.
|
||||
// The hash will replace its internal state accordingly.
|
||||
func (h *hidriveHash) UnmarshalBinary(b []byte) error {
|
||||
if len(b) < Size+4+1+8 {
|
||||
return ErrorInvalidEncoding
|
||||
}
|
||||
copy(h.lastSumWritten[:], b)
|
||||
h.bytesInBlock = binary.BigEndian.Uint32(b[Size:])
|
||||
switch b[Size+4] {
|
||||
case 0:
|
||||
h.onlyNullBytesInBlock = false
|
||||
case 1:
|
||||
h.onlyNullBytesInBlock = true
|
||||
default:
|
||||
return ErrorInvalidEncoding
|
||||
}
|
||||
|
||||
amount := binary.BigEndian.Uint64(b[Size+4+1:])
|
||||
h.levels = make([]*level, int(amount))
|
||||
offset := Size + 4 + 1 + 8
|
||||
for i := range h.levels {
|
||||
length := int(binary.BigEndian.Uint64(b[offset:]))
|
||||
offset += 8
|
||||
h.levels[i] = NewLevel().(*level)
|
||||
err := h.levels[i].UnmarshalBinary(b[offset : offset+length])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
offset += length
|
||||
}
|
||||
err := h.blockHash.(encoding.BinaryUnmarshaler).UnmarshalBinary(b[offset:])
|
||||
return err
|
||||
}
|
||||
|
||||
// Sum returns the HiDrive checksum of the data.
|
||||
func Sum(data []byte) [Size]byte {
|
||||
h := New().(*hidriveHash)
|
||||
_, _ = h.Write(data)
|
||||
var result [Size]byte
|
||||
copy(result[:], h.Sum(nil))
|
||||
return result
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied.
var (
	_ hash.Hash                  = (*level)(nil)
	_ encoding.BinaryMarshaler   = (*level)(nil)
	_ encoding.BinaryUnmarshaler = (*level)(nil)
	_ internal.LevelHash         = (*level)(nil)
	_ hash.Hash                  = (*hidriveHash)(nil)
	_ encoding.BinaryMarshaler   = (*hidriveHash)(nil)
	_ encoding.BinaryUnmarshaler = (*hidriveHash)(nil)
)
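A minimal usage sketch of the package above (added for illustration, not part of the commit; it relies only on the exported New and Sum shown in this file plus the standard library):

package main

import (
	"fmt"

	"github.com/rclone/rclone/backend/hidrive/hidrivehash"
)

func main() {
	// Incremental use through the standard hash.Hash interface.
	h := hidrivehash.New()
	_, _ = h.Write([]byte("hello rclone\n")) // Write never returns an error
	fmt.Printf("%x\n", h.Sum(nil))           // 20-byte HiDrive checksum

	// One-shot helper over a byte slice.
	sum := hidrivehash.Sum([]byte("hello rclone\n"))
	fmt.Printf("%x\n", sum[:])
}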
|
||||
395
backend/hidrive/hidrivehash/hidrivehash_test.go
Normal file
@@ -0,0 +1,395 @@
|
||||
package hidrivehash_test
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"encoding"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/hidrive/hidrivehash"
|
||||
"github.com/rclone/rclone/backend/hidrive/hidrivehash/internal"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// helper functions to set up test-tables
|
||||
|
||||
func sha1ArrayAsSlice(sum [sha1.Size]byte) []byte {
|
||||
return sum[:]
|
||||
}
|
||||
|
||||
func mustDecode(hexstring string) []byte {
|
||||
result, err := hex.DecodeString(hexstring)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
var testTableLevelPositionEmbedded = []struct {
|
||||
ins [][]byte
|
||||
outs [][]byte
|
||||
name string
|
||||
}{
|
||||
{
|
||||
[][]byte{
|
||||
sha1ArrayAsSlice([20]byte{245, 202, 195, 223, 121, 198, 189, 112, 138, 202, 222, 2, 146, 156, 127, 16, 208, 233, 98, 88}),
|
||||
sha1ArrayAsSlice([20]byte{78, 188, 156, 219, 173, 54, 81, 55, 47, 220, 222, 207, 201, 21, 57, 252, 255, 239, 251, 186}),
|
||||
},
|
||||
[][]byte{
|
||||
sha1ArrayAsSlice([20]byte{245, 202, 195, 223, 121, 198, 189, 112, 138, 202, 222, 2, 146, 156, 127, 16, 208, 233, 98, 88}),
|
||||
sha1ArrayAsSlice([20]byte{68, 135, 96, 187, 38, 253, 14, 167, 186, 167, 188, 210, 91, 177, 185, 13, 208, 217, 94, 18}),
|
||||
},
|
||||
"documentation-v3.2rev27-example L0 (position-embedded)",
|
||||
},
|
||||
{
|
||||
[][]byte{
|
||||
sha1ArrayAsSlice([20]byte{68, 254, 92, 166, 52, 37, 104, 180, 22, 123, 249, 144, 182, 78, 64, 74, 57, 117, 225, 195}),
|
||||
sha1ArrayAsSlice([20]byte{75, 211, 153, 190, 125, 179, 67, 49, 60, 149, 98, 246, 142, 20, 11, 254, 159, 162, 129, 237}),
|
||||
sha1ArrayAsSlice([20]byte{150, 2, 9, 153, 97, 153, 189, 104, 147, 14, 77, 203, 244, 243, 25, 212, 67, 48, 111, 107}),
|
||||
},
|
||||
[][]byte{
|
||||
sha1ArrayAsSlice([20]byte{68, 254, 92, 166, 52, 37, 104, 180, 22, 123, 249, 144, 182, 78, 64, 74, 57, 117, 225, 195}),
|
||||
sha1ArrayAsSlice([20]byte{144, 209, 246, 100, 177, 216, 171, 229, 83, 17, 92, 135, 68, 98, 76, 72, 217, 24, 99, 176}),
|
||||
sha1ArrayAsSlice([20]byte{38, 211, 255, 254, 19, 114, 105, 77, 230, 31, 170, 83, 57, 85, 102, 29, 28, 72, 211, 27}),
|
||||
},
|
||||
"documentation-example L0 (position-embedded)",
|
||||
},
|
||||
{
|
||||
[][]byte{
|
||||
sha1ArrayAsSlice([20]byte{173, 123, 132, 245, 176, 172, 43, 183, 121, 40, 66, 252, 101, 249, 188, 193, 160, 189, 2, 116}),
|
||||
sha1ArrayAsSlice([20]byte{40, 34, 8, 238, 37, 5, 237, 184, 79, 105, 10, 167, 171, 254, 13, 229, 132, 112, 254, 8}),
|
||||
sha1ArrayAsSlice([20]byte{39, 112, 26, 86, 190, 35, 100, 101, 28, 131, 122, 191, 254, 144, 239, 107, 253, 124, 104, 203}),
|
||||
},
|
||||
[][]byte{
|
||||
sha1ArrayAsSlice([20]byte{173, 123, 132, 245, 176, 172, 43, 183, 121, 40, 66, 252, 101, 249, 188, 193, 160, 189, 2, 116}),
|
||||
sha1ArrayAsSlice([20]byte{213, 157, 141, 227, 213, 178, 25, 111, 200, 145, 77, 164, 17, 247, 202, 167, 37, 46, 0, 124}),
|
||||
sha1ArrayAsSlice([20]byte{253, 13, 168, 58, 147, 213, 125, 212, 229, 20, 200, 100, 16, 136, 186, 19, 34, 170, 105, 71}),
|
||||
},
|
||||
"documentation-example L1 (position-embedded)",
|
||||
},
|
||||
}
|
||||
|
||||
var testTableLevel = []struct {
|
||||
ins [][]byte
|
||||
outs [][]byte
|
||||
name string
|
||||
}{
|
||||
{
|
||||
[][]byte{
|
||||
mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
|
||||
mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
|
||||
mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
|
||||
},
|
||||
[][]byte{
|
||||
mustDecode("44fe5ca6342568b4167bf990b64e404a3975e1c3"),
|
||||
mustDecode("90d1f664b1d8abe553115c8744624c48d91863b0"),
|
||||
mustDecode("26d3fffe1372694de61faa533955661d1c48d31b"),
|
||||
},
|
||||
"documentation-example L0",
|
||||
},
|
||||
{
|
||||
[][]byte{
|
||||
mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"),
|
||||
mustDecode("daedc425199501b1e86b5eaba5649cbde205e6ae"),
|
||||
mustDecode("286ac5283f99c4e0f11683900a3e39661c375dd6"),
|
||||
},
|
||||
[][]byte{
|
||||
mustDecode("ad7b84f5b0ac2bb7792842fc65f9bcc1a0bd0274"),
|
||||
mustDecode("d59d8de3d5b2196fc8914da411f7caa7252e007c"),
|
||||
mustDecode("fd0da83a93d57dd4e514c8641088ba1322aa6947"),
|
||||
},
|
||||
"documentation-example L1",
|
||||
},
|
||||
{
|
||||
[][]byte{
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"),
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
mustDecode("daedc425199501b1e86b5eaba5649cbde205e6ae"),
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
mustDecode("286ac5283f99c4e0f11683900a3e39661c375dd6"),
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
},
|
||||
[][]byte{
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
mustDecode("a197464ec19f2b2b2bc6b21f6c939c7e57772843"),
|
||||
mustDecode("a197464ec19f2b2b2bc6b21f6c939c7e57772843"),
|
||||
mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
|
||||
mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
|
||||
mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
|
||||
mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
|
||||
mustDecode("8f56351897b4e1d100646fa122c924347721b2f5"),
|
||||
mustDecode("8f56351897b4e1d100646fa122c924347721b2f5"),
|
||||
},
|
||||
"mixed-with-empties",
|
||||
},
|
||||
}
|
||||
|
||||
var testTable = []struct {
|
||||
data []byte
|
||||
// pattern describes how to use data to construct the hash-input.
|
||||
// For every entry n at even indices this repeats the data n times.
|
||||
// For every entry m at odd indices this repeats a null-byte m times.
|
||||
// The input-data is constructed by concatenating the results in order.
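// Illustrative example (added, not part of the original comment): data "ab"
// with pattern []int64{2, 3} yields the input "abab" followed by 3 null-bytes.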
|
||||
pattern []int64
|
||||
out []byte
|
||||
name string
|
||||
}{
|
||||
{
|
||||
[]byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"),
|
||||
[]int64{64},
|
||||
mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
|
||||
"documentation-example L0",
|
||||
},
|
||||
{
|
||||
[]byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"),
|
||||
[]int64{64 * 256},
|
||||
mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"),
|
||||
"documentation-example L1",
|
||||
},
|
||||
{
|
||||
[]byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"),
|
||||
[]int64{64 * 256, 0, 64 * 128, 4096 * 128, 64*2 + 32},
|
||||
mustDecode("fd0da83a93d57dd4e514c8641088ba1322aa6947"),
|
||||
"documentation-example L2",
|
||||
},
|
||||
{
|
||||
[]byte("hello rclone\n"),
|
||||
[]int64{316},
|
||||
mustDecode("72370f9c18a2c20b31d71f3f4cee7a3cd2703737"),
|
||||
"not-block-aligned",
|
||||
},
|
||||
{
|
||||
[]byte("hello rclone\n"),
|
||||
[]int64{13, 4096 * 3, 4},
|
||||
mustDecode("a6990b81791f0d2db750b38f046df321c975aa60"),
|
||||
"not-block-aligned-with-null-bytes",
|
||||
},
|
||||
{
|
||||
[]byte{},
|
||||
[]int64{},
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
"empty",
|
||||
},
|
||||
{
|
||||
[]byte{},
|
||||
[]int64{0, 4096 * 256 * 256},
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
"null-bytes",
|
||||
},
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
func TestLevelAdd(t *testing.T) {
|
||||
for _, test := range testTableLevelPositionEmbedded {
|
||||
l := hidrivehash.NewLevel().(internal.LevelHash)
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
for i := range test.ins {
|
||||
l.Add(test.ins[i])
|
||||
assert.Equal(t, test.outs[i], l.Sum(nil))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLevelWrite(t *testing.T) {
|
||||
for _, test := range testTableLevel {
|
||||
l := hidrivehash.NewLevel()
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
for i := range test.ins {
|
||||
l.Write(test.ins[i])
|
||||
assert.Equal(t, test.outs[i], l.Sum(nil))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLevelIsFull(t *testing.T) {
|
||||
content := [hidrivehash.Size]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
|
||||
l := hidrivehash.NewLevel()
|
||||
for i := 0; i < 256; i++ {
|
||||
assert.False(t, l.(internal.LevelHash).IsFull())
|
||||
written, err := l.Write(content[:])
|
||||
assert.Equal(t, len(content), written)
|
||||
if !assert.NoError(t, err) {
|
||||
t.FailNow()
|
||||
}
|
||||
}
|
||||
assert.True(t, l.(internal.LevelHash).IsFull())
|
||||
written, err := l.Write(content[:])
|
||||
assert.True(t, l.(internal.LevelHash).IsFull())
|
||||
assert.Equal(t, 0, written)
|
||||
assert.ErrorIs(t, err, hidrivehash.ErrorHashFull)
|
||||
}
|
||||
|
||||
func TestLevelReset(t *testing.T) {
|
||||
l := hidrivehash.NewLevel()
|
||||
zeroHash := l.Sum(nil)
|
||||
_, err := l.Write([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19})
|
||||
if assert.NoError(t, err) {
|
||||
assert.NotEqual(t, zeroHash, l.Sum(nil))
|
||||
l.Reset()
|
||||
assert.Equal(t, zeroHash, l.Sum(nil))
|
||||
}
|
||||
}
|
||||
|
||||
func TestLevelSize(t *testing.T) {
|
||||
l := hidrivehash.NewLevel()
|
||||
assert.Equal(t, 20, l.Size())
|
||||
}
|
||||
|
||||
func TestLevelBlockSize(t *testing.T) {
|
||||
l := hidrivehash.NewLevel()
|
||||
assert.Equal(t, 20, l.BlockSize())
|
||||
}
|
||||
|
||||
func TestLevelBinaryMarshaler(t *testing.T) {
|
||||
content := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
|
||||
l := hidrivehash.NewLevel().(internal.LevelHash)
|
||||
l.Write(content[:10])
|
||||
encoded, err := l.MarshalBinary()
|
||||
if assert.NoError(t, err) {
|
||||
d := hidrivehash.NewLevel().(internal.LevelHash)
|
||||
err = d.UnmarshalBinary(encoded)
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, l.Sum(nil), d.Sum(nil))
|
||||
l.Write(content[10:])
|
||||
d.Write(content[10:])
|
||||
assert.Equal(t, l.Sum(nil), d.Sum(nil))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLevelInvalidEncoding(t *testing.T) {
|
||||
l := hidrivehash.NewLevel().(internal.LevelHash)
|
||||
err := l.UnmarshalBinary([]byte{})
|
||||
assert.ErrorIs(t, err, hidrivehash.ErrorInvalidEncoding)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
type infiniteReader struct {
|
||||
source []byte
|
||||
offset int
|
||||
}
|
||||
|
||||
func (m *infiniteReader) Read(b []byte) (int, error) {
|
||||
count := copy(b, m.source[m.offset:])
|
||||
m.offset += count
|
||||
m.offset %= len(m.source)
|
||||
return count, nil
|
||||
}
|
||||
|
||||
func writeInChunks(writer io.Writer, chunkSize int64, data []byte, pattern []int64) error {
|
||||
readers := make([]io.Reader, len(pattern))
|
||||
nullBytes := [4096]byte{}
|
||||
for i, n := range pattern {
|
||||
if i%2 == 0 {
|
||||
readers[i] = io.LimitReader(&infiniteReader{data, 0}, n*int64(len(data)))
|
||||
} else {
|
||||
readers[i] = io.LimitReader(&infiniteReader{nullBytes[:], 0}, n)
|
||||
}
|
||||
}
|
||||
reader := io.MultiReader(readers...)
|
||||
for {
|
||||
_, err := io.CopyN(writer, reader, chunkSize)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWrite(t *testing.T) {
|
||||
for _, test := range testTable {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
h := hidrivehash.New()
|
||||
err := writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern)
|
||||
if assert.NoError(t, err) {
|
||||
normalSum := h.Sum(nil)
|
||||
assert.Equal(t, test.out, normalSum)
|
||||
// Test if different block-sizes produce differing results.
|
||||
for _, blockSize := range []int64{397, 512, 4091, 8192, 10000} {
|
||||
t.Run(fmt.Sprintf("block-size %v", blockSize), func(t *testing.T) {
|
||||
h := hidrivehash.New()
|
||||
err := writeInChunks(h, blockSize, test.data, test.pattern)
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, normalSum, h.Sum(nil))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReset(t *testing.T) {
|
||||
h := hidrivehash.New()
|
||||
zeroHash := h.Sum(nil)
|
||||
_, err := h.Write([]byte{1})
|
||||
if assert.NoError(t, err) {
|
||||
assert.NotEqual(t, zeroHash, h.Sum(nil))
|
||||
h.Reset()
|
||||
assert.Equal(t, zeroHash, h.Sum(nil))
|
||||
}
|
||||
}
|
||||
|
||||
func TestSize(t *testing.T) {
|
||||
h := hidrivehash.New()
|
||||
assert.Equal(t, 20, h.Size())
|
||||
}
|
||||
|
||||
func TestBlockSize(t *testing.T) {
|
||||
h := hidrivehash.New()
|
||||
assert.Equal(t, 4096, h.BlockSize())
|
||||
}
|
||||
|
||||
func TestBinaryMarshaler(t *testing.T) {
|
||||
for _, test := range testTable {
|
||||
h := hidrivehash.New()
|
||||
d := hidrivehash.New()
|
||||
half := len(test.pattern) / 2
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
err := writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern[:half])
|
||||
assert.NoError(t, err)
|
||||
encoded, err := h.(encoding.BinaryMarshaler).MarshalBinary()
|
||||
if assert.NoError(t, err) {
|
||||
err = d.(encoding.BinaryUnmarshaler).UnmarshalBinary(encoded)
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, h.Sum(nil), d.Sum(nil))
|
||||
err = writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern[half:])
|
||||
assert.NoError(t, err)
|
||||
err = writeInChunks(d, int64(d.BlockSize()), test.data, test.pattern[half:])
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, h.Sum(nil), d.Sum(nil))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidEncoding(t *testing.T) {
|
||||
h := hidrivehash.New()
|
||||
err := h.(encoding.BinaryUnmarshaler).UnmarshalBinary([]byte{})
|
||||
assert.ErrorIs(t, err, hidrivehash.ErrorInvalidEncoding)
|
||||
}
|
||||
|
||||
func TestSum(t *testing.T) {
|
||||
assert.Equal(t, [hidrivehash.Size]byte{}, hidrivehash.Sum([]byte{}))
|
||||
content := []byte{1}
|
||||
h := hidrivehash.New()
|
||||
h.Write(content)
|
||||
sum := hidrivehash.Sum(content)
|
||||
assert.Equal(t, h.Sum(nil), sum[:])
|
||||
}
|
||||
17
backend/hidrive/hidrivehash/internal/internal.go
Normal file
@@ -0,0 +1,17 @@
package internal

import (
	"encoding"
	"hash"
)

// LevelHash is an internal interface for level-hashes.
type LevelHash interface {
	encoding.BinaryMarshaler
	encoding.BinaryUnmarshaler
	hash.Hash
	// Add takes a position-embedded checksum and adds it to the level.
	Add(sum []byte)
	// IsFull returns whether the number of checksums added to this level reached its capacity.
	IsFull() bool
}
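A small consumption sketch for this interface (added for illustration, not part of the commit; it mirrors how the package's own tests drive a level and assumes the hidrivehash and internal packages above are imported):

func exampleLevelUse(blockSums [][]byte) ([]byte, error) {
	// NewLevel is defined in the parent hidrivehash package; its concrete type
	// satisfies internal.LevelHash.
	l := hidrivehash.NewLevel().(internal.LevelHash)
	for _, sum := range blockSums {
		if l.IsFull() {
			break // a full level accepts no further checksums until Reset
		}
		if _, err := l.Write(sum); err != nil {
			return nil, err
		}
	}
	return l.Sum(nil), nil // aggregated checksum of everything written so far
}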
|
||||
@@ -6,6 +6,8 @@ package http
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime"
|
||||
"net/http"
|
||||
@@ -16,7 +18,6 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
@@ -34,11 +35,11 @@ var (
|
||||
func init() {
|
||||
fsi := &fs.RegInfo{
|
||||
Name: "http",
|
||||
Description: "http Connection",
|
||||
Description: "HTTP",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "url",
|
||||
Help: "URL of http host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
|
||||
Help: "URL of HTTP host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "headers",
|
||||
@@ -49,10 +50,9 @@ Use this to set additional HTTP headers for all transactions.
|
||||
The input format is comma separated list of key,value pairs. Standard
|
||||
[CSV encoding](https://godoc.org/encoding/csv) may be used.
|
||||
|
||||
For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
|
||||
For example, to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
|
||||
|
||||
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
|
||||
`,
|
||||
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.`,
|
||||
Default: fs.CommaSepList{},
|
||||
Advanced: true,
|
||||
}, {
|
||||
@@ -73,8 +73,9 @@ directories.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_head",
|
||||
Help: `Don't use HEAD requests to find file sizes in dir listing.
|
||||
Help: `Don't use HEAD requests.
|
||||
|
||||
HEAD requests are mainly used to find file sizes in dir listing.
|
||||
If your site is being very slow to load then you can try this option.
|
||||
Normally rclone does a HEAD request for each potential file in a
|
||||
directory listing to:
|
||||
@@ -83,12 +84,9 @@ directory listing to:
|
||||
- check it really exists
|
||||
- check to see if it is a directory
|
||||
|
||||
If you set this option, rclone will not do the HEAD request. This will mean
|
||||
|
||||
- directory listings are much quicker
|
||||
- rclone won't have the times or sizes of any files
|
||||
- some files that don't exist may be in the listing
|
||||
`,
|
||||
If you set this option, rclone will not do the HEAD request. This will mean
|
||||
that directory listings are much quicker, but rclone won't have the times or
|
||||
sizes of any files, and some files that don't exist may be in the listing.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}},
|
||||
@@ -132,11 +130,87 @@ func statusError(res *http.Response, err error) error {
|
||||
}
|
||||
if res.StatusCode < 200 || res.StatusCode > 299 {
|
||||
_ = res.Body.Close()
|
||||
return errors.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
|
||||
return fmt.Errorf("HTTP Error: %s", res.Status)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getFsEndpoint decides if url is to be considered a file or directory,
|
||||
// and returns a proper endpoint url to use for the fs.
|
||||
func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Options) (string, bool) {
|
||||
// If url ends with '/' it is already a proper url always assumed to be a directory.
|
||||
if url[len(url)-1] == '/' {
|
||||
return url, false
|
||||
}
|
||||
|
||||
// If url does not end with '/' we send a HEAD request to decide
|
||||
// if it is directory or file, and if directory appends the missing
|
||||
// '/', or if file returns the directory url to parent instead.
|
||||
createFileResult := func() (string, bool) {
|
||||
fs.Debugf(nil, "If path is a directory you must add a trailing '/'")
|
||||
parent, _ := path.Split(url)
|
||||
return parent, true
|
||||
}
|
||||
createDirResult := func() (string, bool) {
|
||||
fs.Debugf(nil, "To avoid the initial HEAD request add a trailing '/' to the path")
|
||||
return url + "/", false
|
||||
}
|
||||
|
||||
// If HEAD requests are not allowed we just have to assume it is a file.
|
||||
if opt.NoHead {
|
||||
fs.Debugf(nil, "Assuming path is a file as --http-no-head is set")
|
||||
return createFileResult()
|
||||
}
|
||||
|
||||
// Use a client which doesn't follow redirects so the server
|
||||
// doesn't redirect http://host/dir to http://host/dir/
|
||||
noRedir := *client
|
||||
noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
|
||||
if err != nil {
|
||||
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be created: %v", err)
|
||||
return createFileResult()
|
||||
}
|
||||
addHeaders(req, opt)
|
||||
res, err := noRedir.Do(req)
|
||||
|
||||
if err != nil {
|
||||
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be sent: %v", err)
|
||||
return createFileResult()
|
||||
}
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
fs.Debugf(nil, "Assuming path is a directory as HEAD response is it does not exist as a file (%s)", res.Status)
|
||||
return createDirResult()
|
||||
}
|
||||
if res.StatusCode == http.StatusMovedPermanently ||
|
||||
res.StatusCode == http.StatusFound ||
|
||||
res.StatusCode == http.StatusSeeOther ||
|
||||
res.StatusCode == http.StatusTemporaryRedirect ||
|
||||
res.StatusCode == http.StatusPermanentRedirect {
|
||||
redir := res.Header.Get("Location")
|
||||
if redir != "" {
|
||||
if redir[len(redir)-1] == '/' {
|
||||
fs.Debugf(nil, "Assuming path is a directory as HEAD response is redirect (%s) to a path that ends with '/': %s", res.Status, redir)
|
||||
return createDirResult()
|
||||
}
|
||||
fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) to a path that does not end with '/': %s", res.Status, redir)
|
||||
return createFileResult()
|
||||
}
|
||||
fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) but no location header", res.Status)
|
||||
return createFileResult()
|
||||
}
|
||||
if res.StatusCode < 200 || res.StatusCode > 299 {
|
||||
// Example is 403 (http.StatusForbidden) for servers not allowing HEAD requests.
|
||||
fs.Debugf(nil, "Assuming path is a file as HEAD response is an error (%s)", res.Status)
|
||||
return createFileResult()
|
||||
}
|
||||
|
||||
fs.Debugf(nil, "Assuming path is a file as HEAD response is success (%s)", res.Status)
|
||||
return createFileResult()
|
||||
}
|
||||
|
||||
// NewFs creates a new Fs object from the name and root. It connects to
|
||||
// the host specified in the config file.
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
@@ -167,37 +241,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
|
||||
client := fshttp.NewClient(ctx)
|
||||
|
||||
var isFile = false
|
||||
if !strings.HasSuffix(u.String(), "/") {
|
||||
// Make a client which doesn't follow redirects so the server
|
||||
// doesn't redirect http://host/dir to http://host/dir/
|
||||
noRedir := *client
|
||||
noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
// check to see if points to a file
|
||||
req, err := http.NewRequestWithContext(ctx, "HEAD", u.String(), nil)
|
||||
if err == nil {
|
||||
addHeaders(req, opt)
|
||||
res, err := noRedir.Do(req)
|
||||
err = statusError(res, err)
|
||||
if err == nil {
|
||||
isFile = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
newRoot := u.String()
|
||||
if isFile {
|
||||
// Point to the parent if this is a file
|
||||
newRoot, _ = path.Split(u.String())
|
||||
} else {
|
||||
if !strings.HasSuffix(newRoot, "/") {
|
||||
newRoot += "/"
|
||||
}
|
||||
}
|
||||
|
||||
u, err = url.Parse(newRoot)
|
||||
endpoint, isFile := getFsEndpoint(ctx, client, u.String(), opt)
|
||||
fs.Debugf(nil, "Root: %s", endpoint)
|
||||
u, err = url.Parse(endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -215,12 +261,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(ctx, f)
|
||||
|
||||
if isFile {
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(f.endpointURL, "/") {
|
||||
return nil, errors.New("internal error: url doesn't end with /")
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
@@ -296,7 +346,7 @@ func parseName(base *url.URL, name string) (string, error) {
|
||||
}
|
||||
// check it doesn't have URL parameters
|
||||
uStr := u.String()
|
||||
if strings.Index(uStr, "?") >= 0 {
|
||||
if strings.Contains(uStr, "?") {
|
||||
return "", errFoundQuestionMark
|
||||
}
|
||||
// check that this is going back to the same host and scheme
|
||||
@@ -377,15 +427,15 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
|
||||
URL := f.url(dir)
|
||||
u, err := url.Parse(URL)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to readDir")
|
||||
return nil, fmt.Errorf("failed to readDir: %w", err)
|
||||
}
|
||||
if !strings.HasSuffix(URL, "/") {
|
||||
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
|
||||
return nil, fmt.Errorf("internal error: readDir URL %q didn't end in /", URL)
|
||||
}
|
||||
// Do the request
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", URL, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "readDir failed")
|
||||
return nil, fmt.Errorf("readDir failed: %w", err)
|
||||
}
|
||||
f.addHeaders(req)
|
||||
res, err := f.httpClient.Do(req)
|
||||
@@ -397,7 +447,7 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
|
||||
}
|
||||
err = statusError(res, err)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to readDir")
|
||||
return nil, fmt.Errorf("failed to readDir: %w", err)
|
||||
}
|
||||
|
||||
contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
|
||||
@@ -405,10 +455,10 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
|
||||
case "text/html":
|
||||
names, err = parse(u, res.Body)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "readDir")
|
||||
return nil, fmt.Errorf("readDir: %w", err)
|
||||
}
|
||||
default:
|
||||
return nil, errors.Errorf("Can't parse content type %q", contentType)
|
||||
return nil, fmt.Errorf("can't parse content type %q", contentType)
|
||||
}
|
||||
return names, nil
|
||||
}
|
||||
@@ -428,7 +478,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
}
|
||||
names, err := f.readDir(ctx, dir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error listing %q", dir)
|
||||
return nil, fmt.Errorf("error listing %q: %w", dir, err)
|
||||
}
|
||||
var (
|
||||
entriesMu sync.Mutex // to protect entries
|
||||
@@ -540,7 +590,7 @@ func (o *Object) stat(ctx context.Context) error {
|
||||
url := o.url()
|
||||
req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "stat failed")
|
||||
return fmt.Errorf("stat failed: %w", err)
|
||||
}
|
||||
o.fs.addHeaders(req)
|
||||
res, err := o.fs.httpClient.Do(req)
|
||||
@@ -549,7 +599,7 @@ func (o *Object) stat(ctx context.Context) error {
|
||||
}
|
||||
err = statusError(res, err)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to stat")
|
||||
return fmt.Errorf("failed to stat: %w", err)
|
||||
}
|
||||
t, err := http.ParseTime(res.Header.Get("Last-Modified"))
|
||||
if err != nil {
|
||||
@@ -562,7 +612,7 @@ func (o *Object) stat(ctx context.Context) error {
|
||||
if o.fs.opt.NoSlash {
|
||||
mediaType, _, err := mime.ParseMediaType(o.contentType)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to parse Content-Type: %q", o.contentType)
|
||||
return fmt.Errorf("failed to parse Content-Type: %q: %w", o.contentType, err)
|
||||
}
|
||||
if mediaType == "text/html" {
|
||||
return fs.ErrorNotAFile
|
||||
@@ -588,7 +638,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
url := o.url()
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Open failed")
|
||||
return nil, fmt.Errorf("Open failed: %w", err)
|
||||
}
|
||||
|
||||
// Add optional headers
|
||||
@@ -601,7 +651,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
res, err := o.fs.httpClient.Do(req)
|
||||
err = statusError(res, err)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Open failed")
|
||||
return nil, fmt.Errorf("Open failed: %w", err)
|
||||
}
|
||||
return res.Body, nil
|
||||
}
|
||||
|
||||
@@ -8,8 +8,10 @@ import (
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -24,10 +26,11 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
remoteName = "TestHTTP"
|
||||
testPath = "test"
|
||||
filesPath = filepath.Join(testPath, "files")
|
||||
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
|
||||
remoteName = "TestHTTP"
|
||||
testPath = "test"
|
||||
filesPath = filepath.Join(testPath, "files")
|
||||
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
|
||||
lineEndSize = 1
|
||||
)
|
||||
|
||||
// prepareServer the test server and return a function to tidy it up afterwards
|
||||
@@ -35,6 +38,22 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
|
||||
// file server for test/files
|
||||
fileServer := http.FileServer(http.Dir(filesPath))
|
||||
|
||||
// verify the file path is correct, and also check which line endings
|
||||
// are used to get sizes right ("\n" except on Windows, but even there
|
||||
// we may have "\n" or "\r\n" depending on git crlf setting)
|
||||
fileList, err := ioutil.ReadDir(filesPath)
|
||||
require.NoError(t, err)
|
||||
require.Greater(t, len(fileList), 0)
|
||||
for _, file := range fileList {
|
||||
if !file.IsDir() {
|
||||
data, _ := ioutil.ReadFile(filepath.Join(filesPath, file.Name()))
|
||||
if strings.HasSuffix(string(data), "\r\n") {
|
||||
lineEndSize = 2
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// test the headers are there then pass on to fileServer
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
|
||||
@@ -91,7 +110,7 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
|
||||
|
||||
e = entries[1]
|
||||
assert.Equal(t, "one%.txt", e.Remote())
|
||||
assert.Equal(t, int64(6), e.Size())
|
||||
assert.Equal(t, int64(5+lineEndSize), e.Size())
|
||||
_, ok = e.(*Object)
|
||||
assert.True(t, ok)
|
||||
|
||||
@@ -108,7 +127,7 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
|
||||
_, ok = e.(fs.Directory)
|
||||
assert.True(t, ok)
|
||||
} else {
|
||||
assert.Equal(t, int64(41), e.Size())
|
||||
assert.Equal(t, int64(40+lineEndSize), e.Size())
|
||||
_, ok = e.(*Object)
|
||||
assert.True(t, ok)
|
||||
}
|
||||
@@ -141,7 +160,7 @@ func TestListSubDir(t *testing.T) {
|
||||
|
||||
e := entries[0]
|
||||
assert.Equal(t, "three/underthree.txt", e.Remote())
|
||||
assert.Equal(t, int64(9), e.Size())
|
||||
assert.Equal(t, int64(8+lineEndSize), e.Size())
|
||||
_, ok := e.(*Object)
|
||||
assert.True(t, ok)
|
||||
}
|
||||
@@ -154,7 +173,7 @@ func TestNewObject(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "four/under four.txt", o.Remote())
|
||||
assert.Equal(t, int64(9), o.Size())
|
||||
assert.Equal(t, int64(8+lineEndSize), o.Size())
|
||||
_, ok := o.(*Object)
|
||||
assert.True(t, ok)
|
||||
|
||||
@@ -187,7 +206,11 @@ func TestOpen(t *testing.T) {
|
||||
data, err := ioutil.ReadAll(fd)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fd.Close())
|
||||
assert.Equal(t, "beetroot\n", string(data))
|
||||
if lineEndSize == 2 {
|
||||
assert.Equal(t, "beetroot\r\n", string(data))
|
||||
} else {
|
||||
assert.Equal(t, "beetroot\n", string(data))
|
||||
}
|
||||
|
||||
// Test with range request
|
||||
fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
|
||||
@@ -236,7 +259,7 @@ func TestIsAFileSubDir(t *testing.T) {
|
||||
|
||||
e := entries[0]
|
||||
assert.Equal(t, "underthree.txt", e.Remote())
|
||||
assert.Equal(t, int64(9), e.Size())
|
||||
assert.Equal(t, int64(8+lineEndSize), e.Size())
|
||||
_, ok := e.(*Object)
|
||||
assert.True(t, ok)
|
||||
}
|
||||
@@ -353,3 +376,106 @@ func TestParseCaddy(t *testing.T) {
|
||||
"v1.36-22-g06ea13a-ssh-agentβ/",
|
||||
})
|
||||
}
|
||||
|
||||
func TestFsNoSlashRoots(t *testing.T) {
|
||||
// Test Fs with roots that does not end with '/', the logic that
|
||||
// decides if url is to be considered a file or directory, based
|
||||
// on result from a HEAD request.
|
||||
|
||||
// Handler for faking HEAD responses with different status codes
|
||||
headCount := 0
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method == "HEAD" {
|
||||
headCount++
|
||||
responseCode, err := strconv.Atoi(path.Base(r.URL.String()))
|
||||
require.NoError(t, err)
|
||||
if strings.HasPrefix(r.URL.String(), "/redirect/") {
|
||||
var redir string
|
||||
if strings.HasPrefix(r.URL.String(), "/redirect/file/") {
|
||||
redir = "/redirected"
|
||||
} else if strings.HasPrefix(r.URL.String(), "/redirect/dir/") {
|
||||
redir = "/redirected/"
|
||||
} else {
|
||||
require.Fail(t, "Redirect test requests must start with '/redirect/file/' or '/redirect/dir/'")
|
||||
}
|
||||
http.Redirect(w, r, redir, responseCode)
|
||||
} else {
|
||||
http.Error(w, http.StatusText(responseCode), responseCode)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// Make the test server
|
||||
ts := httptest.NewServer(handler)
|
||||
defer ts.Close()
|
||||
|
||||
// Configure the remote
|
||||
configfile.Install()
|
||||
m := configmap.Simple{
|
||||
"type": "http",
|
||||
"url": ts.URL,
|
||||
}
|
||||
|
||||
// Test
|
||||
for i, test := range []struct {
|
||||
root string
|
||||
isFile bool
|
||||
}{
|
||||
// 2xx success
|
||||
{"parent/200", true},
|
||||
{"parent/204", true},
|
||||
|
||||
// 3xx redirection Redirect status 301, 302, 303, 307, 308
|
||||
{"redirect/file/301", true}, // Request is redirected to "/redirected"
|
||||
{"redirect/dir/301", false}, // Request is redirected to "/redirected/"
|
||||
{"redirect/file/302", true}, // Request is redirected to "/redirected"
|
||||
{"redirect/dir/302", false}, // Request is redirected to "/redirected/"
|
||||
{"redirect/file/303", true}, // Request is redirected to "/redirected"
|
||||
{"redirect/dir/303", false}, // Request is redirected to "/redirected/"
|
||||
|
||||
{"redirect/file/304", true}, // Not really a redirect, handled like 4xx errors (below)
|
||||
{"redirect/file/305", true}, // Not really a redirect, handled like 4xx errors (below)
|
||||
{"redirect/file/306", true}, // Not really a redirect, handled like 4xx errors (below)
|
||||
|
||||
{"redirect/file/307", true}, // Request is redirected to "/redirected"
|
||||
{"redirect/dir/307", false}, // Request is redirected to "/redirected/"
|
||||
{"redirect/file/308", true}, // Request is redirected to "/redirected"
|
||||
{"redirect/dir/308", false}, // Request is redirected to "/redirected/"
|
||||
|
||||
// 4xx client errors
|
||||
{"parent/403", true}, // Forbidden status (head request blocked)
|
||||
{"parent/404", false}, // Not found status
|
||||
} {
|
||||
for _, noHead := range []bool{false, true} {
|
||||
var isFile bool
|
||||
if noHead {
|
||||
m.Set("no_head", "true")
|
||||
isFile = true
|
||||
} else {
|
||||
m.Set("no_head", "false")
|
||||
isFile = test.isFile
|
||||
}
|
||||
headCount = 0
|
||||
f, err := NewFs(context.Background(), remoteName, test.root, m)
|
||||
if noHead {
|
||||
assert.Equal(t, 0, headCount)
|
||||
} else {
|
||||
assert.Equal(t, 1, headCount)
|
||||
}
|
||||
if isFile {
|
||||
assert.ErrorIs(t, err, fs.ErrorIsFile)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
var endpoint string
|
||||
if isFile {
|
||||
parent, _ := path.Split(test.root)
|
||||
endpoint = "/" + parent
|
||||
} else {
|
||||
endpoint = "/" + test.root + "/"
|
||||
}
|
||||
what := fmt.Sprintf("i=%d, root=%q, isFile=%v, noHead=%v", i, test.root, isFile, noHead)
|
||||
assert.Equal(t, ts.URL+endpoint, f.String(), what)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ package hubic
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
@@ -16,7 +17,6 @@ import (
|
||||
"time"
|
||||
|
||||
swiftLib "github.com/ncw/swift/v2"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/backend/swift"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
@@ -119,8 +119,8 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
|
||||
defer fs.CheckClose(resp.Body, &err)
|
||||
if resp.StatusCode < 200 || resp.StatusCode > 299 {
|
||||
body, _ := ioutil.ReadAll(resp.Body)
|
||||
bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
|
||||
return errors.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
|
||||
bodyStr := strings.TrimSpace(strings.ReplaceAll(string(body), "\n", " "))
|
||||
return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
|
||||
}
|
||||
decoder := json.NewDecoder(resp.Body)
|
||||
var result credentials
|
||||
@@ -138,7 +138,7 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
|
||||
return err
|
||||
}
|
||||
f.expires = expires
|
||||
fs.Debugf(f, "Got swift credentials (expiry %v in %v)", f.expires, f.expires.Sub(time.Now()))
|
||||
fs.Debugf(f, "Got swift credentials (expiry %v in %v)", f.expires, time.Until(f.expires))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -146,7 +146,7 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to configure Hubic")
|
||||
return nil, fmt.Errorf("failed to configure Hubic: %w", err)
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
@@ -163,7 +163,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
err = c.Authenticate(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error authenticating swift connection")
|
||||
return nil, fmt.Errorf("error authenticating swift connection: %w", err)
|
||||
}
|
||||
|
||||
// Parse config into swift.Options struct
|
||||
|
||||
1295
backend/internetarchive/internetarchive.go
Normal file
File diff suppressed because it is too large
17
backend/internetarchive/internetarchive_test.go
Normal file
@@ -0,0 +1,17 @@
|
||||
// Test internetarchive filesystem interface
|
||||
package internetarchive_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/internetarchive"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestIA:lesmi-rclone-test/",
|
||||
NilObject: (*internetarchive.Object)(nil),
|
||||
})
|
||||
}
|
||||
@@ -2,49 +2,75 @@ package api
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
// default time format for almost all request and responses
|
||||
timeFormat = "2006-01-02-T15:04:05Z0700"
|
||||
// the API server seems to use a different format
|
||||
apiTimeFormat = "2006-01-02T15:04:05Z07:00"
|
||||
// default time format historically used for all request and responses.
|
||||
// Similar to time.RFC3339, but with an extra '-' in front of 'T',
|
||||
// and no ':' separator in timezone offset. Some newer endpoints have
|
||||
// moved to proper time.RFC3339 conformant format instead.
|
||||
jottaTimeFormat = "2006-01-02-T15:04:05Z0700"
|
||||
)
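// Added illustration (not part of the commit; the timestamp is an arbitrary example):
// jottaTimeFormat (classic API): "2018-07-15-T21:05:42+0200"
// time.RFC3339 (newer APIs):     "2018-07-15T21:05:42+02:00"
// i.e. the classic layout inserts '-' before 'T' and drops the ':' in the zone offset.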
|
||||
|
||||
// Time represents time values in the Jottacloud API. It uses a custom RFC3339 like format.
|
||||
type Time time.Time
|
||||
|
||||
// UnmarshalXML turns XML into a Time
|
||||
func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
// unmarshalXML turns XML into a Time
|
||||
func unmarshalXMLTime(d *xml.Decoder, start xml.StartElement, timeFormat string) (time.Time, error) {
|
||||
var v string
|
||||
if err := d.DecodeElement(&v, &start); err != nil {
|
||||
return err
|
||||
return time.Time{}, err
|
||||
}
|
||||
if v == "" {
|
||||
*t = Time(time.Time{})
|
||||
return nil
|
||||
return time.Time{}, nil
|
||||
}
|
||||
newTime, err := time.Parse(timeFormat, v)
|
||||
if err == nil {
|
||||
*t = Time(newTime)
|
||||
return newTime, nil
|
||||
}
|
||||
return time.Time{}, err
|
||||
}
|
||||
|
||||
// JottaTime represents time values in the classic API using a custom RFC3339 like format
|
||||
type JottaTime time.Time
|
||||
|
||||
// String returns JottaTime string in Jottacloud classic format
|
||||
func (t JottaTime) String() string { return time.Time(t).Format(jottaTimeFormat) }
|
||||
|
||||
// UnmarshalXML turns XML into a JottaTime
|
||||
func (t *JottaTime) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
tm, err := unmarshalXMLTime(d, start, jottaTimeFormat)
|
||||
*t = JottaTime(tm)
|
||||
return err
|
||||
}
|
||||
|
||||
// MarshalXML turns a Time into XML
|
||||
func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
// MarshalXML turns a JottaTime into XML
|
||||
func (t *JottaTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
return e.EncodeElement(t.String(), start)
|
||||
}
|
||||
|
||||
// Return Time string in Jottacloud format
|
||||
func (t Time) String() string { return time.Time(t).Format(timeFormat) }
|
||||
// Rfc3339Time represents time values in the newer APIs using standard RFC3339 format
|
||||
type Rfc3339Time time.Time
|
||||
|
||||
// APIString returns Time string in Jottacloud API format
|
||||
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
|
||||
// String returns Rfc3339Time string in Jottacloud RFC3339 format
|
||||
func (t Rfc3339Time) String() string { return time.Time(t).Format(time.RFC3339) }
|
||||
|
||||
// UnmarshalXML turns XML into a Rfc3339Time
|
||||
func (t *Rfc3339Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
tm, err := unmarshalXMLTime(d, start, time.RFC3339)
|
||||
*t = Rfc3339Time(tm)
|
||||
return err
|
||||
}
|
||||
|
||||
// MarshalXML turns a Rfc3339Time into XML
|
||||
func (t *Rfc3339Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
return e.EncodeElement(t.String(), start)
|
||||
}
|
||||
|
||||
// MarshalJSON turns a Rfc3339Time into JSON
|
||||
func (t *Rfc3339Time) MarshalJSON() ([]byte, error) {
|
||||
return []byte(fmt.Sprintf("\"%s\"", t.String())), nil
|
||||
}
|
||||
|
||||
// LoginToken is struct representing the login token generated in the WebUI
|
||||
type LoginToken struct {
|
||||
@@ -123,16 +149,11 @@ type AllocateFileResponse struct {
|
||||
|
||||
// UploadResponse after an upload
|
||||
type UploadResponse struct {
|
||||
Name string `json:"name"`
|
||||
Path string `json:"path"`
|
||||
Kind string `json:"kind"`
|
||||
ContentID string `json:"content_id"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
Md5 string `json:"md5"`
|
||||
Created int64 `json:"created"`
|
||||
Modified int64 `json:"modified"`
|
||||
Deleted interface{} `json:"deleted"`
|
||||
Mime string `json:"mime"`
|
||||
Path string `json:"path"`
|
||||
ContentID string `json:"content_id"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
Md5 string `json:"md5"`
|
||||
Modified int64 `json:"modified"`
|
||||
}
|
||||
|
||||
// DeviceRegistrationResponse is the response to registering a device
|
||||
@@ -339,9 +360,9 @@ type JottaFolder struct {
|
||||
Name string `xml:"name,attr"`
|
||||
Deleted Flag `xml:"deleted,attr"`
|
||||
Path string `xml:"path"`
|
||||
CreatedAt Time `xml:"created"`
|
||||
ModifiedAt Time `xml:"modified"`
|
||||
Updated Time `xml:"updated"`
|
||||
CreatedAt JottaTime `xml:"created"`
|
||||
ModifiedAt JottaTime `xml:"modified"`
|
||||
Updated JottaTime `xml:"updated"`
|
||||
Folders []JottaFolder `xml:"folders>folder"`
|
||||
Files []JottaFile `xml:"files>file"`
|
||||
}
|
||||
@@ -366,17 +387,17 @@ GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/.../<file>
|
||||
// JottaFile represents a Jottacloud file
|
||||
type JottaFile struct {
|
||||
XMLName xml.Name
|
||||
Name string `xml:"name,attr"`
|
||||
Deleted Flag `xml:"deleted,attr"`
|
||||
PublicURI string `xml:"publicURI"`
|
||||
PublicSharePath string `xml:"publicSharePath"`
|
||||
State string `xml:"currentRevision>state"`
|
||||
CreatedAt Time `xml:"currentRevision>created"`
|
||||
ModifiedAt Time `xml:"currentRevision>modified"`
|
||||
Updated Time `xml:"currentRevision>updated"`
|
||||
Size int64 `xml:"currentRevision>size"`
|
||||
MimeType string `xml:"currentRevision>mime"`
|
||||
MD5 string `xml:"currentRevision>md5"`
|
||||
Name string `xml:"name,attr"`
|
||||
Deleted Flag `xml:"deleted,attr"`
|
||||
PublicURI string `xml:"publicURI"`
|
||||
PublicSharePath string `xml:"publicSharePath"`
|
||||
State string `xml:"currentRevision>state"`
|
||||
CreatedAt JottaTime `xml:"currentRevision>created"`
|
||||
ModifiedAt JottaTime `xml:"currentRevision>modified"`
|
||||
Updated JottaTime `xml:"currentRevision>updated"`
|
||||
Size int64 `xml:"currentRevision>size"`
|
||||
MimeType string `xml:"currentRevision>mime"`
|
||||
MD5 string `xml:"currentRevision>md5"`
|
||||
}
|
||||
|
||||
// Error is a custom Error for wrapping Jottacloud error responses
|
||||
|
||||
File diff suppressed because it is too large
@@ -28,33 +28,57 @@ import (
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "koofr",
|
||||
Description: "Koofr",
|
||||
Description: "Koofr, Digi Storage and other Koofr-compatible storage providers",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: fs.ConfigProvider,
|
||||
Help: "Choose your storage provider.",
|
||||
// NOTE if you add a new provider here, then add it in the
|
||||
// setProviderDefaults() function and update options accordingly
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "koofr",
|
||||
Help: "Koofr, https://app.koofr.net/",
|
||||
}, {
|
||||
Value: "digistorage",
|
||||
Help: "Digi Storage, https://storage.rcs-rds.ro/",
|
||||
}, {
|
||||
Value: "other",
|
||||
Help: "Any other Koofr API compatible storage service",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "The Koofr API endpoint to use.",
|
||||
Default: "https://app.koofr.net",
|
||||
Provider: "other",
|
||||
Required: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "mountid",
|
||||
Help: "Mount ID of the mount to use.\n\nIf omitted, the primary mount is used.",
|
||||
Required: false,
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "setmtime",
|
||||
Help: "Does the backend support setting modification time.\n\nSet this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
|
||||
Default: true,
|
||||
Required: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "Your Koofr user name.",
|
||||
Help: "Your user name.",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
|
||||
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
|
||||
Provider: "koofr",
|
||||
IsPassword: true,
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password).",
|
||||
Provider: "digistorage",
|
||||
IsPassword: true,
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Your password for rclone (generate one at your service's settings page).",
|
||||
Provider: "other",
|
||||
IsPassword: true,
|
||||
Required: true,
|
||||
}, {
|
||||
@@ -71,6 +95,7 @@ func init() {
|
||||
|
||||
// Options represent the configuration of the Koofr backend
|
||||
type Options struct {
|
||||
Provider string `config:"provider"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
MountID string `config:"mountid"`
|
||||
User string `config:"user"`
|
||||
@@ -255,13 +280,38 @@ func (f *Fs) fullPath(part string) string {
|
||||
return f.opt.Enc.FromStandardPath(path.Join("/", f.root, part))
|
||||
}
|
||||
|
||||
// NewFs constructs a new filesystem given a root path and configuration options
|
||||
func setProviderDefaults(opt *Options) {
|
||||
// handle old, provider-less configs
|
||||
if opt.Provider == "" {
|
||||
if opt.Endpoint == "" || strings.HasPrefix(opt.Endpoint, "https://app.koofr.net") {
|
||||
opt.Provider = "koofr"
|
||||
} else if strings.HasPrefix(opt.Endpoint, "https://storage.rcs-rds.ro") {
|
||||
opt.Provider = "digistorage"
|
||||
} else {
|
||||
opt.Provider = "other"
|
||||
}
|
||||
}
|
||||
// now assign an endpoint
|
||||
if opt.Provider == "koofr" {
|
||||
opt.Endpoint = "https://app.koofr.net"
|
||||
} else if opt.Provider == "digistorage" {
|
||||
opt.Endpoint = "https://storage.rcs-rds.ro"
|
||||
}
|
||||
}
|
||||
|
||||
// NewFs constructs a new filesystem given a root path and rclone configuration options
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
||||
opt := new(Options)
|
||||
err = configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
setProviderDefaults(opt)
|
||||
return NewFsFromOptions(ctx, name, root, opt)
|
||||
}
|
||||
|
||||
// NewFsFromOptions constructs a new filesystem given a root path and internal configuration options
|
||||
func NewFsFromOptions(ctx context.Context, name, root string, opt *Options) (ff fs.Fs, err error) {
|
||||
pass, err := obscure.Reveal(opt.Password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -301,9 +351,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
||||
}
|
||||
if f.mountID == "" {
|
||||
if opt.MountID == "" {
|
||||
return nil, errors.New("Failed to find primary mount")
|
||||
return nil, errors.New("failed to find primary mount")
|
||||
}
|
||||
return nil, errors.New("Failed to find mount " + opt.MountID)
|
||||
return nil, errors.New("failed to find mount " + opt.MountID)
|
||||
}
|
||||
rootFile, err := f.client.FilesInfo(f.mountID, f.opt.Enc.FromStandardPath("/"+f.root))
|
||||
if err == nil && rootFile.Type != "dir" {
|
||||
|
||||
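The setProviderDefaults helper above maps legacy, provider-less configs onto a provider based on the endpoint, then pins the endpoint for the known providers. A minimal table-driven sketch of that mapping (not part of the diff; it assumes it lives alongside the code above in the koofr package):

package koofr

import "testing"

// Sketch only: checks the endpoint -> provider mapping that
// setProviderDefaults applies to configs predating the provider option.
func TestSetProviderDefaultsSketch(t *testing.T) {
	cases := []struct {
		endpoint string
		want     string
	}{
		{"", "koofr"},                                 // empty endpoint defaults to Koofr
		{"https://app.koofr.net", "koofr"},            // explicit Koofr endpoint
		{"https://storage.rcs-rds.ro", "digistorage"}, // Digi Storage endpoint
		{"https://dav.example.com", "other"},          // hypothetical third-party endpoint
	}
	for _, c := range cases {
		opt := &Options{Endpoint: c.endpoint}
		setProviderDefaults(opt)
		if opt.Provider != c.want {
			t.Errorf("endpoint %q: got provider %q, want %q", c.endpoint, opt.Provider, c.want)
		}
	}
}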
@@ -5,10 +5,10 @@ package local

import (
"context"
"fmt"
"os"
"syscall"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)

@@ -20,7 +20,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if os.IsNotExist(err) {
return nil, fs.ErrorDirNotFound
}
return nil, errors.Wrap(err, "failed to read disk usage")
return nil, fmt.Errorf("failed to read disk usage: %w", err)
}
bs := int64(s.Bsize) // nolint: unconvert
usage := &fs.Usage{
@@ -5,10 +5,10 @@ package local

import (
"context"
"fmt"
"syscall"
"unsafe"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)

@@ -17,14 +17,18 @@ var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSp
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var available, total, free int64
root, e := syscall.UTF16PtrFromString(f.root)
if e != nil {
return nil, fmt.Errorf("failed to read disk usage: %w", e)
}
_, _, e1 := getFreeDiskSpace.Call(
uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),
uintptr(unsafe.Pointer(root)),
uintptr(unsafe.Pointer(&available)), // lpFreeBytesAvailable - for this user
uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
)
if e1 != syscall.Errno(0) {
return nil, errors.Wrap(e1, "failed to read disk usage")
return nil, fmt.Errorf("failed to read disk usage: %w", e1)
}
usage := &fs.Usage{
Total: fs.NewUsageValue(total), // quota of bytes that can be used
@@ -4,6 +4,7 @@ package local
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"

@@ -16,7 +17,6 @@ import (
"time"
"unicode/utf8"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"

@@ -42,9 +42,22 @@ func init() {
Description: "Local Disk",
NewFs: NewFs,
CommandHelp: commandHelp,
MetadataInfo: &fs.MetadataInfo{
System: systemMetadataInfo,
Help: `Depending on which OS is in use the local backend may return only some
of the system metadata. Setting system metadata is supported on all
OSes but setting user metadata is only supported on linux, freebsd,
netbsd, macOS and Solaris. It is **not** supported on Windows yet
([see pkg/attrs#47](https://github.com/pkg/xattr/issues/47)).

User metadata is stored as extended attributes (which may not be
supported by all file systems) under the "user.*" prefix.
`,
},
Options: []fs.Option{{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows.",
Default: false,
Advanced: runtime.GOOS != "windows",
Examples: []fs.OptionExample{{
Value: "true",

@@ -279,6 +292,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
CanHaveEmptyDirectories: true,
IsLocal: true,
SlowHash: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
}).Fill(ctx, f)
if opt.FollowSymlinks {
f.lstat = os.Stat
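The Fill call above now advertises metadata support through three new feature flags. A short sketch (illustrative only, not part of the diff) of how calling code might query those flags at runtime; it assumes f is any fs.Fs such as the local backend registered above, with the fmt and github.com/rclone/rclone/fs imports in scope:

// Sketch: inspect the metadata feature flags populated by the Fill call above.
func describeMetadataSupport(f fs.Fs) {
	features := f.Features()
	fmt.Printf("read system metadata:  %v\n", features.ReadMetadata)
	fmt.Printf("write system metadata: %v\n", features.WriteMetadata)
	fmt.Printf("user metadata (xattrs): %v\n", features.UserMetadata)
}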
@@ -432,7 +448,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fd, err := os.Open(fsDirPath)
if err != nil {
isPerm := os.IsPermission(err)
err = errors.Wrapf(err, "failed to open directory %q", dir)
err = fmt.Errorf("failed to open directory %q: %w", dir, err)
fs.Errorf(dir, "%v", err)
if isPerm {
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(err))

@@ -443,7 +459,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
defer func() {
cerr := fd.Close()
if cerr != nil && err == nil {
err = errors.Wrapf(cerr, "failed to close directory %q:", dir)
err = fmt.Errorf("failed to close directory %q:: %w", dir, cerr)
}
}()

@@ -473,7 +489,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue
}
if fierr != nil {
err = errors.Wrapf(err, "failed to read directory %q", namepath)
err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue

@@ -483,7 +499,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
}
if err != nil {
return nil, errors.Wrap(err, "failed to read directory entry")
return nil, fmt.Errorf("failed to read directory entry: %w", err)
}

for _, fi := range fis {

@@ -496,7 +512,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fi, err = os.Stat(localPath)
if os.IsNotExist(err) || isCircularSymlinkError(err) {
// Skip bad symlinks and circular symlinks
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
fs.Errorf(newRemote, "Listing error: %v", err)
err = accounting.Stats(ctx).Error(err)
continue

@@ -672,7 +688,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return err
}
if !fi.Mode().IsDir() {
return errors.Errorf("can't purge non directory: %q", dir)
return fmt.Errorf("can't purge non directory: %q", dir)
}
return os.RemoveAll(dir)
}

@@ -866,12 +882,12 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
err := o.lstat()
var changed bool
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
if errors.Is(err, os.ErrNotExist) {
// If file not found then we assume any accumulated
// hashes are OK - this will error on Open
changed = true
} else {
return "", errors.Wrap(err, "hash: failed to stat")
return "", fmt.Errorf("hash: failed to stat: %w", err)
}
} else {
o.fs.objectMetaMu.RLock()

@@ -900,16 +916,16 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
in = readers.NewLimitedReadCloser(in, o.size)
}
if err != nil {
return "", errors.Wrap(err, "hash: failed to open")
return "", fmt.Errorf("hash: failed to open: %w", err)
}
var hashes map[hash.Type]string
hashes, err = hash.StreamTypes(in, hash.NewHashSet(r))
hashes, err = hash.StreamTypes(readers.NewContextReader(ctx, in), hash.NewHashSet(r))
closeErr := in.Close()
if err != nil {
return "", errors.Wrap(err, "hash: failed to read")
return "", fmt.Errorf("hash: failed to read: %w", err)
}
if closeErr != nil {
return "", errors.Wrap(closeErr, "hash: failed to close")
return "", fmt.Errorf("hash: failed to close: %w", closeErr)
}
hashValue = hashes[r]
o.fs.objectMetaMu.Lock()

@@ -937,17 +953,22 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// Set the atime and mtime of the object
func (o *Object) setTimes(atime, mtime time.Time) (err error) {
if o.translatedLink {
err = lChtimes(o.path, atime, mtime)
} else {
err = os.Chtimes(o.path, atime, mtime)
}
return err
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if o.fs.opt.NoSetModTime {
return nil
}
var err error
if o.translatedLink {
err = lChtimes(o.path, modTime, modTime)
} else {
err = os.Chtimes(o.path, modTime, modTime)
}
err := o.setTimes(modTime, modTime)
if err != nil {
return err
}

@@ -990,17 +1011,17 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
// Check if file has the same size and modTime
fi, err := file.fd.Stat()
if err != nil {
return 0, errors.Wrap(err, "can't read status of source file while transferring")
return 0, fmt.Errorf("can't read status of source file while transferring: %w", err)
}
file.o.fs.objectMetaMu.RLock()
oldtime := file.o.modTime
oldsize := file.o.size
file.o.fs.objectMetaMu.RUnlock()
if oldsize != fi.Size() {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size()))
return 0, fserrors.NoLowLevelRetryError(fmt.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size()))
}
if !oldtime.Equal(fi.ModTime()) {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime()))
return 0, fserrors.NoLowLevelRetryError(fmt.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime()))
}
}

@@ -1133,6 +1154,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}

// Wipe hashes before update
o.clearHashCache()

var symlinkData bytes.Buffer
// If the object is a regular file, create it.
// If it is a translated link, just read in the contents, and

@@ -1219,6 +1243,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}

// Fetch and set metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, src, options)
if err != nil {
return fmt.Errorf("failed to read metadata from source object: %w", err)
}
err = o.writeMetadata(meta)
if err != nil {
return fmt.Errorf("failed to set metadata: %w", err)
}

// ReRead info now that we have finished
return o.lstat()
}

@@ -1295,6 +1329,13 @@ func (o *Object) setMetadata(info os.FileInfo) {
}
}

// clearHashCache wipes any cached hashes for the object
func (o *Object) clearHashCache() {
o.fs.objectMetaMu.Lock()
o.hashes = nil
o.fs.objectMetaMu.Unlock()
}

// Stat an Object into info
func (o *Object) lstat() error {
info, err := o.fs.lstat(o.path)

@@ -1306,9 +1347,38 @@ func (o *Object) lstat() error {

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
o.clearHashCache()
return remove(o.path)
}

// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
metadata, err = o.getXattr()
if err != nil {
return nil, err
}
err = o.readMetadataFromFile(&metadata)
if err != nil {
return nil, err
}
return metadata, nil
}

// Write the metadata on the object
func (o *Object) writeMetadata(metadata fs.Metadata) (err error) {
err = o.setXattr(metadata)
if err != nil {
return err
}
err = o.writeMetadataToFile(metadata)
if err != nil {
return err
}
return err
}

func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
if runtime.GOOS == "windows" {
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {

@@ -1348,4 +1418,5 @@ var (
_ fs.Commander = &Fs{}
_ fs.OpenWriterAter = &Fs{}
_ fs.Object = &Object{}
_ fs.Metadataer = &Object{}
)
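Taken together, Metadata, writeMetadata and the new fs.Metadataer assertion above form the whole per-object metadata surface of the backend. A minimal consumer sketch (not part of the diff; it works against any fs.Object and assumes the context, fmt and github.com/rclone/rclone/fs imports):

// Sketch: dump metadata from any object whose backend implements fs.Metadataer.
func printObjectMetadata(ctx context.Context, o fs.Object) error {
	do, ok := o.(fs.Metadataer)
	if !ok {
		return nil // backend does not expose per-object metadata
	}
	m, err := do.Metadata(ctx)
	if err != nil {
		return err
	}
	for k, v := range m {
		fmt.Printf("%s=%s\n", k, v) // e.g. mode, mtime, plus user keys read from xattrs
	}
	return nil
}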
@@ -1,17 +1,21 @@
package local

import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"testing"
"time"

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"

@@ -93,16 +97,16 @@ func TestSymlink(t *testing.T) {
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)

// Check with no symlink flags
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote)
r.CheckLocalItems(t, file1)
r.CheckRemoteItems(t)

// Set fs into "-L" mode
f.opt.FollowSymlinks = true
f.opt.TranslateSymlinks = false
f.lstat = os.Stat

fstest.CheckItems(t, r.Flocal, file1, file2d)
fstest.CheckItems(t, r.Fremote)
r.CheckLocalItems(t, file1, file2d)
r.CheckRemoteItems(t)

// Set fs into "-l" mode
f.opt.FollowSymlinks = false

@@ -111,7 +115,7 @@ func TestSymlink(t *testing.T) {

fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
fstest.CheckItems(t, r.Flocal, file1, file2)
r.CheckLocalItems(t, file1, file2)
}

// Create a symlink

@@ -119,7 +123,7 @@ func TestSymlink(t *testing.T) {
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
r.CheckLocalItems(t, file1, file2, file3)
}

// Check it got the correct contents

@@ -166,3 +170,199 @@ func TestSymlinkError(t *testing.T) {
_, err := NewFs(context.Background(), "local", "/", m)
assert.Equal(t, errLinksAndCopyLinks, err)
}

// Test hashes on updating an object
func TestHashOnUpdate(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
f := r.Flocal.(*Fs)

// Get the object
o, err := f.NewObject(ctx, filePath)
require.NoError(t, err)

// Test the hash is as we expect
md5, err := o.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)

// Reupload it with different contents but same size and timestamp
var b = bytes.NewBufferString("CONTENT")
src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
err = o.Update(ctx, b, src)
require.NoError(t, err)

// Check the hash is as expected
md5, err = o.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, "45685e95985e20822fb2538a522a5ccf", md5)
}

// Test hashes on deleting an object
func TestHashOnDelete(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
f := r.Flocal.(*Fs)

// Get the object
o, err := f.NewObject(ctx, filePath)
require.NoError(t, err)

// Test the hash is as we expect
md5, err := o.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)

// Delete the object
require.NoError(t, o.Remove(ctx))

// Test the hash cache is empty
require.Nil(t, o.(*Object).hashes)

// Test the hash returns an error
_, err = o.Hash(ctx, hash.MD5)
require.Error(t, err)
}
func TestMetadata(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "metafile.txt"
when := time.Now()
const dayLength = len("2001-01-01")
whenRFC := when.Format(time.RFC3339Nano)
r.WriteFile(filePath, "metadata file contents", when)
f := r.Flocal.(*Fs)

// Get the object
obj, err := f.NewObject(ctx, filePath)
require.NoError(t, err)
o := obj.(*Object)

features := f.Features()

var hasXID, hasAtime, hasBtime bool
switch runtime.GOOS {
case "darwin", "freebsd", "netbsd", "linux":
hasXID, hasAtime, hasBtime = true, true, true
case "openbsd", "solaris":
hasXID, hasAtime = true, true
case "windows":
hasAtime, hasBtime = true, true
case "plan9", "js":
// nada
default:
t.Errorf("No test cases for OS %q", runtime.GOOS)
}

assert.True(t, features.ReadMetadata)
assert.True(t, features.WriteMetadata)
assert.Equal(t, xattrSupported, features.UserMetadata)

t.Run("Xattr", func(t *testing.T) {
if !xattrSupported {
t.Skip()
}
m, err := o.getXattr()
require.NoError(t, err)
assert.Nil(t, m)

inM := fs.Metadata{
"potato": "chips",
"cabbage": "soup",
}
err = o.setXattr(inM)
require.NoError(t, err)

m, err = o.getXattr()
require.NoError(t, err)
assert.NotNil(t, m)
assert.Equal(t, inM, m)
})

checkTime := func(m fs.Metadata, key string, when time.Time) {
mt, ok := o.parseMetadataTime(m, key)
assert.True(t, ok)
dt := mt.Sub(when)
precision := time.Second
assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v", key, dt, precision))
}

checkInt := func(m fs.Metadata, key string, base int) int {
value, ok := o.parseMetadataInt(m, key, base)
assert.True(t, ok)
return value
}
t.Run("Read", func(t *testing.T) {
m, err := o.Metadata(ctx)
require.NoError(t, err)
assert.NotNil(t, m)

// All OSes have these
checkInt(m, "mode", 8)
checkTime(m, "mtime", when)

assert.Equal(t, len(whenRFC), len(m["mtime"]))
assert.Equal(t, whenRFC[:dayLength], m["mtime"][:dayLength])

if hasAtime {
checkTime(m, "atime", when)
}
if hasBtime {
checkTime(m, "btime", when)
}
if hasXID {
checkInt(m, "uid", 10)
checkInt(m, "gid", 10)
}
})

t.Run("Write", func(t *testing.T) {
newAtimeString := "2011-12-13T14:15:16.999999999Z"
newAtime := fstest.Time(newAtimeString)
newMtimeString := "2011-12-12T14:15:16.999999999Z"
newMtime := fstest.Time(newMtimeString)
newBtimeString := "2011-12-11T14:15:16.999999999Z"
newBtime := fstest.Time(newBtimeString)
newM := fs.Metadata{
"mtime": newMtimeString,
"atime": newAtimeString,
"btime": newBtimeString,
// Can't test uid, gid without being root
"mode": "0767",
"potato": "wedges",
}
err := o.writeMetadata(newM)
require.NoError(t, err)

m, err := o.Metadata(ctx)
require.NoError(t, err)
assert.NotNil(t, m)

mode := checkInt(m, "mode", 8)
if runtime.GOOS != "windows" {
assert.Equal(t, 0767, mode&0777, fmt.Sprintf("mode wrong - expecting 0767 got 0%o", mode&0777))
}

checkTime(m, "mtime", newMtime)
if hasAtime {
checkTime(m, "atime", newAtime)
}
if haveSetBTime {
checkTime(m, "btime", newBtime)
}
if xattrSupported {
assert.Equal(t, "wedges", m["potato"])
}
})

}

@@ -11,7 +11,8 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "",
NilObject: (*local.Object)(nil),
RemoteName: "",
NilObject: (*local.Object)(nil),
QuickTestOK: true,
})
}
backend/local/metadata.go (new file, 138 lines)
@@ -0,0 +1,138 @@
package local

import (
"fmt"
"os"
"runtime"
"strconv"
"time"

"github.com/rclone/rclone/fs"
)

const metadataTimeFormat = time.RFC3339Nano

// system metadata keys which this backend owns
//
// not all values supported on all OSes
var systemMetadataInfo = map[string]fs.MetadataHelp{
"mode": {
Help: "File type and mode",
Type: "octal, unix style",
Example: "0100664",
},
"uid": {
Help: "User ID of owner",
Type: "decimal number",
Example: "500",
},
"gid": {
Help: "Group ID of owner",
Type: "decimal number",
Example: "500",
},
"rdev": {
Help: "Device ID (if special file)",
Type: "hexadecimal",
Example: "1abc",
},
"atime": {
Help: "Time of last access",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z07:00",
},
"mtime": {
Help: "Time of last modification",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z07:00",
},
"btime": {
Help: "Time of file birth (creation)",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z07:00",
},
}

// parse a time string from metadata with key
func (o *Object) parseMetadataTime(m fs.Metadata, key string) (t time.Time, ok bool) {
value, ok := m[key]
if ok {
var err error
t, err = time.Parse(metadataTimeFormat, value)
if err != nil {
fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
ok = false
}
}
return t, ok
}

// parse an int from metadata with key and base
func (o *Object) parseMetadataInt(m fs.Metadata, key string, base int) (result int, ok bool) {
value, ok := m[key]
if ok {
var err error
result64, err := strconv.ParseInt(value, base, 64)
if err != nil {
fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
ok = false
}
result = int(result64)
}
return result, ok
}

// Write the metadata into the file
//
// It isn't possible to set the ctime and btime under Unix
func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
var err error
atime, atimeOK := o.parseMetadataTime(m, "atime")
mtime, mtimeOK := o.parseMetadataTime(m, "mtime")
btime, btimeOK := o.parseMetadataTime(m, "btime")
if atimeOK || mtimeOK {
if atimeOK && !mtimeOK {
mtime = atime
}
if !atimeOK && mtimeOK {
atime = mtime
}
err = o.setTimes(atime, mtime)
if err != nil {
outErr = fmt.Errorf("failed to set times: %w", err)
}
}
if haveSetBTime {
if btimeOK {
err = setBTime(o.path, btime)
if err != nil {
outErr = fmt.Errorf("failed to set birth (creation) time: %w", err)
}
}
}
uid, hasUID := o.parseMetadataInt(m, "uid", 10)
gid, hasGID := o.parseMetadataInt(m, "gid", 10)
if hasUID {
// FIXME should read UID and GID of current user and only attempt to set it if different
if !hasGID {
gid = uid
}
if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
fs.Debugf(o, "Ignoring request to set ownership %o.%o on this OS", gid, uid)
} else {
err = os.Chown(o.path, uid, gid)
if err != nil {
outErr = fmt.Errorf("failed to change ownership: %w", err)
}
}
}
mode, hasMode := o.parseMetadataInt(m, "mode", 8)
if hasMode {
err = os.Chmod(o.path, os.FileMode(mode))
if err != nil {
outErr = fmt.Errorf("failed to change permissions: %w", err)
}
}
// FIXME not parsing rdev yet
return outErr
}
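The helpers above read times and integers out of an fs.Metadata map using the system keys documented in systemMetadataInfo, and writeMetadataToFile falls back to mtime when only atime is supplied (and vice versa). An illustrative map using those keys (values borrowed from the examples and tests above; not part of the diff):

// Illustrative only: a metadata map writeMetadataToFile could apply.
// mode/uid/gid are parsed with parseMetadataInt, the times with
// parseMetadataTime; btime is only honoured where haveSetBTime is true.
var exampleMetadata = fs.Metadata{
	"mode":  "0100664",
	"uid":   "500",
	"gid":   "500",
	"atime": "2011-12-13T14:15:16.999999999Z",
	"mtime": "2011-12-12T14:15:16.999999999Z",
	"btime": "2011-12-11T14:15:16.999999999Z",
}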
backend/local/metadata_bsd.go (new file, 38 lines)
@@ -0,0 +1,38 @@
//go:build darwin || freebsd || netbsd
// +build darwin freebsd netbsd

package local

import (
"fmt"
"syscall"
"time"

"github.com/rclone/rclone/fs"
)

// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
info, err := o.fs.lstat(o.path)
if err != nil {
return err
}
stat, ok := info.Sys().(*syscall.Stat_t)
if !ok {
fs.Debugf(o, "didn't return Stat_t as expected")
return nil
}
m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
m.Set("uid", fmt.Sprintf("%d", stat.Uid))
m.Set("gid", fmt.Sprintf("%d", stat.Gid))
if stat.Rdev != 0 {
m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
}
setTime := func(key string, t syscall.Timespec) {
m.Set(key, time.Unix(t.Unix()).Format(metadataTimeFormat))
}
setTime("atime", stat.Atimespec)
setTime("mtime", stat.Mtimespec)
setTime("btime", stat.Birthtimespec)
return nil
}
backend/local/metadata_linux.go (new file, 47 lines)
@@ -0,0 +1,47 @@
//go:build linux
// +build linux

package local

import (
"fmt"
"time"

"github.com/rclone/rclone/fs"
"golang.org/x/sys/unix"
)

// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
flags := unix.AT_SYMLINK_NOFOLLOW
if o.fs.opt.FollowSymlinks {
flags = 0
}
var stat unix.Statx_t
err = unix.Statx(unix.AT_FDCWD, o.path, flags, (0 |
unix.STATX_TYPE | // Want stx_mode & S_IFMT
unix.STATX_MODE | // Want stx_mode & ~S_IFMT
unix.STATX_UID | // Want stx_uid
unix.STATX_GID | // Want stx_gid
unix.STATX_ATIME | // Want stx_atime
unix.STATX_MTIME | // Want stx_mtime
unix.STATX_CTIME | // Want stx_ctime
unix.STATX_BTIME), // Want stx_btime
&stat)
if err != nil {
return err
}
m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
m.Set("uid", fmt.Sprintf("%d", stat.Uid))
m.Set("gid", fmt.Sprintf("%d", stat.Gid))
if stat.Rdev_major != 0 || stat.Rdev_minor != 0 {
m.Set("rdev", fmt.Sprintf("%x", uint64(stat.Rdev_major)<<32|uint64(stat.Rdev_minor)))
}
setTime := func(key string, t unix.StatxTimestamp) {
m.Set(key, time.Unix(t.Sec, int64(t.Nsec)).Format(metadataTimeFormat))
}
setTime("atime", stat.Atime)
setTime("mtime", stat.Mtime)
setTime("btime", stat.Btime)
return nil
}
backend/local/metadata_other.go (new file, 21 lines)
@@ -0,0 +1,21 @@
//go:build plan9 || js
// +build plan9 js

package local

import (
"fmt"

"github.com/rclone/rclone/fs"
)

// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
info, err := o.fs.lstat(o.path)
if err != nil {
return err
}
m.Set("mode", fmt.Sprintf("%0o", info.Mode()))
m.Set("mtime", info.ModTime().Format(metadataTimeFormat))
return nil
}
backend/local/metadata_unix.go (new file, 37 lines)
@@ -0,0 +1,37 @@
//go:build openbsd || solaris
// +build openbsd solaris

package local

import (
"fmt"
"syscall"
"time"

"github.com/rclone/rclone/fs"
)

// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
info, err := o.fs.lstat(o.path)
if err != nil {
return err
}
stat, ok := info.Sys().(*syscall.Stat_t)
if !ok {
fs.Debugf(o, "didn't return Stat_t as expected")
return nil
}
m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
m.Set("uid", fmt.Sprintf("%d", stat.Uid))
m.Set("gid", fmt.Sprintf("%d", stat.Gid))
if stat.Rdev != 0 {
m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
}
setTime := func(key string, t syscall.Timespec) {
m.Set(key, time.Unix(t.Unix()).Format(metadataTimeFormat))
}
setTime("atime", stat.Atim)
setTime("mtime", stat.Mtim)
return nil
}
backend/local/metadata_windows.go (new file, 34 lines)
@@ -0,0 +1,34 @@
//go:build windows
// +build windows

package local

import (
"fmt"
"syscall"
"time"

"github.com/rclone/rclone/fs"
)

// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
info, err := o.fs.lstat(o.path)
if err != nil {
return err
}
stat, ok := info.Sys().(*syscall.Win32FileAttributeData)
if !ok {
fs.Debugf(o, "didn't return Win32FileAttributeData as expected")
return nil
}
// FIXME do something with stat.FileAttributes ?
m.Set("mode", fmt.Sprintf("%0o", info.Mode()))
setTime := func(key string, t syscall.Filetime) {
m.Set(key, time.Unix(0, t.Nanoseconds()).Format(metadataTimeFormat))
}
setTime("atime", stat.LastAccessTime)
setTime("mtime", stat.LastWriteTime)
setTime("btime", stat.CreationTime)
return nil
}
backend/local/setbtime.go (new file, 16 lines)
@@ -0,0 +1,16 @@
//go:build !windows
// +build !windows

package local

import (
"time"
)

const haveSetBTime = false

// setBTime changes the birth time of the file passed in
func setBTime(name string, btime time.Time) error {
// Does nothing
return nil
}
backend/local/setbtime_windows.go (new file, 28 lines)
@@ -0,0 +1,28 @@
//go:build windows
// +build windows

package local

import (
"os"
"syscall"
"time"
)

const haveSetBTime = true

// setBTime sets the birth time of the file passed in
func setBTime(name string, btime time.Time) (err error) {
h, err := syscall.Open(name, os.O_RDWR, 0755)
if err != nil {
return err
}
defer func() {
closeErr := syscall.Close(h)
if err == nil {
err = closeErr
}
}()
bFileTime := syscall.NsecToFiletime(btime.UnixNano())
return syscall.SetFileTime(h, &bFileTime, nil, nil)
}
backend/local/xattr.go (new file, 87 lines)
@@ -0,0 +1,87 @@
//go:build !openbsd && !plan9
// +build !openbsd,!plan9

package local

import (
"fmt"
"strings"

"github.com/pkg/xattr"
"github.com/rclone/rclone/fs"
)

const (
xattrPrefix = "user." // FIXME is this correct for all unixes?
xattrSupported = xattr.XATTR_SUPPORTED
)

// getXattr returns the extended attributes for an object
//
// It doesn't return any attributes owned by this backend in
// metadataKeys
func (o *Object) getXattr() (metadata fs.Metadata, err error) {
if !xattrSupported {
return nil, nil
}
var list []string
if o.fs.opt.FollowSymlinks {
list, err = xattr.List(o.path)
} else {
list, err = xattr.LList(o.path)
}
if err != nil {
return nil, fmt.Errorf("failed to read xattr: %w", err)
}
if len(list) == 0 {
return nil, nil
}
metadata = make(fs.Metadata, len(list))
for _, k := range list {
var v []byte
if o.fs.opt.FollowSymlinks {
v, err = xattr.Get(o.path, k)
} else {
v, err = xattr.LGet(o.path, k)
}
if err != nil {
return nil, fmt.Errorf("failed to read xattr key %q: %w", k, err)
}
k = strings.ToLower(k)
if !strings.HasPrefix(k, xattrPrefix) {
continue
}
k = k[len(xattrPrefix):]
if _, found := systemMetadataInfo[k]; found {
continue
}
metadata[k] = string(v)
}
return metadata, nil
}

// setXattr sets the metadata on the file Xattrs
//
// It doesn't set any attributes owned by this backend in metadataKeys
func (o *Object) setXattr(metadata fs.Metadata) (err error) {
if !xattrSupported {
return nil
}
for k, value := range metadata {
k = strings.ToLower(k)
if _, found := systemMetadataInfo[k]; found {
continue
}
k = xattrPrefix + k
v := []byte(value)
if o.fs.opt.FollowSymlinks {
err = xattr.Set(o.path, k, v)
} else {
err = xattr.LSet(o.path, k, v)
}
if err != nil {
return fmt.Errorf("failed to set xattr key %q: %w", k, err)
}
}
return nil
}
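getXattr and setXattr above round-trip user metadata through extended attributes, adding and stripping the "user." prefix and skipping the system keys. A small standalone sketch (assumptions: a hypothetical file path on an xattr-capable filesystem) showing the on-disk shape that such user metadata takes:

package main

import (
	"fmt"
	"log"

	"github.com/pkg/xattr"
)

func main() {
	const path = "testfile.txt" // hypothetical file on an xattr-capable filesystem
	// What setXattr would store for the user metadata key "potato".
	if err := xattr.Set(path, "user.potato", []byte("chips")); err != nil {
		log.Fatal(err)
	}
	// What getXattr would read back and surface as metadata key "potato".
	v, err := xattr.Get(path, "user.potato")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("user.potato = %s\n", v)
}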
Some files were not shown because too many files have changed in this diff.